+++ /dev/null
-# As config was originally based on an example by Olivier Grisel. Thanks!
-# https://github.com/ogrisel/python-appveyor-demo/blob/master/appveyor.yml
-clone_depth: 50
-
-# No reason for us to restrict the number concurrent jobs
-max_jobs: 100
-
-cache:
- - '%LOCALAPPDATA%\pip\Cache'
-
-environment:
- global:
- MINGW_32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin
- MINGW_64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
- OPENBLAS_32: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win32.zip
- OPENBLAS_64: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win64.zip
- APPVEYOR_SAVE_CACHE_ON_ERROR: true
- APPVEYOR_SKIP_FINALIZE_ON_EXIT: true
- TEST_TIMEOUT: 1000
- NPY_NUM_BUILD_JOBS: 4
-
- matrix:
- - PYTHON: C:\Python36
- PYTHON_VERSION: 3.6
- PYTHON_ARCH: 32
- TEST_MODE: fast
-
- - PYTHON: C:\Python37
- PYTHON_VERSION: 3.7
- PYTHON_ARCH: 32
- TEST_MODE: fast
-
- - PYTHON: C:\Python36-x64
- PYTHON_VERSION: 3.6
- PYTHON_ARCH: 64
- TEST_MODE: full
- INSTALL_PICKLE5: 1
-
- - PYTHON: C:\Python37-x64
- PYTHON_VERSION: 3.7
- PYTHON_ARCH: 64
- TEST_MODE: full
- INSTALL_PICKLE5: 1
-
-init:
- - "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%"
- - "ECHO \"%APPVEYOR_SCHEDULED_BUILD%\""
- # If there is a newer build queued for the same PR, cancel this one.
- # The AppVeyor 'rollout builds' option is supposed to serve the same
- # purpose but it is problematic because it tends to cancel builds pushed
- # directly to master instead of just PR builds (or the converse).
- # credits: JuliaLang developers.
- - ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod `
- https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | `
- Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { `
- raise "There are newer queued builds for this pull request, skipping build."
- }
-
-install:
- # Prepend newly installed Python to the PATH of this build (this cannot be
- # done from inside the powershell script as it would require to restart
- # the parent CMD process).
- - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH%
- - if [%PYTHON_ARCH%]==[32] SET PATH=%MINGW_32%;%PATH% & SET OPENBLAS=%OPENBLAS_32%
- - if [%PYTHON_ARCH%]==[64] SET PATH=%MINGW_64%;%PATH% & SET OPENBLAS=%OPENBLAS_64%
-
- # Check that we have the expected version and architecture for Python
- - python --version
- - >-
- %CMD_IN_ENV%
- python -c "import sys,platform,struct;
- print(sys.platform, platform.machine(), struct.calcsize('P') * 8, )"
-
- # Install "openblas.a" to PYTHON\lib
- # Library provided by Matthew Brett at https://github.com/matthew-brett/build-openblas
- - ps: |
- $clnt = new-object System.Net.WebClient
- $file = "$(New-TemporaryFile).zip"
- $tmpdir = New-TemporaryFile | %{ rm $_; mkdir $_ }
- $destination = "$env:PYTHON\lib\openblas.a"
-
- echo $file
- echo $tmpdir
- echo $env:OPENBLAS
-
- $clnt.DownloadFile($env:OPENBLAS, $file)
- Get-FileHash $file | Format-List
-
- Expand-Archive $file $tmpdir
-
- rm $tmpdir\$env:PYTHON_ARCH\lib\*.dll.a
- $lib = ls $tmpdir\$env:PYTHON_ARCH\lib\*.a | ForEach { ls $_ } | Select-Object -first 1
- echo $lib
-
- cp $lib $destination
- ls $destination
-
- # Upgrade to the latest pip.
- - 'python -m pip install -U pip setuptools wheel'
-
- - if [%INSTALL_PICKLE5%]==[1] echo pickle5 >> tools/ci/appveyor/requirements.txt
-
- # Install the numpy test dependencies.
- - 'pip install -U --timeout 5 --retries 2 -r tools/ci/appveyor/requirements.txt'
-
-build_script:
- # Here, we add MinGW to the path to be able to link an OpenBLAS.dll
- # We then use the import library from the DLL to compile with MSVC
- - ps: |
- pip wheel -v -v -v --wheel-dir=dist .
-
- # For each wheel that pip has placed in the "dist" directory
- # First, upload the wheel to the "artifacts" tab and then
- # install the wheel. If we have only built numpy (as is the case here),
- # then there will be one wheel to install.
-
- # This method is more representative of what will be distributed,
- # because it actually tests what the built wheels will be rather than
- # what 'setup.py install' will do and at it uploads the wheels so that
- # they can be inspected.
-
- ls dist -r | Foreach-Object {
- Push-AppveyorArtifact $_.FullName
- pip install $_.FullName
- }
-
-test_script:
- python runtests.py -v -n -m %TEST_MODE% -- --junitxml=%cd%\junit-results.xml
-
-after_build:
- # Remove old or huge cache files to hopefully not exceed the 1GB cache limit.
- #
- # If the cache limit is reached, the cache will not be updated (of not even
- # created in the first run). So this is a trade of between keeping the cache
- # current and having a cache at all.
- # NB: This is done only `on_success` since the cache in uploaded only on
- # success anyway.
- - C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -type f -mtime +360 -delete
- - C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -type f -size +10M -delete
- - C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -empty -delete
- # Show size of cache
- - C:\cygwin\bin\du -hs "%LOCALAPPDATA%\pip\Cache"
-
-on_finish:
- # We can get a nice display of test results in the "test" tab with py.test
- # For now, this does nothing.
- - ps: |
- If (Test-Path .\junit-results.xml) {
- (new-object net.webclient).UploadFile(
- "https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)",
- (Resolve-Path .\junit-results.xml)
- )
- }
- $LastExitCode = 0
python3 -m venv venv
ln -s $(which python3) venv/bin/python3.6
. venv/bin/activate
- pip install cython sphinx==1.8.5 matplotlib ipython
+ pip install cython sphinx==2.2.0 matplotlib ipython
sudo apt-get update
sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex
pip install .
pip install scipy
+ - run:
+ name: create release notes
+ command: |
+ . venv/bin/activate
+ pip install git+https://github.com/hawkowl/towncrier.git@master
+ VERSION=$(python -c "import setup; print(setup.VERSION)")
+ towncrier --version $VERSION --yes
+ ./tools/ci/test_all_newsfragments_used.py
- run:
name: build devdocs
command: |
codecov:
- ci:
- - !appveyor
notify:
require_ci_to_pass: no
after_n_builds: 1
[run]
branch = True
include = */numpy/*
+disable_warnings = include-ignored
--- /dev/null
+version: 1
+update_configs:
+ - package_manager: "python"
+ directory: "/"
+ update_schedule: "weekly"
+ commit_message:
+ prefix: "MAINT"
+ default_labels:
+ - "03 - Maintenance"
+ If this is your first time contributing to a project on GitHub, please read
through our
-[guide to contributing to numpy](https://docs.scipy.org/doc/numpy/dev/index.html)
+[guide to contributing to numpy](https://numpy.org/devdocs/dev/index.html)
+ If you have contributed to other projects on GitHub, you can go straight to our
-[development workflow](https://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html)
+[development workflow](https://numpy.org/devdocs/dev/development_workflow.html)
Either way, please be sure to follow our
-[convention for commit messages](https://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html#writing-the-commit-message).
+[convention for commit messages](https://numpy.org/devdocs/dev/development_workflow.html#writing-the-commit-message).
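
As a sketch of that convention, the first line of a commit message starts with
an uppercase acronym naming the type of change, followed by a short summary
(the prefixes below are the ones used throughout this changelog; the subject
lines are illustrative):

    ENH: add a new keyword to some function
    BUG: fix an overflow in some edge case
    DOC: clarify a confusing docstring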
If you are writing new C code, please follow the style described in
``doc/C_STYLE_GUIDE``.
+github: [numfocus]
tidelift: pypi/numpy
custom: https://www.numpy.org/#support-numpy
numpy/core/src/npysort/selection.c
numpy/core/src/npysort/timsort.c
numpy/core/src/npysort/sort.c
-numpy/core/src/common/npy_binsearch.h
-numpy/core/src/common/npy_partition.h
-numpy/core/src/common/npy_sort.h
-numpy/core/src/common/templ_common.h
numpy/core/src/private/npy_binsearch.h
numpy/core/src/private/npy_partition.h
numpy/core/src/private/templ_common.h
index:
build_command:
- python3 setup.py build
+ after_prepare:
+ - pip3 install --upgrade --user cython
+ - export PATH="$HOME/.local/bin:$PATH"
queries:
- include: py/file-not-closed
Christoph Gohlke <cgohlke@uci.edu> cgholke <?@?>
Christoph Gohlke <cgohlke@uci.edu> cgohlke <cgohlke@uci.edu>
Christoph Gohlke <cgohlke@uci.edu> Christolph Gohlke <cgohlke@uci.edu>
+Colin Snyder <47012605+colinsnyder@users.noreply.github.com> colinsnyder <47012605+colinsnyder@users.noreply.github.com>
Daniel B Allan <daniel.b.allan@gmail.com> danielballan <daniel.b.allan@gmail.com>
Daniel da Silva <mail@danieldasilva.org> Daniel da Silva <daniel@meltingwax.net>
Daniel da Silva <mail@danieldasilva.org> Daniel da Silva <var.mail.daniel@gmail.com>
Jerome Kelleher <jerome.kelleher@ed.ac.uk> jeromekelleher <jerome.kelleher@ed.ac.uk>
Johannes Hampp <johannes.hampp@zeu.uni-giessen.de> euronion <42553970+euronion@users.noreply.github.com>
Johannes Schönberger <hannesschoenberger@gmail.com> Johannes Schönberger <jschoenberger@demuc.de>
+Johann Faouzi <johann.faouzi@gmail.com> johann.faouzi <johann.faouzi@icm-institute.org>
John Darbyshire <24256554+attack68@users.noreply.github.com> attack68 <24256554+attack68@users.noreply.github.com>
Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Joseph Fox-Rabinovitz <joseph.r.fox-rabinovitz@nasa.gov>
Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Joseph Fox-Rabinovitz <madphysicist@users.noreply.github.com>
Julian Taylor <juliantaylor108@gmail.com> Julian Taylor <juliantaylor108@googlemail.com>
Julien Lhermitte <jrmlhermitte@gmail.com> Julien Lhermitte <lhermitte@bnl.gov>
Julien Schueller <julien.schueller@gmail.com> jschueller <julien.schueller@gmail.com>
+Justus Magin <keewis@posteo.de> keewis <keewis@users.noreply.github.com>
Kai Striega <kaistriega@gmail.com> kai <kaistriega@gmail.com>
Kai Striega <kaistriega@gmail.com> kai-striega <kaistriega@gmail.com>
Kai Striega <kaistriega@gmail.com> kai-striega <kaistriega+github@gmail.com>
Lars Grüter <lagru@mailbox.org> Lars G <lagru@mailbox.org>
Luis Pedro Coelho <luis@luispedro.org> Luis Pedro Coelho <lpc@cmu.edu>
Luke Zoltan Kelley <lkelley@cfa.harvard.edu> lzkelley <lkelley@cfa.harvard.edu>
+Magdalena Proszewska <magdalena.proszewska@gmail.com> mpro <magdalena.proszewska@gmail.com>
+Magdalena Proszewska <magdalena.proszewska@gmail.com> mproszewska <38814059+mproszewska@users.noreply.github.com>
Manoj Kumar <manojkumarsivaraj334@gmail.com> MechCoder <manojkumarsivaraj334@gmail.com>
Mark DePristo <mdepristo@synapdx.com> markdepristo <mdepristo@synapdx.com>
Mark Weissman <mw9050@gmail.com> m-d-w <mw9050@gmail.com>
Mark Wiebe <mwwiebe@gmail.com> Mark Wiebe <mwiebe@enthought.com>
Mark Wiebe <mwwiebe@gmail.com> Mark Wiebe <mwiebe@georg.(none)>
Martin Goodson <martingoodson@gmail.com> martingoodson <martingoodson@gmail.com>
+Martin Reinecke <martin@mpa-garching.mpg.de> mreineck <martin@mpa-garching.mpg.de>
Martin Teichmann <martin.teichmann@xfel.eu> Martin Teichmann <lkb.teichmann@gmail.com>
Martino Sorbaro <martino.sorbaro@ed.ac.uk> martinosorb <martino.sorbaro@ed.ac.uk>
Mattheus Ueckermann <empeeu@yahoo.com> empeeu <empeeu@yahoo.com>
Yuji Kanagawa <yuji.kngw.80s.revive@gmail.com> kngwyu <yuji.kngw.80s.revive@gmail.com>
Yury Kirienko <yury.kirienko@gmail.com> kirienko <yury.kirienko@gmail.com>
Zixu Zhao <zixu.zhao.tireless@gmail.com> ZZhaoTireless <zixu.zhao.tireless@gmail.com>
-Ziyan Zhou<ziyan.zhou@mujin.co.jp> Ziyan <ziyan.zhou@mujin.co.jp>
+Ziyan Zhou <ziyan.zhou@mujin.co.jp> Ziyan <ziyan.zhou@mujin.co.jp>
+Zieji Pohz <poh.ziji@gmail.com> jpoh <poh.zijie@gmail.com>
+Zieji Pohz <poh.ziji@gmail.com> zjpoh <poh.zijie@gmail.com>
+Zieji Pohz <poh.ziji@gmail.com> Zijie (ZJ) Poh <8103276+zjpoh@users.noreply.github.com>
+Zolisa Bleki <zolisa.bleki@gmail.com> zoj613 <44142765+zoj613@users.noreply.github.com>
+Zolisa Bleki <zolisa.bleki@gmail.com> RedRuM <44142765+zoj613@users.noreply.github.com>
luzpaz <kunda@scribus.net> luz.paz <luzpaz@users.noreply.github.com>
luzpaz <kunda@scribus.net> luzpaz <luzpaz@users.noreply.github.com>
spacescientist <aspacescientist@protonmail.com> spacescientist <spacescientist@pm.me>
directories:
- $HOME/.cache/pip
+stage: Comprehensive tests
+
+stages:
+ # Do the style check and a single test job, don't proceed if it fails
+ - name: Initial tests
+ # Do the rest of the tests
+ - name: Comprehensive tests
+
env:
global:
- - OpenBLAS_version=0.3.5
+ - OpenBLAS_version=0.3.7
- WHEELHOUSE_UPLOADER_USERNAME=travis.numpy
# The following is generated with the command:
# travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY
iFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAM\
ahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU="
-python:
- - 3.5
- - 3.6
- - 3.8-dev
matrix:
include:
- - python: 3.7
- env: INSTALL_PICKLE5=1
+ # Do all python versions without environment variables set
+ - stage: Initial tests
+ python: 3.8
+
- python: 3.5
+ - python: 3.6
+ - python: 3.7
+
+ - python: 3.6
+ dist: bionic
env: USE_DEBUG=1
addons:
apt:
- python3-dbg
- python3-dev
- python3-setuptools
- - python: 3.6
+
+ - python: 3.7
env: USE_WHEEL=1 RUN_FULL_TESTS=1 RUN_COVERAGE=1 INSTALL_PICKLE5=1
- - python: 3.6
+
+ - python: 3.7
env: USE_SDIST=1
- - python: 3.6
+
+ - python: 3.7
env:
- PYTHONOPTIMIZE=2
- BLAS=None
- NPY_BLAS_ORDER=mkl,blis,openblas,atlas,accelerate,blas
- NPY_LAPACK_ORDER=MKL,OPENBLAS,ATLAS,ACCELERATE,LAPACK
- USE_ASV=1
- - python: 3.5
+
+ - python: 3.7
env: NPY_RELAXED_STRIDES_CHECKING=0
- - python: 3.6
+
+ - python: 3.7
env: USE_WHEEL=1 NPY_RELAXED_STRIDES_DEBUG=1
- - python: 3.6
+
+ - python: 3.7
env: NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=0
- - python: 3.6
+
+ - python: 3.7
env:
- BLAS=None
- LAPACK=None
- ATLAS=None
- - os: linux-ppc64le
- python: 3.6
+
+ - python: 3.7
+ os: linux
+ arch: ppc64le
env:
- # for matrix annotation only
- - PPC64_LE=1
- # use POWER8 OpenBLAS build, not system ATLAS
+ # use ppc64le OpenBLAS build, not system ATLAS
- ATLAS=None
+ - python: 3.7
+ os: linux
+ arch: s390x
+ env:
+ # use s390x OpenBLAS build, not system ATLAS
+ - ATLAS=None
+
+
before_install:
- ./tools/travis-before-install.sh
Prerequisites
=============
-Building NumPy requires the following software installed:
+Building NumPy requires the following installed software:
1) For Python 3, Python__ 3.5.x or newer.
2) Cython >= 0.29.2 (for development versions of numpy, not for released
versions)
+
3) pytest__ (optional) 1.15 or later
This is required for testing numpy, but not for using it.
pytest__ http://pytest.readthedocs.io
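
For example, the build and test dependencies above can be installed with pip
(a minimal sketch; the version pin shown is illustrative)::

    pip install 'cython>=0.29.2' pytest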
-.. note::
+.. note::
If you want to build NumPy in order to work on NumPy itself, use
``runtests.py``. For more details, see
building SciPy a Fortran compiler is needed though, so we include some details
on Fortran compilers in the rest of this section.
-On OS X and Linux, all common compilers will work. Note that C99 support is
-required. For compilers that don't support the C99 language standard by
-default (such as ``gcc`` versions < 5.0), it should be enabled. For ``gcc``::
-
- export CFLAGS='-std=c99'
+On OS X and Linux, all common compilers will work.
For Fortran, ``gfortran`` works, ``g77`` does not. If ``g77`` is
installed, it will be detected and used first. To explicitly select
Windows
-------
-On Windows, building from source can be difficult. Currently the most robust
+On Windows, building from source can be difficult. Currently, the most robust
option is to use the Intel compilers, or alternatively MSVC (the same version
as used to build Python itself) with Intel ifort. Intel itself maintains a
good `application note <https://software.intel.com/en-us/articles/numpyscipy-with-intel-mkl>`_
Windows
-------
-The Intel compilers work with Intel MKL, see the application note linked above.
+The Intel compilers work with Intel MKL; see the application note linked above.
MingwPy__ works with OpenBLAS.
-For an overview of the state of BLAS/LAPACK libraries on Windows, see
+For an overview of the state of BLAS/LAPACK libraries on Windows, see
`here <https://mingwpy.github.io/blas_lapack.html>`_.
OS X
Ubuntu/Debian
-------------
-For best performance a development package providing BLAS and CBLAS should be
+For best performance, a development package providing BLAS and CBLAS should be
installed. Some of the options available are:
- ``libblas-dev``: reference BLAS (not very optimized)
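
For example, the reference implementation can be installed on Debian or
Ubuntu with (a minimal sketch)::

    sudo apt-get install libblas-dev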
# Avoid using MANIFEST.in for that.
#
include MANIFEST.in
+include pyproject.toml
include pytest.ini
include *.txt
include README.md
include site.cfg.example
+include runtests.py
+include tox.ini
+include .coveragerc
+include test_requirements.txt
recursive-include numpy/random *.pyx *.pxd *.pyx.in *.pxd.in
+include numpy/random/include/*
+include numpy/__init__.pxd
# Add build support that should go in sdist, but not go in bdist/be installed
# Note that sub-directories that don't have __init__ are apparently not
# included by 'recursive-include', so list those separately
recursive-include numpy *
recursive-include numpy/_build_utils *
recursive-include numpy/linalg/lapack_lite *
-include runtests.py
-include tox.ini pytest.ini .coveragerc
recursive-include tools *
# Add sdist files whose use depends on local configuration.
include numpy/core/src/common/cblasfuncs.c
[](
https://travis-ci.org/numpy/numpy)
-[](
- https://ci.appveyor.com/project/charris/numpy)
[](
https://dev.azure.com/numpy/numpy/_build/latest?definitionId=5)
[](
NumPy is the fundamental package needed for scientific computing with Python.
- **Website:** https://www.numpy.org
-- **Documentation:** http://docs.scipy.org/
+- **Documentation:** https://docs.scipy.org/
- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion
- **Source code:** https://github.com/numpy/numpy
- **Contributing:** https://www.numpy.org/devdocs/dev/index.html
python -c 'import numpy; numpy.test()'
+
+Call for Contributions
+----------------------
+
+NumPy appreciates help from people with a wide range of backgrounds.
+Work such as high-level documentation or website improvements is valuable,
+and we would like to grow our team with people filling these roles.
+Small improvements or fixes are always appreciated, and issues labeled as
+"easy" may be a good starting point.
+If you are considering larger contributions outside the traditional coding
+work, please contact us through the mailing list.
+
+
[](https://numfocus.org)
variables:
# OpenBLAS_version should be updated
# to match numpy-wheels repo
- OpenBLAS_version: 0.3.7.dev
+ OpenBLAS_version: 0.3.7
-jobs:
-- job: Linux_Python_36_32bit_full_with_asserts
- pool:
- vmImage: 'ubuntu-16.04'
- steps:
- - script: |
- docker pull i386/ubuntu:bionic
- docker run -v $(pwd):/numpy i386/ubuntu:bionic /bin/bash -c "cd numpy && \
- apt-get -y update && \
- apt-get -y install python3.6-dev python3-pip locales python3-certifi && \
- locale-gen fr_FR && update-locale && \
- pip3 install setuptools nose cython==0.29.0 pytest pytz pickle5 && \
- apt-get -y install gfortran-5 wget && \
- target=\$(python3 tools/openblas_support.py) && \
- cp -r \$target/usr/local/lib/* /usr/lib && \
- cp \$target/usr/local/include/* /usr/include && \
- python3 -m pip install . && \
- F77=gfortran-5 F90=gfortran-5 \
- CFLAGS='-UNDEBUG -std=c99' python3 runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml && \
- python3 tools/openblas_support.py --check_version $(OpenBLAS_version)"
- displayName: 'Run 32-bit Ubuntu Docker Build / Tests'
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- testResultsFiles: '**/test-*.xml'
- failTaskOnFailedTests: true
- testRunTitle: 'Publish test results for Python 3.6-32 bit full Linux'
-- job: macOS
- pool:
- # NOTE: at time of writing, there is a danger
- # that using an invalid vmIMage string for macOS
- # image silently redirects to a Windows build on Azure;
- # for now, use the only image name officially present in
- # the docs even though i.e., numba uses another in their
- # azure config for mac os -- Microsoft has indicated
- # they will patch this issue
- vmImage: macOS-10.13
- steps:
- # the @0 refers to the (major) version of the *task* on Microsoft's
- # end, not the order in the build matrix nor anything to do
- # with version of Python selected
- - task: UsePythonVersion@0
- inputs:
- versionSpec: '3.6'
- addToPath: true
- architecture: 'x64'
- # NOTE: do we have a compelling reason to use older / newer
- # versions of Xcode toolchain for testing?
- - script: /bin/bash -c "sudo xcode-select -s /Applications/Xcode_10.app/Contents/Developer"
- displayName: 'select Xcode version'
- # NOTE: might be better if we could avoid installing
- # two C compilers, but with homebrew looks like we're
- # now stuck getting the full gcc toolchain instead of
- # just pulling in gfortran
- - script: |
- # same version of gfortran as the wheel builds
- brew install gcc49
- # manually link critical gfortran libraries
- ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libgfortran.3.dylib /usr/local/lib/libgfortran.3.dylib
- ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libquadmath.0.dylib /usr/local/lib/libquadmath.0.dylib
- # manually symlink gfortran-4.9 to plain gfortran
- # for f2py
- ln -s /usr/local/bin/gfortran-4.9 /usr/local/bin/gfortran
- displayName: 'make gfortran available on mac os vm'
- # use the pre-built openblas binary that most closely
- # matches our MacOS wheel builds -- currently based
- # primarily on file size / name details
- - script: |
- target=$(python tools/openblas_support.py)
- # manually link to appropriate system paths
- cp $target/usr/local/lib/* /usr/local/lib/
- cp $target/usr/local/include/* /usr/local/include/
- displayName: 'install pre-built openblas'
- - script: python -m pip install --upgrade pip setuptools wheel
- displayName: 'Install tools'
- - script: python -m pip install cython nose pytz pytest pickle5 vulture docutils sphinx==1.8.5 numpydoc
- displayName: 'Install dependencies; some are optional to avoid test skips'
- - script: /bin/bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/ | grep 'unreachable'"
- displayName: 'Check for unreachable code paths in Python modules'
- # prefer usage of clang over gcc proper
- # to match likely scenario on many user mac machines
- - script: python setup.py build -j 4 install
- displayName: 'Build NumPy'
- env:
- BLAS: None
- LAPACK: None
- ATLAS: None
- ACCELERATE: None
- CC: /usr/bin/clang
- # wait until after dev build of NumPy to pip
- # install matplotlib to avoid pip install of older numpy
- - script: python -m pip install matplotlib
- displayName: 'Install matplotlib before refguide run'
- - script: python runtests.py -g --refguide-check
- displayName: 'Run Refuide Check'
- - script: python runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml
- displayName: 'Run Full NumPy Test Suite'
- - bash: python tools/openblas_support.py --check_version $(OpenBLAS_version)
- displayName: 'Verify OpenBLAS version'
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- testResultsFiles: '**/test-*.xml'
- failTaskOnFailedTests: true
- testRunTitle: 'Publish test results for Python 3.6 64-bit full Mac OS'
-- job: Windows
- pool:
- vmImage: 'VS2017-Win2016'
- strategy:
- maxParallel: 6
- matrix:
- Python36-32bit-fast:
- PYTHON_VERSION: '3.6'
- PYTHON_ARCH: 'x86'
- TEST_MODE: fast
- BITS: 32
- Python37-32bit-fast:
- PYTHON_VERSION: '3.7'
- PYTHON_ARCH: 'x86'
- TEST_MODE: fast
- BITS: 32
- Python35-64bit-full:
- PYTHON_VERSION: '3.5'
- PYTHON_ARCH: 'x64'
- TEST_MODE: full
- BITS: 64
- Python36-64bit-full:
- PYTHON_VERSION: '3.6'
- PYTHON_ARCH: 'x64'
- TEST_MODE: full
- INSTALL_PICKLE5: 1
- BITS: 64
- Python37-64bit-full:
- PYTHON_VERSION: '3.7'
- PYTHON_ARCH: 'x64'
- TEST_MODE: full
- INSTALL_PICKLE5: 1
- BITS: 64
- steps:
- - task: UsePythonVersion@0
- inputs:
- versionSpec: $(PYTHON_VERSION)
- addToPath: true
- architecture: $(PYTHON_ARCH)
- - script: python -m pip install --upgrade pip setuptools wheel
- displayName: 'Install tools'
- - script: python -m pip install cython nose pytz pytest
- displayName: 'Install dependencies; some are optional to avoid test skips'
- - script: if [%INSTALL_PICKLE5%]==[1] python -m pip install pickle5
- displayName: 'Install optional pickle5 backport (only for python3.6 and 3.7)'
+stages:
+- stage: InitialTests
+ jobs:
+ - job: WindowsFast
+ pool:
+ vmImage: 'VS2017-Win2016'
+ strategy:
+ matrix:
+ Python36-64bit-fast:
+ PYTHON_VERSION: '3.6'
+ PYTHON_ARCH: 'x64'
+ TEST_MODE: fast
+ BITS: 64
+ steps:
+ - template: azure-steps-windows.yml
- - powershell: |
- $pyversion = python -c "from __future__ import print_function; import sys; print(sys.version.split()[0])"
- Write-Host "Python Version: $pyversion"
- $target = "C:\\hostedtoolcache\\windows\\Python\\$pyversion\\$(PYTHON_ARCH)\\lib\\openblas.a"
- Write-Host "target path: $target"
- $openblas = python tools/openblas_support.py
- cp $openblas $target
- displayName: 'Download / Install OpenBLAS'
+- stage: ComprehensiveTests
+ jobs:
+ - job: Linux_Python_36_32bit_full_with_asserts
+ pool:
+ vmImage: 'ubuntu-16.04'
+ steps:
+ - script: |
+ docker pull i386/ubuntu:bionic
+ docker run -v $(pwd):/numpy i386/ubuntu:bionic /bin/bash -c "cd numpy && \
+ apt-get -y update && \
+ apt-get -y install python3.6-dev python3-pip locales python3-certifi && \
+ locale-gen fr_FR && update-locale && \
+ apt-get -y install gfortran-5 wget && \
+ target=\$(python3 tools/openblas_support.py) && \
+ cp -r \$target/usr/local/lib/* /usr/lib && \
+ cp \$target/usr/local/include/* /usr/include && \
+ python3 -m pip install --user --upgrade pip setuptools && \
+ python3 -m pip install --user -r test_requirements.txt && \
+ python3 -m pip install . && \
+ F77=gfortran-5 F90=gfortran-5 \
+ CFLAGS=-UNDEBUG python3 runtests.py -n --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml && \
+ python3 tools/openblas_support.py --check_version $(OpenBLAS_version)"
+ displayName: 'Run 32-bit Ubuntu Docker Build / Tests'
+ - task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ failTaskOnFailedTests: true
+ testRunTitle: 'Publish test results for Python 3.6-32 bit full Linux'
+ - job: macOS
+ pool:
+ # NOTE: at time of writing, there is a danger
+ # that using an invalid vmIMage string for macOS
+ # image silently redirects to a Windows build on Azure;
+ # for now, use the only image name officially present in
+ # the docs even though i.e., numba uses another in their
+ # azure config for mac os -- Microsoft has indicated
+ # they will patch this issue
+ vmImage: macOS-10.14
+ steps:
+ # the @0 refers to the (major) version of the *task* on Microsoft's
+ # end, not the order in the build matrix nor anything to do
+ # with version of Python selected
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '3.6'
+ addToPath: true
+ architecture: 'x64'
+ # NOTE: do we have a compelling reason to use older / newer
+ # versions of Xcode toolchain for testing?
+ - script: /bin/bash -c "sudo xcode-select -s /Applications/Xcode_10.app/Contents/Developer"
+ displayName: 'select Xcode version'
+ # NOTE: might be better if we could avoid installing
+ # two C compilers, but with homebrew looks like we're
+ # now stuck getting the full gcc toolchain instead of
+ # just pulling in gfortran
+ - script: |
+ # same version of gfortran as the wheel builds
+ brew install gcc49
+ # manually link critical gfortran libraries
+ ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libgfortran.3.dylib /usr/local/lib/libgfortran.3.dylib
+ ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libquadmath.0.dylib /usr/local/lib/libquadmath.0.dylib
+ # manually symlink gfortran-4.9 to plain gfortran
+ # for f2py
+ ln -s /usr/local/bin/gfortran-4.9 /usr/local/bin/gfortran
+ displayName: 'make gfortran available on mac os vm'
+ # use the pre-built openblas binary that most closely
+ # matches our MacOS wheel builds -- currently based
+ # primarily on file size / name details
+ - script: |
+ target=$(python tools/openblas_support.py)
+ # manually link to appropriate system paths
+ cp $target/usr/local/lib/* /usr/local/lib/
+ cp $target/usr/local/include/* /usr/local/include/
+ displayName: 'install pre-built openblas'
+ - script: python -m pip install --upgrade pip setuptools wheel
+ displayName: 'Install tools'
+ - script: |
+ python -m pip install -r test_requirements.txt
+ python -m pip install vulture docutils sphinx==2.2.0 numpydoc
+ displayName: 'Install dependencies; some are optional to avoid test skips'
+ - script: /bin/bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/ | grep 'unreachable'"
+ displayName: 'Check for unreachable code paths in Python modules'
+ # prefer usage of clang over gcc proper
+ # to match likely scenario on many user mac machines
+ - script: python setup.py build -j 4 build_src --verbose-cfg install
+ displayName: 'Build NumPy'
+ env:
+ BLAS: None
+ LAPACK: None
+ ATLAS: None
+ ACCELERATE: None
+ CC: /usr/bin/clang
+ # wait until after dev build of NumPy to pip
+ # install matplotlib to avoid pip install of older numpy
+ - script: python -m pip install matplotlib
+ displayName: 'Install matplotlib before refguide run'
+ - script: python runtests.py -g --refguide-check
+ displayName: 'Run Refguide Check'
+ - script: python runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml
+ displayName: 'Run Full NumPy Test Suite'
+ - bash: python tools/openblas_support.py --check_version $(OpenBLAS_version)
+ displayName: 'Verify OpenBLAS version'
+ - task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ failTaskOnFailedTests: true
+ testRunTitle: 'Publish test results for Python 3.6 64-bit full Mac OS'
+ - job: Windows
+ pool:
+ vmImage: 'VS2017-Win2016'
+ strategy:
+ maxParallel: 6
+ matrix:
+ Python36-32bit-fast:
+ PYTHON_VERSION: '3.6'
+ PYTHON_ARCH: 'x86'
+ TEST_MODE: fast
+ BITS: 32
+ Python37-32bit-fast:
+ PYTHON_VERSION: '3.7'
+ PYTHON_ARCH: 'x86'
+ TEST_MODE: fast
+ BITS: 32
+ Python38-32bit-fast:
+ PYTHON_VERSION: '3.8'
+ PYTHON_ARCH: 'x86'
+ TEST_MODE: fast
+ BITS: 32
+ Python35-64bit-full:
+ PYTHON_VERSION: '3.5'
+ PYTHON_ARCH: 'x64'
+ TEST_MODE: full
+ BITS: 64
+ Python36-64bit-full:
+ PYTHON_VERSION: '3.6'
+ PYTHON_ARCH: 'x64'
+ TEST_MODE: full
+ BITS: 64
+ Python37-64bit-full:
+ PYTHON_VERSION: '3.7'
+ PYTHON_ARCH: 'x64'
+ TEST_MODE: full
+ BITS: 64
+ Python38-64bit-full:
+ PYTHON_VERSION: '3.8'
+ PYTHON_ARCH: 'x64'
+ TEST_MODE: full
+ BITS: 64
+ steps:
+ - template: azure-steps-windows.yml
+ - job: Linux_PyPy3
+ pool:
+ vmImage: 'ubuntu-16.04'
+ steps:
+ - script: source tools/pypy-test.sh
+ displayName: 'Run PyPy3 Build / Tests'
+ - task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ testRunTitle: 'Publish test results for PyPy3'
+ failTaskOnFailedTests: true
+ - job: Linux_gcc48
+ pool:
+ vmImage: 'ubuntu-18.04'
+ steps:
+ - script: |
+ if ! command -v gcc-4.8 >/dev/null 2>&1; then
+ sudo apt install gcc-4.8
+ fi
+ displayName: 'add gcc 4.8'
+ - script: |
+ python3 -m pip install --user --upgrade pip setuptools
+ python3 -m pip install --user -r test_requirements.txt
+ CPPFLAGS='' CC=gcc-4.8 F77=gfortran-5 F90=gfortran-5 \
+ python3 runtests.py --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml
+ displayName: 'Run gcc4.8 Build / Tests'
+ - task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ failTaskOnFailedTests: true
+ testRunTitle: 'Publish test results for gcc 4.8'
- - powershell: |
- choco install -y mingw --forcex86 --force --version=5.3.0
- displayName: 'Install 32-bit mingw for 32-bit builds'
- condition: eq(variables['BITS'], 32)
- # NOTE: for Windows builds it seems much more tractable to use runtests.py
- # vs. manual setup.py and then runtests.py for testing only
- - powershell: |
- If ($(BITS) -eq 32) {
- $env:NPY_DISTUTILS_APPEND_FLAGS = 1
- $env:CFLAGS = "-m32"
- $env:LDFLAGS = "-m32"
- $env:PATH = "C:\\tools\\mingw32\\bin;" + $env:PATH
- refreshenv
- }
- python -c "from tools import openblas_support; openblas_support.make_init('numpy')"
- pip wheel -v -v -v --wheel-dir=dist .
-
- ls dist -r | Foreach-Object {
- pip install $_.FullName
- }
- displayName: 'Build NumPy'
- - bash: |
- pushd . && cd .. && target=$(python -c "import numpy, os; print(os.path.abspath(os.path.join(os.path.dirname(numpy.__file__), '.libs')))") && popd
- pip download -d destination --only-binary --no-deps numpy==1.14
- cd destination && unzip numpy*.whl && cp numpy/.libs/*.dll $target
- ls $target
- displayName: 'Add extraneous & older DLL to numpy/.libs to probe DLL handling robustness'
- condition: eq(variables['PYTHON_VERSION'], '3.6')
- - script: pushd . && cd .. && python -c "from ctypes import windll; windll.kernel32.SetDefaultDllDirectories(0x00000800); import numpy" && popd
- displayName: 'For gh-12667; Windows DLL resolution'
- - script: python runtests.py -n --show-build-log --mode=$(TEST_MODE) -- -rsx --junitxml=junit/test-results.xml
- displayName: 'Run NumPy Test Suite'
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- testResultsFiles: '**/test-*.xml'
- failTaskOnFailedTests: true
- testRunTitle: 'Publish test results for Python $(PYTHON_VERSION) $(BITS)-bit $(TEST_MODE) Windows'
-
-- job: Linux_PyPy3
- pool:
- vmIMage: 'ubuntu-16.04'
- steps:
- - script: source tools/pypy-test.sh
- displayName: 'Run PyPy3 Build / Tests'
- - task: PublishTestResults@2
- condition: succeededOrFailed()
- inputs:
- testResultsFiles: '**/test-*.xml'
- testRunTitle: 'Publish test results for PyPy3'
- failTaskOnFailedTests: true
--- /dev/null
+steps:
+- task: UsePythonVersion@0
+ inputs:
+ versionSpec: $(PYTHON_VERSION)
+ addToPath: true
+ architecture: $(PYTHON_ARCH)
+- script: python -m pip install --upgrade pip setuptools wheel
+ displayName: 'Install tools'
+- script: python -m pip install -r test_requirements.txt
+ displayName: 'Install dependencies; some are optional to avoid test skips'
+- powershell: |
+ $pyversion = python -c "from __future__ import print_function; import sys; print(sys.version.split()[0])"
+ Write-Host "Python Version: $pyversion"
+ $target = "C:\\hostedtoolcache\\windows\\Python\\$pyversion\\$(PYTHON_ARCH)\\lib\\openblas.a"
+ Write-Host "target path: $target"
+ $openblas = python tools/openblas_support.py
+ cp $openblas $target
+ displayName: 'Download / Install OpenBLAS'
+
+- powershell: |
+ choco install -y mingw --forcex86 --force --version=5.3.0
+ displayName: 'Install 32-bit mingw for 32-bit builds'
+ condition: eq(variables['BITS'], 32)
+# NOTE: for Windows builds it seems much more tractable to use runtests.py
+# vs. manual setup.py and then runtests.py for testing only
+- powershell: |
+ If ($(BITS) -eq 32) {
+ $env:CFLAGS = "-m32"
+ $env:LDFLAGS = "-m32"
+ $env:PATH = "C:\\tools\\mingw32\\bin;" + $env:PATH
+ refreshenv
+ }
+ python -c "from tools import openblas_support; openblas_support.make_init('numpy')"
+ pip wheel -v -v -v --wheel-dir=dist .
+
+ ls dist -r | Foreach-Object {
+ pip install $_.FullName
+ }
+ displayName: 'Build NumPy'
+- bash: |
+ pushd . && cd .. && target=$(python -c "import numpy, os; print(os.path.abspath(os.path.join(os.path.dirname(numpy.__file__), '.libs')))") && popd
+ pip download -d destination --only-binary :all: --no-deps numpy==1.14
+ cd destination && unzip numpy*.whl && cp numpy/.libs/*.dll $target
+ ls $target
+ displayName: 'Add extraneous & older DLL to numpy/.libs to probe DLL handling robustness'
+ condition: eq(variables['PYTHON_VERSION'], '3.6')
+- script: pushd . && cd .. && python -c "from ctypes import windll; windll.kernel32.SetDefaultDllDirectories(0x00000800); import numpy" && popd
+ displayName: 'For gh-12667; Windows DLL resolution'
+- script: python runtests.py -n --show-build-log --mode=$(TEST_MODE) -- -rsx --junitxml=junit/test-results.xml
+ displayName: 'Run NumPy Test Suite'
+- task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ failTaskOnFailedTests: true
+ testRunTitle: 'Publish test results for Python $(PYTHON_VERSION) $(BITS)-bit $(TEST_MODE) Windows'
\ No newline at end of file
// The Pythons you'd like to test against. If not provided, defaults
// to the current version of Python used to run `asv`.
- "pythons": ["3.6"],
+ "pythons": ["3.7"],
// The matrix of dependencies to test. Each key is the name of a
// package (in PyPI) and the values are version numbers. An empty
// version.
"matrix": {
"six": [],
+ "Cython": [],
},
// The directory (relative to the current directory) that benchmarks are
// `asv` will cache wheels of the recent builds in each
// environment, making them faster to install next time. This is
// number of builds to keep, per environment.
- "wheel_cache_size": 2,
+ "build_cache_size": 8,
// The commits after which the regression search in `asv publish`
// should start looking for regressions. Dictionary whose keys are
--- /dev/null
+from __future__ import absolute_import, division, print_function
+
+from .common import Benchmark
+
+import numpy as np
+
+avx_ufuncs = ['sqrt',
+ 'absolute',
+ 'reciprocal',
+ 'square',
+ 'rint',
+ 'floor',
+ 'ceil',
+ 'trunc']
+stride = [1, 2, 4]
+dtype = ['f', 'd']
+
+class AVX_UFunc(Benchmark):
+ params = [avx_ufuncs, stride, dtype]
+ param_names = ['avx_based_ufunc', 'stride', 'dtype']
+ timeout = 10
+
+ def setup(self, ufuncname, stride, dtype):
+ np.seterr(all='ignore')
+ try:
+ self.f = getattr(np, ufuncname)
+ except AttributeError:
+ raise NotImplementedError()
+ N = 10000
+ self.arr = np.ones(stride*N, dtype)
+
+ def time_ufunc(self, ufuncname, stride, dtype):
+ self.f(self.arr[::stride])
+
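+
+# A sketch of how these benchmarks might be run selectively with asv
+# (assuming the asv setup configured above), e.g.:
+#
+#     asv run --bench AVX_UFunc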
after processing all source generators, no extension module will
be built. This is the recommended way to conditionally define
extension modules. Source generator functions are called by the
- ``build_src`` command of ``numpy.distutils``.
+ ``build_src`` sub-command of ``numpy.distutils``.
For example, here is a typical source generator function::
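
   # A minimal sketch; the function name and the generated content are
   # illustrative. A source generator receives the Extension instance
   # and the build directory, and returns the path of the generated
   # source -- or None, to skip building the extension.
   def generate_config_h(ext, build_dir):
       import os
       target = os.path.join(build_dir, 'config.h')
       with open(target, 'w') as f:
           f.write('#define FOO 1\n')
       return target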
Source tree
-----------
-* INSTALL.txt
+* INSTALL.rst.txt
* release.sh
* pavement.py
.. note:: The following steps are repeated for the beta(s), release
candidates(s) and the final release.
-Check that docs can be built
-----------------------------
-Do::
-
- cd doc/
- make dist
-
-to check that the documentation is in a buildable state. See
-doc/HOWTO_BUILD_DOCS.rst.txt for more details and for how to update
-https://docs.scipy.org.
-
Check deprecations
------------------
Before the release branch is made, it should be checked that all deprecated
Check the release notes
-----------------------
-Check that the release notes are up-to-date.
+Use `towncrier`_ to build the release note and
+commit the changes. This will remove all the fragments from
+``doc/release/upcoming_changes`` and add ``doc/release/<version>-note.rst``.
+Note that currently towncrier must be installed from its master branch as the
+last release (19.2.0) is outdated::
+
+ towncrier --version "<version>"
+ git commit -m"Create release note"
-Write or update the release notes in a file named for the release, such as
-``doc/release/1.11.0-notes.rst``.
+Check that the release notes are up-to-date.
-Mention at least the following:
+Update the release notes with a Highlights section. Mention some of the
+following:
- major new features
- deprecated and removed features
- for SciPy, supported NumPy version(s)
- outlook for the near future
-Also make sure that as soon as the branch is made, there is a new release
-notes file in the master branch for the next release.
+.. _towncrier: https://github.com/hawkowl/towncrier
Update the release status and create a release "tag"
----------------------------------------------------
And make sure the ``VERSION`` variable is set properly.
Now you can make the release commit and tag. We recommend you don't push
-the commit or tag immediately, just in case you need to do more cleanup. We
+the commit or tag immediately, just in case you need to do more cleanup. We
prefer to defer the push of the tag until we're confident this is the exact
form of the released code (see: :ref:`push-tag-and-commit`):
paver write_release_and_log
-The tar-files and binary releases for distribution should be uploaded to SourceForge,
-together with the Release Notes and the Changelog. Uploading can be done
-through a web interface or, more efficiently, through scp/sftp/rsync as
-described in the SourceForge
-`upload guide <https://sourceforge.net/apps/trac/sourceforge/wiki/Release%20files%20for%20download>`_ (dead link).
-For example::
+Build and archive documentation
+-------------------------------
+Do::
- scp <filename> <username>,numpy@frs.sourceforge.net:/home/frs/project/n/nu/numpy/NumPy/<releasedir>/
+ cd doc/
+ make dist
+
+to check that the documentation is in a buildable state. Then, after tagging,
+create an archive of the documentation in the numpy/doc repo::
+
+ # This checks out github.com/numpy/doc and adds (``git add``) the
+ # documentation to the checked out repo.
+ make merge-doc
+ # Now edit the ``index.html`` file in the repo to reflect the new content,
+ # and commit the changes
+ git -C dist/merge commit -am "Add documentation for <version>"
+ # Push to numpy/doc repo
+ git -C dist/merge push
Update PyPI
-----------
where ``upstream`` points to the main https://github.com/numpy/numpy.git
repository.
-Update docs.scipy.org
----------------------
-
-All documentation for a release can be updated on https://docs.scipy.org/ with:
-
- make dist
- make upload USERNAME=<yourname> RELEASE=1.11.0
-
-Note that ``<username>`` must have SSH credentials on the server. If you don't
-have those, ask someone who does (the list currently includes @rgommers,
-@juliantaylor and @pv).
-
-Also rebuild and upload ``docs.scipy.org`` front page, if the release
-series is a new one. The front page sources have their own repo:
-https://github.com/scipy/docs.scipy.org. Do the following:
-
-- Update ``index.rst`` for the new version.
-- ``make dist``
-- Check that the built documentation is OK.
-- ``touch output-is-fine``
-- ``make upload USERNAME=<username> RELEASE=1.x.y``
-
Update scipy.org
----------------
SPHINXOPTS ?=
SPHINXBUILD ?= LANG=C sphinx-build
PAPER ?=
+# For merging a documentation archive into a git checkout of numpy/doc
+# Turn a tag like v1.18.0 into 1.18
+# Use sed -n -e 's/pattern/match/p' to return a blank value if no match
+TAG ?= $(shell git describe --tag | sed -n -e's,v\([1-9]\.[0-9]*\)\.[0-9].*,\1,p')
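+# (e.g. "git describe" output "v1.18.0-10-gabcdef12" yields TAG=1.18;
+# output not matching the pattern yields an empty TAG)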
FILES=
$(SPHINXOPTS) source
.PHONY: help clean html web pickle htmlhelp latex changes linkcheck \
- dist dist-build gitwash-update version-check html-build latex-build
+ dist dist-build gitwash-update version-check html-build latex-build \
+ merge-doc
#------------------------------------------------------------------------------
@echo " dist PYVER=... to make a distribution-ready tree"
@echo " gitwash-update GITWASH=path/to/gitwash update gitwash developer docs"
@echo " upload USERNAME=... RELEASE=... to upload built docs to docs.scipy.org"
+ @echo " merge-doc TAG=... to clone numpy/doc and archive documentation into it"
clean:
-rm -rf build/*
endif
-dist:
+dist: build/dist.tar.gz
+
+build/dist.tar.gz:
make $(DIST_VARS) real-dist
real-dist: dist-build html-build html-scipyorg
install -d $(subst :, ,$(INSTALL_PPH))
$(PYTHON) `which easy_install` --prefix=$(INSTALL_DIR) ../dist/*.egg
-upload:
+upload: build/dist.tar.gz
# SSH must be correctly configured for this to work.
# Assumes that ``make dist`` was already run
# Example usage: ``make upload USERNAME=rgommers RELEASE=1.10.1``
ssh $(USERNAME)@docs.scipy.org rm $(UPLOAD_DIR)/dist.tar.gz
ssh $(USERNAME)@docs.scipy.org ln -snf numpy-$(RELEASE) /srv/docs_scipy_org/doc/numpy
+
+merge-doc: build/dist.tar.gz
+ifeq "$(TAG)" ""
+ echo tag "$(TAG)" not of the form 1.18;
+ exit 1;
+endif
+ @# Only clone if the directory does not exist
+ @if ! test -d build/merge; then \
+ git clone https://github.com/numpy/doc build/merge; \
+ fi;
+ @# Remove any old content and copy in the new, add it to git
+ -rm -rf build/merge/$(TAG)/*
+ -mkdir -p build/merge/$(TAG)
+ @# -C changes working directory
+ tar -C build/merge/$(TAG) -xf build/dist.tar.gz
+ git -C build/merge add $(TAG)
+ @# For now, the user must do this. If it is onerous, automate it and change
+ @# the instructions in doc/HOWTO_RELEASE.rst.txt
+ @echo " "
+ @echo New documentation archive added to ./build/merge.
	@echo Now add/modify the appropriate section after
+ @echo " <!-- insert here -->"
+ @echo in build/merge/index.html,
+ @echo then \"git commit\", \"git push\"
+
+
#------------------------------------------------------------------------------
# Basic Sphinx generation rules for different formats
#------------------------------------------------------------------------------
PySequenceMethods in py3k are binary compatible with py2k, but some of the
slots have gone away. I suspect this means some functions need redefining so
-the semantics of the slots needs to be checked.
-
-PySequenceMethods foo_sequence_methods = {
- (lenfunc)0, /* sq_length */
- (binaryfunc)0, /* sq_concat */
- (ssizeargfunc)0, /* sq_repeat */
- (ssizeargfunc)0, /* sq_item */
- (void *)0, /* nee sq_slice */
- (ssizeobjargproc)0, /* sq_ass_item */
- (void *)0, /* nee sq_ass_slice */
- (objobjproc)0, /* sq_contains */
- (binaryfunc)0, /* sq_inplace_concat */
- (ssizeargfunc)0 /* sq_inplace_repeat */
-};
+the semantics of the slots need to be checked::
+
+ PySequenceMethods foo_sequence_methods = {
+ (lenfunc)0, /* sq_length */
+ (binaryfunc)0, /* sq_concat */
+ (ssizeargfunc)0, /* sq_repeat */
+ (ssizeargfunc)0, /* sq_item */
+ (void *)0, /* nee sq_slice */
+ (ssizeobjargproc)0, /* sq_ass_item */
+ (void *)0, /* nee sq_ass_slice */
+ (objobjproc)0, /* sq_contains */
+ (binaryfunc)0, /* sq_inplace_concat */
+ (ssizeargfunc)0 /* sq_inplace_repeat */
+ };
PyMappingMethods
* multiarray/arrayobject.c
PyMappingMethods in py3k look to be the same as in py2k. The semantics
-of the slots needs to be checked.
+of the slots need to be checked::
-PyMappingMethods foo_mapping_methods = {
- (lenfunc)0, /* mp_length */
- (binaryfunc)0, /* mp_subscript */
- (objobjargproc)0 /* mp_ass_subscript */
-};
+ PyMappingMethods foo_mapping_methods = {
+ (lenfunc)0, /* mp_length */
+ (binaryfunc)0, /* mp_subscript */
+ (objobjargproc)0 /* mp_ass_subscript */
+ };
PyFile
Finish the Release Note
-----------------------
+.. note::
+
+ This has changed now that we use ``towncrier``. See the instructions for
+ creating the release note in ``doc/release/upcoming_changes/README.rst``.
+
Fill out the release note ``doc/release/1.14.5-notes.rst`` calling out
significant changes.
$ git checkout maintenance/1.14.x
$ git pull upstream maintenance/1.14.x
$ git submodule update
- $ git clean -xdf > /dev/null
+ $ git clean -xdfq
Edit pavement.py and setup.py as detailed in HOWTO_RELEASE::
``release/installers`` directories and put the ``*.zip`` and ``*.tar.gz``
source releases in the latter. ::
- $ cython --version # check that you have the correct cython version
+ $ python3 -m cython --version # check for correct cython version
$ paver sdist # sdist will do a git clean -xdf, so we omit that
- Hit the ``{Publish,Update} release`` button at the bottom.
-Upload documents to docs.scipy.org
-----------------------------------
+Upload documents to numpy.org
+-----------------------------
This step is only needed for final releases and can be skipped for
-pre-releases. You will also need upload permission for the document server, if
-you do not have permission ping Pauli Virtanen or Ralf Gommers to generate and
-upload the documentation. Otherwise::
+pre-releases. ``make merge-doc`` clones the ``numpy/doc`` repo into
+``doc/build/merge`` and updates it with the new documentation::
$ pushd doc
$ make dist
- $ make upload USERNAME=<yourname> RELEASE=v1.14.5
+ $ make merge-doc
$ popd
-If the release series is a new one, you will need to rebuild and upload the
-``docs.scipy.org`` front page::
+If the release series is a new one, you will need to add a new section to the
+``doc/build/merge/index.html`` front page just after the "insert here" comment::
+
+ $ gvim doc/build/merge/index.html +/'insert here'
+
+Otherwise, only the ``zip`` and ``pdf`` links should be updated with the
+new tag name::
- $ cd ../docs.scipy.org
- $ gvim index.rst
+ $ gvim doc/build/merge/index.html +/'tag v1.14'
-Note: there is discussion about moving the docs to github. This section will be
-updated when/if that happens.
+You can "test run" the new documentation in a browser to make sure the links
+work::
+
+ $ firefox doc/build/merge/index.html
+
+Once everything seems satisfactory, commit and upload the changes::
+
+ $ pushd doc/build/merge
+ $ git commit -am"Add documentation for v1.14.5"
+ $ git push
+ $ popd
Announce the release on scipy.org
---------------------------------
+++ /dev/null
-
-Contributors
-============
-
-A total of 10 people contributed to this release.
-
-* CakeWithSteak
-* Charles Harris
-* Chris Burr
-* Eric Wieser
-* Fernando Saravia
-* Lars Grueter
-* Matti Picus
-* Maxwell Aladago
-* Qiming Sun
-* Warren Weckesser
-
-Pull requests merged
-====================
-
-A total of 14 pull requests were merged for this release.
-
-* `#14211 <https://github.com/numpy/numpy/pull/14211>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative...
-* `#14275 <https://github.com/numpy/numpy/pull/14275>`__: BUG: fixing to allow unpickling of PY3 pickles from PY2
-* `#14340 <https://github.com/numpy/numpy/pull/14340>`__: BUG: Fix misuse of .names and .fields in various places (backport...
-* `#14423 <https://github.com/numpy/numpy/pull/14423>`__: BUG: test, fix regression in converting to ctypes.
-* `#14434 <https://github.com/numpy/numpy/pull/14434>`__: BUG: Fixed maximum relative error reporting in assert_allclose
-* `#14509 <https://github.com/numpy/numpy/pull/14509>`__: BUG: Fix regression in boolean matmul.
-* `#14686 <https://github.com/numpy/numpy/pull/14686>`__: BUG: properly define PyArray_DescrCheck
-* `#14853 <https://github.com/numpy/numpy/pull/14853>`__: BLD: add 'apt update' to shippable
-* `#14854 <https://github.com/numpy/numpy/pull/14854>`__: BUG: Fix _ctypes class circular reference. (#13808)
-* `#14856 <https://github.com/numpy/numpy/pull/14856>`__: BUG: Fix `np.einsum` errors on Power9 Linux and z/Linux
-* `#14863 <https://github.com/numpy/numpy/pull/14863>`__: BLD: Prevent -flto from optimising long double representation...
-* `#14864 <https://github.com/numpy/numpy/pull/14864>`__: BUG: lib: Fix histogram problem with signed integer arrays.
-* `#15172 <https://github.com/numpy/numpy/pull/15172>`__: ENH: Backport improvements to testing functions.
-* `#15191 <https://github.com/numpy/numpy/pull/15191>`__: REL: Prepare for 1.16.6 release.
+++ /dev/null
-
-Contributors
-============
-
-A total of 6 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Charles Harris
-* Eric Wieser
-* Ilhan Polat
-* Matti Picus
-* Michael Hudson-Doyle
-* Ralf Gommers
-
-Pull requests merged
-====================
-
-A total of 7 pull requests were merged for this release.
-
-* `#14593 <https://github.com/numpy/numpy/pull/14593>`__: MAINT: backport Cython API cleanup to 1.17.x, remove docs
-* `#14937 <https://github.com/numpy/numpy/pull/14937>`__: BUG: fix integer size confusion in handling array's ndmin argument
-* `#14939 <https://github.com/numpy/numpy/pull/14939>`__: BUILD: remove SSE2 flag from numpy.random builds
-* `#14993 <https://github.com/numpy/numpy/pull/14993>`__: MAINT: Added Python3.8 branch to dll lib discovery
-* `#15038 <https://github.com/numpy/numpy/pull/15038>`__: BUG: Fix refcounting in ufunc object loops
-* `#15067 <https://github.com/numpy/numpy/pull/15067>`__: BUG: Exceptions tracebacks are dropped
-* `#15175 <https://github.com/numpy/numpy/pull/15175>`__: ENH: Backport improvements to testing functions.
--- /dev/null
+
+Contributors
+============
+
+A total of 114 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Abhinav Sagar
+* Alex Henrie +
+* Alexander Jung +
+* Allan Haldane
+* Andrea Pattori
+* Andrew Liu +
+* Anis Ladram +
+* Anne Bonner +
+* Antoine Dechaume +
+* Aryan Naraghi +
+* Bastian Eichenberger +
+* Brian Wignall +
+* Brigitta Sipocz
+* CakeWithSteak +
+* Charles Harris
+* Chris Barker
+* Chris Burr +
+* Chris Markiewicz +
+* Christoph Gohlke
+* Christopher Whelan
+* Colin Snyder
+* Dan Allan
+* Daniel Ching
+* David Stansby +
+* David Zwicker +
+* Dieter Werthmüller
+* Disconnect3d +
+* Dmytro +
+* Doug Davis +
+* Eric Larson
+* Eric Wieser
+* Esben Haabendal +
+* Eugene Prilepin +
+* Felix Divo +
+* Gary Gurlaskie
+* Gina +
+* Giuseppe Cuccu +
+* Grzegorz Bokota +
+* Guanqun Lu +
+* Guilherme Leobas +
+* Guillaume Horel
+* Géraud Le Falher +
+* Hameer Abbasi
+* Harmon
+* Hiroyuki V. Yamazaki
+* Huang, Guangtai +
+* Hugo +
+* Hyeonguk Ryu +
+* Ilhan Polat +
+* Isaac Virshup
+* Jack J. Woehr +
+* Jack Woehr +
+* Jackie Leng
+* Jaime Fernandez
+* Jeff Hale +
+* Johann Faouzi +
+* Jon Dufresne +
+* Joseph Fox-Rabinovitz
+* Joseph R. Fox-Rabinovitz +
+* João Marcos Gris +
+* Justus Magin +
+* Jérémie du Boisberranger
+* Kai Striega
+* Kevin Sheppard
+* Kexuan Sun
+* Kmol Yuan +
+* Kriti Singh
+* Larry Bradley +
+* Lars Grueter
+* Luis Pedro Coelho
+* MSeifert04
+* Magdalena Proszewska +
+* Manny +
+* Mark Harfouche
+* Martin Reinecke
+* Martin Thoma
+* Matt Haberland +
+* Matt McCormick +
+* Matthias Bussonnier
+* Matti Picus
+* Max Bolingbroke +
+* Maxwell Aladago +
+* Michael Hudson-Doyle +
+* Oleksandr Pavlyk
+* Omar Merghany +
+* Pauli Virtanen
+* Peter Andreas Entschev
+* Peter Bell
+* Peter Cock +
+* Pradeep Reddy Raamana +
+* Qiming Sun +
+* Raghuveer Devulapalli
+* Ralf Gommers
+* Samesh +
+* Samesh Lakhotia +
+* Sebastian Berg
+* Sergei Lebedev
+* Seth Troisi +
+* Siddhesh Poyarekar +
+* Simon +
+* Simon Notley +
+* Stefan van der Walt
+* Stephan Hoyer
+* Steve Stagg
+* Thomas A Caswell
+* Thomas Kluyver
+* Tim Hoffmann +
+* Tirth Patel +
+* Tyler Reddy
+* Vladimir Pershin +
+* Warren Weckesser
+* Yadong Zhang +
+* Zieji Pohz +
+* Zolisa Bleki +
+
+Pull requests merged
+====================
+
+A total of 406 pull requests were merged for this release.
+
+* `#9301 <https://github.com/numpy/numpy/pull/9301>`__: DOC: added note to docstring of numpy.savez
+* `#10151 <https://github.com/numpy/numpy/pull/10151>`__: BUG: Numpy scalar types sometimes have the same name
+* `#12129 <https://github.com/numpy/numpy/pull/12129>`__: DOC: Improve axes shift description and example in np.tensordot
+* `#12205 <https://github.com/numpy/numpy/pull/12205>`__: MAINT: avoid relying on `np.generic.__name__` in `np.dtype.name`
+* `#12284 <https://github.com/numpy/numpy/pull/12284>`__: ENH: supply our version of numpy.pxd, requires cython>=0.29
+* `#12633 <https://github.com/numpy/numpy/pull/12633>`__: BUG: General fixes to f2py reference counts (dereferencing)
+* `#12658 <https://github.com/numpy/numpy/pull/12658>`__: BUG: NaT now sorts to ends of arrays
+* `#12828 <https://github.com/numpy/numpy/pull/12828>`__: DOC: Updates to nditer usage instructions
+* `#13003 <https://github.com/numpy/numpy/pull/13003>`__: BUG: Do not crash on recursive `.dtype` attribute lookup.
+* `#13368 <https://github.com/numpy/numpy/pull/13368>`__: ENH: Use AVX for float32 implementation of np.sin & np.cos
+* `#13605 <https://github.com/numpy/numpy/pull/13605>`__: DEP: Deprecate silent ignoring of bad data in fromfile/fromstring
+* `#13610 <https://github.com/numpy/numpy/pull/13610>`__: ENH: Always produce a consistent shape in the result of `argwhere`
+* `#13673 <https://github.com/numpy/numpy/pull/13673>`__: DOC: array(obj, dtype=dt) can downcast
+* `#13698 <https://github.com/numpy/numpy/pull/13698>`__: DOC: Document ma.filled behavior with non-scalar fill_value
+* `#13710 <https://github.com/numpy/numpy/pull/13710>`__: DOC: Add note to irfft-like functions about the default sizes
+* `#13739 <https://github.com/numpy/numpy/pull/13739>`__: BUG: Don't produce undefined behavior for a << b if b >= bitsof(a)
+* `#13766 <https://github.com/numpy/numpy/pull/13766>`__: MAINT: Update NEP template.
+* `#13794 <https://github.com/numpy/numpy/pull/13794>`__: ENH: random: Add the multivariate hypergeometric distribution.
+* `#13799 <https://github.com/numpy/numpy/pull/13799>`__: DOC: Fix unrendered links
+* `#13812 <https://github.com/numpy/numpy/pull/13812>`__: MAINT: Rewrite Floyd algorithm
+* `#13825 <https://github.com/numpy/numpy/pull/13825>`__: DOC: Add missing macros to C-API documentation
+* `#13829 <https://github.com/numpy/numpy/pull/13829>`__: ENH: Add axis argument to random.permutation and random.shuffle
+* `#13847 <https://github.com/numpy/numpy/pull/13847>`__: DOC: Adds documentation of functions exposed in numpy namespace
+* `#13860 <https://github.com/numpy/numpy/pull/13860>`__: BUG: Refcount fixes
+* `#13871 <https://github.com/numpy/numpy/pull/13871>`__: MAINT: Ensure array_dealloc does not modify refcount of self
+* `#13874 <https://github.com/numpy/numpy/pull/13874>`__: MAINT: Prepare master for 1.18.0 development.
+* `#13876 <https://github.com/numpy/numpy/pull/13876>`__: MAINT,BUG,DOC: Fix errors in _add_newdocs
+* `#13880 <https://github.com/numpy/numpy/pull/13880>`__: MAINT: Remove an unnecessary backslash between two string literals
+* `#13881 <https://github.com/numpy/numpy/pull/13881>`__: MAINT: Update pavement to use python3 in shell commands.
+* `#13882 <https://github.com/numpy/numpy/pull/13882>`__: MAINT: Remove unnecessary backslashes (and replace others by...
+* `#13883 <https://github.com/numpy/numpy/pull/13883>`__: MAINT: Replace integers in places where booleans are expected
+* `#13884 <https://github.com/numpy/numpy/pull/13884>`__: DOC: Add missing parameter description for keepdims in MaskedArray
+* `#13885 <https://github.com/numpy/numpy/pull/13885>`__: ENH: use AVX for float32 and float64 implementation of sqrt,...
+* `#13886 <https://github.com/numpy/numpy/pull/13886>`__: DOC: reformat top-level release index
+* `#13892 <https://github.com/numpy/numpy/pull/13892>`__: DOC: Refactor Array API documentation -- Array Structure and...
+* `#13895 <https://github.com/numpy/numpy/pull/13895>`__: DOC: Fix typo in "make_mask" documentation
+* `#13896 <https://github.com/numpy/numpy/pull/13896>`__: MAINT: Delete unused _aliased_types.py
+* `#13901 <https://github.com/numpy/numpy/pull/13901>`__: BLD: Remove Trusty dist in Travis CI build
+* `#13907 <https://github.com/numpy/numpy/pull/13907>`__: BUG: Handle weird bytestrings in dtype()
+* `#13908 <https://github.com/numpy/numpy/pull/13908>`__: ENH: use towncrier to build the release note
+* `#13913 <https://github.com/numpy/numpy/pull/13913>`__: ENH: improve error message for ragged-array creation failure
+* `#13914 <https://github.com/numpy/numpy/pull/13914>`__: DOC: Update the description of byteswap
+* `#13916 <https://github.com/numpy/numpy/pull/13916>`__: BUG: i0 Bessel function regression on array-likes supporting...
+* `#13920 <https://github.com/numpy/numpy/pull/13920>`__: ENH, BUILD: refactor all OpenBLAS downloads into a single, testable...
+* `#13922 <https://github.com/numpy/numpy/pull/13922>`__: MAINT: Remove unnecessary parenthesis in numpy.ma.core
+* `#13925 <https://github.com/numpy/numpy/pull/13925>`__: MAINT: Fix wrong spelling of ufunc
+* `#13926 <https://github.com/numpy/numpy/pull/13926>`__: DOC: Remove explicit .next method calls with built-in next function...
+* `#13928 <https://github.com/numpy/numpy/pull/13928>`__: DOC: Don't override MaskedArray.view documentation with the one...
+* `#13930 <https://github.com/numpy/numpy/pull/13930>`__: BUG: Fix incorrect GIL release in array.nonzero
+* `#13935 <https://github.com/numpy/numpy/pull/13935>`__: MAINT: Warn if `_add_newdocs.py` is used to add docstrings to...
+* `#13943 <https://github.com/numpy/numpy/pull/13943>`__: MAINT: Revert #13876, "MAINT,BUG,DOC: Fix errors in _add_newdocs"
+* `#13944 <https://github.com/numpy/numpy/pull/13944>`__: MAINT,BUG,DOC: Fix errors in _add_newdocs
+* `#13945 <https://github.com/numpy/numpy/pull/13945>`__: DOC, MAINT: emphasize random API changes, remove Generator.randint
+* `#13946 <https://github.com/numpy/numpy/pull/13946>`__: DOC: Add a numpy-doc docstring to add_newdoc
+* `#13947 <https://github.com/numpy/numpy/pull/13947>`__: DOC: Fix rst rendering in data types
+* `#13948 <https://github.com/numpy/numpy/pull/13948>`__: DOC: Update the description of set_printoptions in quickstart...
+* `#13950 <https://github.com/numpy/numpy/pull/13950>`__: Fixing failure on Python 2.7 on Windows 7
+* `#13952 <https://github.com/numpy/numpy/pull/13952>`__: Fix a typo related to the range of indices
+* `#13959 <https://github.com/numpy/numpy/pull/13959>`__: DOC: add space between words across lines
+* `#13964 <https://github.com/numpy/numpy/pull/13964>`__: BUG, DOC: add new recfunctions to `__all__`
+* `#13967 <https://github.com/numpy/numpy/pull/13967>`__: DOC: Change (old) range() to np.arange()
+* `#13968 <https://github.com/numpy/numpy/pull/13968>`__: DOC: improve np.sort docstring
+* `#13970 <https://github.com/numpy/numpy/pull/13970>`__: DOC: spellcheck numpy/doc/broadcasting.py
+* `#13976 <https://github.com/numpy/numpy/pull/13976>`__: MAINT, TST: remove test-installed-numpy.py
+* `#13979 <https://github.com/numpy/numpy/pull/13979>`__: DOC: Document array_function at a higher level.
+* `#13985 <https://github.com/numpy/numpy/pull/13985>`__: DOC: show workaround for backward compatibility
+* `#13988 <https://github.com/numpy/numpy/pull/13988>`__: DOC: Add a call for contribution paragraph to the readme
+* `#13989 <https://github.com/numpy/numpy/pull/13989>`__: BUG: Missing warnings import in polyutils
+* `#13990 <https://github.com/numpy/numpy/pull/13990>`__: BUILD: adapt "make version-check" to "make dist"
+* `#13991 <https://github.com/numpy/numpy/pull/13991>`__: DOC: emphasize need for matching numpy, git versions
+* `#14002 <https://github.com/numpy/numpy/pull/14002>`__: TST, MAINT, BUG: expand OpenBLAS version checking
+* `#14004 <https://github.com/numpy/numpy/pull/14004>`__: ENH: Chain exception for typed item assignment
+* `#14005 <https://github.com/numpy/numpy/pull/14005>`__: MAINT: Fix spelling error in npy_tempita kwarg
+* `#14010 <https://github.com/numpy/numpy/pull/14010>`__: DOC: Array API : Directory restructure and code cleanup
+* `#14011 <https://github.com/numpy/numpy/pull/14011>`__: [DOC] Remove unused/deprecated functions
+* `#14022 <https://github.com/numpy/numpy/pull/14022>`__: Update system_info.py
+* `#14025 <https://github.com/numpy/numpy/pull/14025>`__: DOC: Link between the two indexing documentation pages
+* `#14026 <https://github.com/numpy/numpy/pull/14026>`__: DOC: Update NumFOCUS subcommittee replacing Nathaniel with Sebastian
+* `#14027 <https://github.com/numpy/numpy/pull/14027>`__: DOC: update "Contributing to NumPy" with more activities/roles
+* `#14028 <https://github.com/numpy/numpy/pull/14028>`__: DOC: Improve quickstart documentation of new random Generator
+* `#14030 <https://github.com/numpy/numpy/pull/14030>`__: DEP: Speed up WarnOnWrite deprecation in buffer interface
+* `#14032 <https://github.com/numpy/numpy/pull/14032>`__: NEP: numpy.org website redesign
+* `#14035 <https://github.com/numpy/numpy/pull/14035>`__: DOC: Fix docstring of numpy.allclose regarding NaNs
+* `#14036 <https://github.com/numpy/numpy/pull/14036>`__: DEP: Raise warnings for deprecated functions PyArray_As1D, PyArray_As2D
+* `#14039 <https://github.com/numpy/numpy/pull/14039>`__: DEP: Remove np.rank which has been deprecated for more than 5...
+* `#14048 <https://github.com/numpy/numpy/pull/14048>`__: BUG, TEST: Adding validation test suite to validate float32 exp
+* `#14051 <https://github.com/numpy/numpy/pull/14051>`__: ENH,DEP: Allow multiple axes in expand_dims
+* `#14053 <https://github.com/numpy/numpy/pull/14053>`__: ENH: add pyproject.toml
+* `#14060 <https://github.com/numpy/numpy/pull/14060>`__: DOC: Update cversions.py links and wording
+* `#14062 <https://github.com/numpy/numpy/pull/14062>`__: DOC, BUILD: cleanups and fix (again) 'make dist'
+* `#14063 <https://github.com/numpy/numpy/pull/14063>`__: BUG: Fix file-like object check when saving arrays
+* `#14064 <https://github.com/numpy/numpy/pull/14064>`__: DOC: Resolve bad references in Sphinx warnings
+* `#14068 <https://github.com/numpy/numpy/pull/14068>`__: MAINT: bump ARMv8 / POWER8 OpenBLAS in CI
+* `#14069 <https://github.com/numpy/numpy/pull/14069>`__: DOC: Emphasize the need to run tests when building from source
+* `#14070 <https://github.com/numpy/numpy/pull/14070>`__: DOC: Add example to clarify "numpy.save" behavior on already open...
+* `#14072 <https://github.com/numpy/numpy/pull/14072>`__: DEP: Deprecate full and economic modes for linalg.qr
+* `#14073 <https://github.com/numpy/numpy/pull/14073>`__: DOC: Doc release
+* `#14074 <https://github.com/numpy/numpy/pull/14074>`__: BUG: fix build issue on icc 2016
+* `#14076 <https://github.com/numpy/numpy/pull/14076>`__: TST: Add 3.8-dev to travisCI testing.
+* `#14085 <https://github.com/numpy/numpy/pull/14085>`__: DOC: Add blank line above doctest for intersect1d
+* `#14086 <https://github.com/numpy/numpy/pull/14086>`__: ENH: Propose standard policy for dropping support of old Python...
+* `#14089 <https://github.com/numpy/numpy/pull/14089>`__: DOC: Use `pip install .` where possible instead of calling setup.py
+* `#14091 <https://github.com/numpy/numpy/pull/14091>`__: MAINT: adjustments to test_ufunc_noncontigous
+* `#14092 <https://github.com/numpy/numpy/pull/14092>`__: MAINT: Improve NEP template
+* `#14096 <https://github.com/numpy/numpy/pull/14096>`__: DOC: fix documentation of i and j for tri.
+* `#14097 <https://github.com/numpy/numpy/pull/14097>`__: MAINT: Lazy import testing on python >=3.7
+* `#14100 <https://github.com/numpy/numpy/pull/14100>`__: DEP: Deprecate PyArray_FromDimsAndDataAndDescr, PyArray_FromDims
+* `#14101 <https://github.com/numpy/numpy/pull/14101>`__: MAINT: Clearer error message while padding with stat_length=0
+* `#14106 <https://github.com/numpy/numpy/pull/14106>`__: MAINT: remove duplicate variable assignments
+* `#14108 <https://github.com/numpy/numpy/pull/14108>`__: BUG: initialize variable that is passed by pointer
+* `#14110 <https://github.com/numpy/numpy/pull/14110>`__: DOC: fix typo in c-api/array.rst doc
+* `#14121 <https://github.com/numpy/numpy/pull/14121>`__: BUG: Add gcd/lcm definitions to npy_math.h
+* `#14122 <https://github.com/numpy/numpy/pull/14122>`__: MAINT: Mark umath accuracy test xfail.
+* `#14124 <https://github.com/numpy/numpy/pull/14124>`__: MAINT: Use equality instead of identity check with literal
+* `#14130 <https://github.com/numpy/numpy/pull/14130>`__: MAINT: Fix small typo in quickstart docs
+* `#14134 <https://github.com/numpy/numpy/pull/14134>`__: DOC, MAINT: Update master after 1.17.0 release.
+* `#14141 <https://github.com/numpy/numpy/pull/14141>`__: ENH: add c-imported modules for freeze analysis in np.random
+* `#14143 <https://github.com/numpy/numpy/pull/14143>`__: BUG: Fix DeprecationWarning in python 3.8
+* `#14144 <https://github.com/numpy/numpy/pull/14144>`__: BUG: Remove stray print that causes a SystemError on python 3.7...
+* `#14145 <https://github.com/numpy/numpy/pull/14145>`__: BUG: Remove the broken clip wrapper
+* `#14152 <https://github.com/numpy/numpy/pull/14152>`__: BUG: avx2_scalef_ps must be static
+* `#14153 <https://github.com/numpy/numpy/pull/14153>`__: TST: Allow fuss in testing strided/non-strided exp/log loops
+* `#14170 <https://github.com/numpy/numpy/pull/14170>`__: NEP: Proposal for __duckarray__ protocol
+* `#14171 <https://github.com/numpy/numpy/pull/14171>`__: BUG: Make advanced indexing result on read-only subclass writeable
+* `#14178 <https://github.com/numpy/numpy/pull/14178>`__: TST: Clean up of test_pocketfft.py
+* `#14181 <https://github.com/numpy/numpy/pull/14181>`__: DEP: Deprecate np.alen
+* `#14185 <https://github.com/numpy/numpy/pull/14185>`__: MAINT: Workaround for Intel compiler bug leading to failing test
+* `#14190 <https://github.com/numpy/numpy/pull/14190>`__: DOC: Fix hermitian argument docs in `svd`
+* `#14195 <https://github.com/numpy/numpy/pull/14195>`__: MAINT: Fix a docstring typo.
+* `#14196 <https://github.com/numpy/numpy/pull/14196>`__: DOC: Fix links in `/.github/CONTRIBUTING.md`.
+* `#14197 <https://github.com/numpy/numpy/pull/14197>`__: ENH: Multivariate normal speedups
+* `#14203 <https://github.com/numpy/numpy/pull/14203>`__: MAINT: Improve mismatch message of np.testing.assert_array_equal
+* `#14204 <https://github.com/numpy/numpy/pull/14204>`__: DOC,MAINT: Move towncrier files and fixup categories
+* `#14207 <https://github.com/numpy/numpy/pull/14207>`__: BUG: Fixed default BitGenerator name
+* `#14209 <https://github.com/numpy/numpy/pull/14209>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative...
+* `#14216 <https://github.com/numpy/numpy/pull/14216>`__: ENH: Enable huge pages in all Linux builds
+* `#14217 <https://github.com/numpy/numpy/pull/14217>`__: BUG: Fix leak in the f2py-generated module init and `PyMem_Del`...
+* `#14219 <https://github.com/numpy/numpy/pull/14219>`__: DOC: new nan_to_num keywords are from 1.17 onwards
+* `#14223 <https://github.com/numpy/numpy/pull/14223>`__: TST: Add tests for deprecated C functions (PyArray_As1D, PyArray_As1D)
+* `#14224 <https://github.com/numpy/numpy/pull/14224>`__: DOC: mention `take_along_axis` in `choose`
+* `#14227 <https://github.com/numpy/numpy/pull/14227>`__: ENH: Parse complex number from string
+* `#14231 <https://github.com/numpy/numpy/pull/14231>`__: DOC: update or remove outdated sourceforge links
+* `#14234 <https://github.com/numpy/numpy/pull/14234>`__: MAINT: Better error message for norm
+* `#14235 <https://github.com/numpy/numpy/pull/14235>`__: DOC: add backlinks to numpy.org
+* `#14240 <https://github.com/numpy/numpy/pull/14240>`__: BUG: Don't fail when lexsorting some empty arrays.
+* `#14241 <https://github.com/numpy/numpy/pull/14241>`__: BUG: Fix segfault in `random.permutation(x)` when x is a string.
+* `#14245 <https://github.com/numpy/numpy/pull/14245>`__: Doc: fix a typo in NEP21
+* `#14249 <https://github.com/numpy/numpy/pull/14249>`__: DOC: set status of NEP 28 (website redesign) to Accepted
+* `#14250 <https://github.com/numpy/numpy/pull/14250>`__: BLD: MAINT: change default behavior of build flag appending.
+* `#14252 <https://github.com/numpy/numpy/pull/14252>`__: BUG: Fixes StopIteration error from 'np.genfromtxt' for empty...
+* `#14255 <https://github.com/numpy/numpy/pull/14255>`__: BUG: fix inconsistent axes ordering for axis in function `unique`
+* `#14256 <https://github.com/numpy/numpy/pull/14256>`__: DEP: Deprecate load/dump functions in favour of pickle methods
+* `#14257 <https://github.com/numpy/numpy/pull/14257>`__: MAINT: Update NEP-30
+* `#14259 <https://github.com/numpy/numpy/pull/14259>`__: DEP: Deprecate arrayprint formatting functions
+* `#14266 <https://github.com/numpy/numpy/pull/14266>`__: DOC: remove scipy.org from the breadcrumb formatting
+* `#14270 <https://github.com/numpy/numpy/pull/14270>`__: BUG: Fix formatting error in exception message
+* `#14272 <https://github.com/numpy/numpy/pull/14272>`__: DOC: Address typos in dispatch docs
+* `#14279 <https://github.com/numpy/numpy/pull/14279>`__: BUG: Fix ZeroDivisionError for zero length arrays in pocketfft.
+* `#14290 <https://github.com/numpy/numpy/pull/14290>`__: BUG: Fix misuse of .names and .fields in various places
+* `#14291 <https://github.com/numpy/numpy/pull/14291>`__: TST, BUG: Use python3.6-dbg.
+* `#14295 <https://github.com/numpy/numpy/pull/14295>`__: BUG: core: Handle large negative np.int64 args in binary_repr.
+* `#14298 <https://github.com/numpy/numpy/pull/14298>`__: BUG: Fix numpy.random bug in platform detection
+* `#14303 <https://github.com/numpy/numpy/pull/14303>`__: MAINT: random: Match type of SeedSequence.pool_size to DEFAULT_POOL_SIZE.
+* `#14310 <https://github.com/numpy/numpy/pull/14310>`__: Bug: Fix behavior of structured_to_unstructured on non-trivial...
+* `#14311 <https://github.com/numpy/numpy/pull/14311>`__: DOC: add two commas, move one word
+* `#14313 <https://github.com/numpy/numpy/pull/14313>`__: DOC: Clarify rules about broadcasting when empty arrays are involved.
+* `#14321 <https://github.com/numpy/numpy/pull/14321>`__: TST, MAINT: bump to OpenBLAS 0.3.7 stable
+* `#14325 <https://github.com/numpy/numpy/pull/14325>`__: DEP: numpy.testing.rand
+* `#14335 <https://github.com/numpy/numpy/pull/14335>`__: DEP: Deprecate class `SafeEval`
+* `#14341 <https://github.com/numpy/numpy/pull/14341>`__: BUG: revert detecting and raising error on ragged arrays
+* `#14342 <https://github.com/numpy/numpy/pull/14342>`__: DOC: Improve documentation of `isscalar`.
+* `#14349 <https://github.com/numpy/numpy/pull/14349>`__: MAINT: Fix bloated mismatch error percentage in array comparisons.
+* `#14351 <https://github.com/numpy/numpy/pull/14351>`__: DOC: Fix a minor typo in dispatch documentation.
+* `#14352 <https://github.com/numpy/numpy/pull/14352>`__: MAINT: Remove redundant deprecation checks
+* `#14353 <https://github.com/numpy/numpy/pull/14353>`__: MAINT: polynomial: Add an N-d vander implementation used under...
+* `#14355 <https://github.com/numpy/numpy/pull/14355>`__: DOC: clarify that PytestTester is non-public
+* `#14356 <https://github.com/numpy/numpy/pull/14356>`__: DOC: support and require sphinx>=2.2
+* `#14360 <https://github.com/numpy/numpy/pull/14360>`__: DOC: random: fix doc linking, was referencing private submodules.
+* `#14364 <https://github.com/numpy/numpy/pull/14364>`__: MAINT: Fixes for prospective Python 3.10 and 4.0
+* `#14365 <https://github.com/numpy/numpy/pull/14365>`__: DOC: lib: Add more explanation of the weighted average calculation.
+* `#14368 <https://github.com/numpy/numpy/pull/14368>`__: MAINT: Avoid BytesWarning in PyArray_DescrConverter()
+* `#14369 <https://github.com/numpy/numpy/pull/14369>`__: MAINT: Post NumPy 1.17.1 update.
+* `#14370 <https://github.com/numpy/numpy/pull/14370>`__: DOC: Fixed dtype docs for var, nanvar.
+* `#14372 <https://github.com/numpy/numpy/pull/14372>`__: DOC: Document project as Python 3 only with a trove classifier
+* `#14378 <https://github.com/numpy/numpy/pull/14378>`__: BUILD: move all test dependencies to ./test_requirements.txt
+* `#14381 <https://github.com/numpy/numpy/pull/14381>`__: BUG: lib: Fix histogram problem with signed integer arrays.
+* `#14385 <https://github.com/numpy/numpy/pull/14385>`__: REL: Update master after NumPy 1.16.5 release.
+* `#14387 <https://github.com/numpy/numpy/pull/14387>`__: BUG: test, fix regression in converting to ctypes
+* `#14389 <https://github.com/numpy/numpy/pull/14389>`__: NEP: Add initial draft of NEP-31: Context-local and global overrides...
+* `#14390 <https://github.com/numpy/numpy/pull/14390>`__: DOC: document numpy/doc update process
+* `#14392 <https://github.com/numpy/numpy/pull/14392>`__: DOC: update np.around docstring with note about floating-point...
+* `#14393 <https://github.com/numpy/numpy/pull/14393>`__: BUG: view with fieldless dtype should raise if itemsize != 0
+* `#14395 <https://github.com/numpy/numpy/pull/14395>`__: DOC: fix issue with __new__ usage in subclassing doc.
+* `#14398 <https://github.com/numpy/numpy/pull/14398>`__: DOC: Fix release notes table of contents
+* `#14399 <https://github.com/numpy/numpy/pull/14399>`__: NEP 32: Remove the financial functions from NumPy
+* `#14404 <https://github.com/numpy/numpy/pull/14404>`__: BLD: Update RELEASE_WALKTHROUGH and cythonize.
+* `#14407 <https://github.com/numpy/numpy/pull/14407>`__: Bump pytest from 5.1.1 to 5.1.2
+* `#14408 <https://github.com/numpy/numpy/pull/14408>`__: TST: Remove build job since we now use Dependabot
+* `#14410 <https://github.com/numpy/numpy/pull/14410>`__: BLD: Only allow using Cython module when cythonizing.
+* `#14411 <https://github.com/numpy/numpy/pull/14411>`__: TST: Add dependabot config file.
+* `#14416 <https://github.com/numpy/numpy/pull/14416>`__: BUG: Fix format statement associated with AttributeError.
+* `#14417 <https://github.com/numpy/numpy/pull/14417>`__: BUG: Fix aradixsort indirect indexing.
+* `#14426 <https://github.com/numpy/numpy/pull/14426>`__: DOC: add the reference to 'printoptions'
+* `#14429 <https://github.com/numpy/numpy/pull/14429>`__: BUG: Do not show Override module in private error classes.
+* `#14444 <https://github.com/numpy/numpy/pull/14444>`__: DOC: Make implementation bullet points consistent in NEP 29
+* `#14447 <https://github.com/numpy/numpy/pull/14447>`__: MAINT: Clarify policy language in NEP-29.
+* `#14448 <https://github.com/numpy/numpy/pull/14448>`__: REL: Update master after 1.17.2 release.
+* `#14452 <https://github.com/numpy/numpy/pull/14452>`__: MAINT: clean up pocketfft modules inside numpy.fft namespace
+* `#14453 <https://github.com/numpy/numpy/pull/14453>`__: BLD: remove generated Cython files from sdist
+* `#14454 <https://github.com/numpy/numpy/pull/14454>`__: MAINT: add test to prevent new public-looking modules being added
+* `#14458 <https://github.com/numpy/numpy/pull/14458>`__: BUG: random.hypergeometric assumes npy_long is npy_int64, hangs...
+* `#14459 <https://github.com/numpy/numpy/pull/14459>`__: ENH: Print the amount of memory that would be used by a failed...
+* `#14460 <https://github.com/numpy/numpy/pull/14460>`__: MAINT: use test_requirements.txt in tox and shippable, ship it...
+* `#14464 <https://github.com/numpy/numpy/pull/14464>`__: BUG: add a specialized loop for boolean matmul
+* `#14469 <https://github.com/numpy/numpy/pull/14469>`__: BUG: Fix _ctypes class circular reference. (#13808)
+* `#14472 <https://github.com/numpy/numpy/pull/14472>`__: BUG: core: Fix the str function of the rational dtype.
+* `#14475 <https://github.com/numpy/numpy/pull/14475>`__: DOC: add timedelta64 signature
+* `#14477 <https://github.com/numpy/numpy/pull/14477>`__: MAINT: Extract raising of MemoryError to a helper function
+* `#14483 <https://github.com/numpy/numpy/pull/14483>`__: BUG,MAINT: Some fixes and minor cleanup based on clang analysis
+* `#14484 <https://github.com/numpy/numpy/pull/14484>`__: MAINT: Add `NPY_UNUSED` and `const` qualified suggested by clang
+* `#14485 <https://github.com/numpy/numpy/pull/14485>`__: MAINT: Silence integer comparison build warnings in assert statements
+* `#14486 <https://github.com/numpy/numpy/pull/14486>`__: MAINT: distutils: Add newline at the end of printed warnings.
+* `#14490 <https://github.com/numpy/numpy/pull/14490>`__: BUG: random: Revert gh-14458 and refix gh-14557.
+* `#14493 <https://github.com/numpy/numpy/pull/14493>`__: DOC: Fix reference NPY_ARRAY_OWNDATA instead of NPY_OWNDATA.
+* `#14495 <https://github.com/numpy/numpy/pull/14495>`__: ENH: Allow NPY_PKG_CONFIG_PATH environment variable override
+* `#14498 <https://github.com/numpy/numpy/pull/14498>`__: MAINT: remove the entropy c-extension module
+* `#14499 <https://github.com/numpy/numpy/pull/14499>`__: DOC: Add backslashes so PyUFunc_FromFuncAndDataAndSignatureAndIdentity...
+* `#14500 <https://github.com/numpy/numpy/pull/14500>`__: DOC: Fix a minor typo in changelog readme
+* `#14501 <https://github.com/numpy/numpy/pull/14501>`__: BUG: Fix randint when range is 2**32
+* `#14503 <https://github.com/numpy/numpy/pull/14503>`__: DOC: tweak np.round docstring to clarify floating-point error
+* `#14508 <https://github.com/numpy/numpy/pull/14508>`__: DOC: Add warning to NPV function
+* `#14510 <https://github.com/numpy/numpy/pull/14510>`__: API: Do not return None from recfunctions.drop_fields
+* `#14511 <https://github.com/numpy/numpy/pull/14511>`__: BUG: Fix flatten_dtype so that nested 0-field structs are flattened...
+* `#14514 <https://github.com/numpy/numpy/pull/14514>`__: DOC: Build release notes during CircleCI step
+* `#14518 <https://github.com/numpy/numpy/pull/14518>`__: BUILD: Hide platform configuration probe behind --debug-configure
+* `#14520 <https://github.com/numpy/numpy/pull/14520>`__: Mention that split() returns views into the original array
+* `#14521 <https://github.com/numpy/numpy/pull/14521>`__: MAINT: Simplify lookfor function
+* `#14523 <https://github.com/numpy/numpy/pull/14523>`__: MAINT: random: Remove a few duplicated C function prototypes.
+* `#14525 <https://github.com/numpy/numpy/pull/14525>`__: BUILD, MAINT: run tests with verbose for PyPY, also do not leak...
+* `#14526 <https://github.com/numpy/numpy/pull/14526>`__: BUG: fix release snippet failures caught only after merging
+* `#14527 <https://github.com/numpy/numpy/pull/14527>`__: BLD: add warn-error option, adds -Werror to compiler
+* `#14531 <https://github.com/numpy/numpy/pull/14531>`__: BUG: random: Create a legacy implementation of random.binomial.
+* `#14534 <https://github.com/numpy/numpy/pull/14534>`__: MAINT: remove unused functions, rearrange headers (from CC=clang)
+* `#14535 <https://github.com/numpy/numpy/pull/14535>`__: DOC: Fix a bit of code in 'Beyond the Basics' C API user guide.
+* `#14536 <https://github.com/numpy/numpy/pull/14536>`__: MAINT: Cleanup old_defines in DOC
+* `#14540 <https://github.com/numpy/numpy/pull/14540>`__: DOC: Added missing versionadded to diff(prepend)
+* `#14543 <https://github.com/numpy/numpy/pull/14543>`__: BUG: Avoid ctypes in Generators
+* `#14545 <https://github.com/numpy/numpy/pull/14545>`__: Changing ImportWarning to DeprecationWarning
+* `#14548 <https://github.com/numpy/numpy/pull/14548>`__: MAINT: handle case where GIT_VERSION is empty string
+* `#14554 <https://github.com/numpy/numpy/pull/14554>`__: MAINT: core: Remove duplicated inner loop ee->e from log, exp,...
+* `#14555 <https://github.com/numpy/numpy/pull/14555>`__: DOC: clarify input types in basics.io.genfromtxt.rst
+* `#14557 <https://github.com/numpy/numpy/pull/14557>`__: DOC: remove note about Pocketfft license file (non-existing here).
+* `#14558 <https://github.com/numpy/numpy/pull/14558>`__: DOC: Fix code that generates the table in the 'Casting Rules'...
+* `#14562 <https://github.com/numpy/numpy/pull/14562>`__: MAINT: don't install partial numpy.random C/Cython API.
+* `#14564 <https://github.com/numpy/numpy/pull/14564>`__: TST: ensure coercion tables aren't printed on failing public...
+* `#14567 <https://github.com/numpy/numpy/pull/14567>`__: DEP: remove deprecated (and private) numpy.testing submodules.
+* `#14568 <https://github.com/numpy/numpy/pull/14568>`__: BLD, DOC: fix gh-14518, add release note
+* `#14570 <https://github.com/numpy/numpy/pull/14570>`__: BUG: importing build_src breaks setuptools monkeypatch for msvc14
+* `#14572 <https://github.com/numpy/numpy/pull/14572>`__: DOC: Note runtests.py `-- -s` method to use pytests `-s`
+* `#14573 <https://github.com/numpy/numpy/pull/14573>`__: DOC: update submodule docstrings, remove info.py files
+* `#14576 <https://github.com/numpy/numpy/pull/14576>`__: DOC: Document the NPY_SCALARKIND values as C variables.
+* `#14582 <https://github.com/numpy/numpy/pull/14582>`__: MAINT: Bump pytest from 5.1.2 to 5.1.3
+* `#14583 <https://github.com/numpy/numpy/pull/14583>`__: DEP: remove deprecated select behaviour
+* `#14585 <https://github.com/numpy/numpy/pull/14585>`__: BUG: Add missing check for 0-sized array in ravel_multi_index
+* `#14586 <https://github.com/numpy/numpy/pull/14586>`__: BUG: dtype refcount cleanups
+* `#14587 <https://github.com/numpy/numpy/pull/14587>`__: DOC: Fix a minor typo in changelog entry
+* `#14592 <https://github.com/numpy/numpy/pull/14592>`__: MAINT: Fix typo: remoge → remove
+* `#14595 <https://github.com/numpy/numpy/pull/14595>`__: DOC: Change the promotion table checkmark to 'Y'.
+* `#14596 <https://github.com/numpy/numpy/pull/14596>`__: DEP: Complete deprecation of invalid array/memory order
+* `#14598 <https://github.com/numpy/numpy/pull/14598>`__: DOC: Add to doc that interp cannot contain NaN
+* `#14600 <https://github.com/numpy/numpy/pull/14600>`__: NEP: Accept NEP 32.
+* `#14601 <https://github.com/numpy/numpy/pull/14601>`__: NEP: Fix discrepancies in NEPs
+* `#14603 <https://github.com/numpy/numpy/pull/14603>`__: NEP: Only list "Active" NEPs under "Meta-NEPs"
+* `#14604 <https://github.com/numpy/numpy/pull/14604>`__: API: restructure and document numpy.random C-API
+* `#14605 <https://github.com/numpy/numpy/pull/14605>`__: BUG: properly define PyArray_DescrCheck{,Exact}
+* `#14607 <https://github.com/numpy/numpy/pull/14607>`__: MAINT: Remove duplicate files from .gitignore
+* `#14608 <https://github.com/numpy/numpy/pull/14608>`__: API: rearrange the cython files in numpy.random
+* `#14614 <https://github.com/numpy/numpy/pull/14614>`__: MAINT: Bump pytest from 5.1.3 to 5.2.0
+* `#14615 <https://github.com/numpy/numpy/pull/14615>`__: MAINT: Add "MAINT" tag to dependabot commit msg
+* `#14616 <https://github.com/numpy/numpy/pull/14616>`__: DOC: Updated sphinx directive formatting
+* `#14620 <https://github.com/numpy/numpy/pull/14620>`__: DEP: Finish deprecation of non-integer `num` in linspace
+* `#14621 <https://github.com/numpy/numpy/pull/14621>`__: DOC: s/OR/AND/ in np.logical_and docstring
+* `#14623 <https://github.com/numpy/numpy/pull/14623>`__: DOC: misleading np.sinc() documentation
+* `#14629 <https://github.com/numpy/numpy/pull/14629>`__: DOC: clarify residual in np.polyfit
+* `#14630 <https://github.com/numpy/numpy/pull/14630>`__: BUILD: change to build_src --verbose-cfg, runtests.py --debug-info
+* `#14631 <https://github.com/numpy/numpy/pull/14631>`__: BUG: always free clean_sep
+* `#14634 <https://github.com/numpy/numpy/pull/14634>`__: DOC: Create `class Extension` docstring and add it to documentation.
+* `#14636 <https://github.com/numpy/numpy/pull/14636>`__: DOC: add `printoptions` as a context manager to `set_printoptions`
+* `#14639 <https://github.com/numpy/numpy/pull/14639>`__: DOC: Fix typo in NEP 29
+* `#14643 <https://github.com/numpy/numpy/pull/14643>`__: MAINT: Use scalar math power function directly
+* `#14649 <https://github.com/numpy/numpy/pull/14649>`__: DOC: Add IPython to dependencies needed to build docs.
+* `#14652 <https://github.com/numpy/numpy/pull/14652>`__: MAINT: Bump pytest-cov from 2.7.1 to 2.8.1
+* `#14653 <https://github.com/numpy/numpy/pull/14653>`__: MAINT: Bump pytest from 5.2.0 to 5.2.1
+* `#14654 <https://github.com/numpy/numpy/pull/14654>`__: MAINT: Bump pytz from 2019.2 to 2019.3
+* `#14656 <https://github.com/numpy/numpy/pull/14656>`__: MAINT: Use `extract_unit` throughout datetime
+* `#14657 <https://github.com/numpy/numpy/pull/14657>`__: BUG: fix fromfile behavior when reading sub-array dtypes
+* `#14662 <https://github.com/numpy/numpy/pull/14662>`__: BUG: random: Use correct length when axis is given to shuffle.
+* `#14669 <https://github.com/numpy/numpy/pull/14669>`__: BUG: Do not rely on undefined behaviour to cast from float to...
+* `#14674 <https://github.com/numpy/numpy/pull/14674>`__: NEP: add default-dtype-object-deprecation nep 34
+* `#14681 <https://github.com/numpy/numpy/pull/14681>`__: MAINT: Remove unused boolean negative/subtract loops
+* `#14682 <https://github.com/numpy/numpy/pull/14682>`__: DEP: ufunc `out` argument must be a tuple for multiple outputs
+* `#14693 <https://github.com/numpy/numpy/pull/14693>`__: BUG: Fix `np.einsum` errors on Power9 Linux and z/Linux
+* `#14696 <https://github.com/numpy/numpy/pull/14696>`__: DOC: Note release notes process changes on devdocs start page
+* `#14699 <https://github.com/numpy/numpy/pull/14699>`__: Doc warnings
+* `#14705 <https://github.com/numpy/numpy/pull/14705>`__: DOC: Switch Markdown link to RST in NEP 29
+* `#14709 <https://github.com/numpy/numpy/pull/14709>`__: TST: Divide Azure CI Pipelines into stages.
+* `#14710 <https://github.com/numpy/numpy/pull/14710>`__: DEP: Finish the out kwarg deprecation for ufunc calls
+* `#14711 <https://github.com/numpy/numpy/pull/14711>`__: DOC: Removing mentions of appveyor
+* `#14714 <https://github.com/numpy/numpy/pull/14714>`__: BUG: Default start to 0 for timedelta arange
+* `#14717 <https://github.com/numpy/numpy/pull/14717>`__: API: NaT (arg)min/max behavior
+* `#14718 <https://github.com/numpy/numpy/pull/14718>`__: API: Forbid Q<->m safe casting
+* `#14720 <https://github.com/numpy/numpy/pull/14720>`__: DEP: deprecate financial functions.
+* `#14721 <https://github.com/numpy/numpy/pull/14721>`__: DOC: Move newsfragment to correct folder
+* `#14723 <https://github.com/numpy/numpy/pull/14723>`__: DOC: cleaning up examples in maskedarray.generic
+* `#14725 <https://github.com/numpy/numpy/pull/14725>`__: MAINT: umath: Change error message for unsupported bool subtraction.
+* `#14730 <https://github.com/numpy/numpy/pull/14730>`__: ENH: Add complex number support for fromfile
+* `#14732 <https://github.com/numpy/numpy/pull/14732>`__: TST: run refguide-check on rst files in doc/*
+* `#14734 <https://github.com/numpy/numpy/pull/14734>`__: DOC: Edit NEP procedure for better discussion
+* `#14736 <https://github.com/numpy/numpy/pull/14736>`__: DOC: Post 1.17.3 release update.
+* `#14737 <https://github.com/numpy/numpy/pull/14737>`__: NEP: Accept NEP 29 as final
+* `#14738 <https://github.com/numpy/numpy/pull/14738>`__: BUG: Don't narrow intp to int when producing error messages
+* `#14742 <https://github.com/numpy/numpy/pull/14742>`__: DOC: lib: Fix deprecation markup in financial function docstrings.
+* `#14743 <https://github.com/numpy/numpy/pull/14743>`__: DOC: Change from HTTP to HTTPS
+* `#14745 <https://github.com/numpy/numpy/pull/14745>`__: BUG: clear only attribute errors in get_attr_string.h::maybe_get_attr
+* `#14762 <https://github.com/numpy/numpy/pull/14762>`__: MAINT: doc: Remove doc/newdtype_example/
+* `#14763 <https://github.com/numpy/numpy/pull/14763>`__: Reword cautionary note about dtype.descr
+* `#14769 <https://github.com/numpy/numpy/pull/14769>`__: BUG: fix integer size confusion in handling array's ndmin argument
+* `#14771 <https://github.com/numpy/numpy/pull/14771>`__: TST, BUILD: add a gcc 4.8 run on ubuntu 18.04
+* `#14775 <https://github.com/numpy/numpy/pull/14775>`__: Update CLASSIFIERS with python 3.8 support
+* `#14777 <https://github.com/numpy/numpy/pull/14777>`__: BUG: random: biased samples from integers() with 8 or 16 bit...
+* `#14782 <https://github.com/numpy/numpy/pull/14782>`__: DOC: Add release note about changed random variate stream from...
+* `#14786 <https://github.com/numpy/numpy/pull/14786>`__: DOC: Make changes to NEP procedure
+* `#14790 <https://github.com/numpy/numpy/pull/14790>`__: DOC: random: Remove redundant 'See Also' entry in 'uniform' docstring.
+* `#14791 <https://github.com/numpy/numpy/pull/14791>`__: MAINT: Minor typo fix
+* `#14792 <https://github.com/numpy/numpy/pull/14792>`__: MAINT: Bump pytest from 5.2.1 to 5.2.2
+* `#14793 <https://github.com/numpy/numpy/pull/14793>`__: DOC: Adjust NEP-31 to new template.
+* `#14794 <https://github.com/numpy/numpy/pull/14794>`__: DEP: issue deprecation warning when creating ragged array (NEP...
+* `#14798 <https://github.com/numpy/numpy/pull/14798>`__: NEP: move 'NEP 29 random' from Accepted to Final
+* `#14799 <https://github.com/numpy/numpy/pull/14799>`__: DOC: Add take_along_axis to the see also section in argmin, argmax...
+* `#14800 <https://github.com/numpy/numpy/pull/14800>`__: ENH: change object-array comparisons to prefer OO->O ufuncs
+* `#14805 <https://github.com/numpy/numpy/pull/14805>`__: TST: Don't construct Fraction instances from numpy scalars
+* `#14814 <https://github.com/numpy/numpy/pull/14814>`__: Rename helper functions to not use the word rank
+* `#14820 <https://github.com/numpy/numpy/pull/14820>`__: MAINT: Use templating to merge float loops
+* `#14826 <https://github.com/numpy/numpy/pull/14826>`__: BUILD: ignore more build.log warnings
+* `#14827 <https://github.com/numpy/numpy/pull/14827>`__: BLD: Prevent -flto from optimising long double representation...
+* `#14829 <https://github.com/numpy/numpy/pull/14829>`__: BUG: raise ValueError for empty arrays passed to _pyarray_correlate
+* `#14830 <https://github.com/numpy/numpy/pull/14830>`__: MAINT: move buffer.h -> npy_buffer.h to avoid conflicts
+* `#14836 <https://github.com/numpy/numpy/pull/14836>`__: MAINT: Bump cython from 0.29.13 to 0.29.14
+* `#14841 <https://github.com/numpy/numpy/pull/14841>`__: ENH: add isinf, isnan, fmin, fmax loops for datetime64, timedelta64
+* `#14842 <https://github.com/numpy/numpy/pull/14842>`__: BLD: add 'apt update' to shippable
+* `#14845 <https://github.com/numpy/numpy/pull/14845>`__: MAINT: revert gh-14800, which gave precedence to OO->O over OO->?
+* `#14874 <https://github.com/numpy/numpy/pull/14874>`__: REL: Update master after 1.17.4 release.
+* `#14878 <https://github.com/numpy/numpy/pull/14878>`__: BUILD: remove SSE2 flag from numpy.random builds
+* `#14879 <https://github.com/numpy/numpy/pull/14879>`__: DOC: Update NEP29 with Python3.8 information.
+* `#14881 <https://github.com/numpy/numpy/pull/14881>`__: BUG: Remove builtins from __all__
+* `#14898 <https://github.com/numpy/numpy/pull/14898>`__: MAINT: Delete and ignore generated files
+* `#14899 <https://github.com/numpy/numpy/pull/14899>`__: Update FUNDING.yml
+* `#14901 <https://github.com/numpy/numpy/pull/14901>`__: MAINT: Remove uses of scalar aliases
+* `#14903 <https://github.com/numpy/numpy/pull/14903>`__: NEP: move nep 34 to accepted
+* `#14907 <https://github.com/numpy/numpy/pull/14907>`__: TST: Add s390x to the TravisCI test matrix.
+* `#14912 <https://github.com/numpy/numpy/pull/14912>`__: DOC: Note FFT type promotion
+* `#14914 <https://github.com/numpy/numpy/pull/14914>`__: TST: Test with Python3.8 on Windows.
+* `#14915 <https://github.com/numpy/numpy/pull/14915>`__: TST: Update travis.yml
+* `#14921 <https://github.com/numpy/numpy/pull/14921>`__: TST: add no_tracing decorator to refcount-sensitive codepath...
+* `#14926 <https://github.com/numpy/numpy/pull/14926>`__: MAINT: Bump pytest from 5.2.2 to 5.2.4
+* `#14929 <https://github.com/numpy/numpy/pull/14929>`__: BUG: Fix step returned by linspace when num=1 and endpoint=False
+* `#14932 <https://github.com/numpy/numpy/pull/14932>`__: DOC: Compare 'tolist' function to 'list' in example
+* `#14935 <https://github.com/numpy/numpy/pull/14935>`__: DOC: Clarify return type for default_rng
+* `#14944 <https://github.com/numpy/numpy/pull/14944>`__: MAINT: move numpy/random/examples -> numpy/random/_examples
+* `#14947 <https://github.com/numpy/numpy/pull/14947>`__: DOC: testing: Note handling of scalars in assert_array_equal...
+* `#14948 <https://github.com/numpy/numpy/pull/14948>`__: DOC, API: add random.__init__.pxd and document random.* functions
+* `#14951 <https://github.com/numpy/numpy/pull/14951>`__: DOC: Clean up examples of low-level random access
+* `#14954 <https://github.com/numpy/numpy/pull/14954>`__: TST. API: test using distributions.h via cffi
+* `#14962 <https://github.com/numpy/numpy/pull/14962>`__: TST: skip if cython is not available
+* `#14967 <https://github.com/numpy/numpy/pull/14967>`__: MAINT: Cleaned up mintypecode for Py3
+* `#14973 <https://github.com/numpy/numpy/pull/14973>`__: DOC: fix docstring of np.linalg.norm
+* `#14974 <https://github.com/numpy/numpy/pull/14974>`__: MAINT: Added Python3.8 branch to dll lib discovery on Windows
+* `#14976 <https://github.com/numpy/numpy/pull/14976>`__: DEV: update asv.conf.json
+* `#14978 <https://github.com/numpy/numpy/pull/14978>`__: MAINT: Bump pytest from 5.2.4 to 5.3.0
+* `#14982 <https://github.com/numpy/numpy/pull/14982>`__: MAINT: Fix typos
+* `#14983 <https://github.com/numpy/numpy/pull/14983>`__: REV: "ENH: Improved performance of PyArray_FromAny for sequences...
+* `#14994 <https://github.com/numpy/numpy/pull/14994>`__: BUG: warn when saving dtype with metadata
+* `#14996 <https://github.com/numpy/numpy/pull/14996>`__: DEP: Deprecate the axis argument to masked_rows and masked_cols
+* `#15004 <https://github.com/numpy/numpy/pull/15004>`__: MAINT: Fix long name of PCG64
+* `#15007 <https://github.com/numpy/numpy/pull/15007>`__: DOC, API: improve the C-API/Cython documentation and interfaces...
+* `#15009 <https://github.com/numpy/numpy/pull/15009>`__: DOC: Fix typo in numpy.loadtxt and numpy.genfromtxt documentation
+* `#15012 <https://github.com/numpy/numpy/pull/15012>`__: ENH: allow using symbol-suffixed 64-bit BLAS/LAPACK for numpy.dot...
+* `#15014 <https://github.com/numpy/numpy/pull/15014>`__: DOC: add a more useful comment to compat.py3k.py
+* `#15019 <https://github.com/numpy/numpy/pull/15019>`__: DOC: lib: Use a clearer example of ddof in the notes of the cov...
+* `#15021 <https://github.com/numpy/numpy/pull/15021>`__: TST: machinery for tests requiring large memory + lapack64 smoketest
+* `#15023 <https://github.com/numpy/numpy/pull/15023>`__: MAINT: Only copy input array in _replace_nan() if there are nans...
+* `#15025 <https://github.com/numpy/numpy/pull/15025>`__: MAINT: Bump pytest from 5.3.0 to 5.3.1
+* `#15027 <https://github.com/numpy/numpy/pull/15027>`__: REV: "ENH: Improved performance of PyArray_FromAny for sequences...
+* `#15031 <https://github.com/numpy/numpy/pull/15031>`__: REL: Prepare for 1.18 branch
+* `#15032 <https://github.com/numpy/numpy/pull/15032>`__: MAINT: Cleaned up mintypecode for Py3 (pt. 2)
+* `#15036 <https://github.com/numpy/numpy/pull/15036>`__: BUG: Fix refcounting in ufunc object loops
+* `#15039 <https://github.com/numpy/numpy/pull/15039>`__: BUG: Exception tracebacks are dropped
+* `#15053 <https://github.com/numpy/numpy/pull/15053>`__: REV: Revert "Merge pull request #14794 from mattip/nep-0034-impl"
+* `#15058 <https://github.com/numpy/numpy/pull/15058>`__: API, DOC: change names to multivariate_hypergeometric, improve docs
+* `#15059 <https://github.com/numpy/numpy/pull/15059>`__: REL: Prepare for NumPy 1.18.0 release.
+* `#15109 <https://github.com/numpy/numpy/pull/15109>`__: TST: Check requires_memory immediately before the test
+* `#15111 <https://github.com/numpy/numpy/pull/15111>`__: ENH: Add support to sort timedelta64 `NaT` to end of the array
+* `#15112 <https://github.com/numpy/numpy/pull/15112>`__: MAINT: follow-up cleanup for blas64 PR
+* `#15113 <https://github.com/numpy/numpy/pull/15113>`__: ENH: f2py: add --f2cmap option for specifying the name of .f2py_f2cmap
+* `#15114 <https://github.com/numpy/numpy/pull/15114>`__: ENH: add support for ILP64 OpenBLAS (without symbol suffix)
+* `#15146 <https://github.com/numpy/numpy/pull/15146>`__: REL: Prepare for 1.18.0 release.
use the function.
>>> a = [1, 2, 3]
- >>> print [x + 3 for x in a]
+ >>> print([x + 3 for x in a])
[4, 5, 6]
- >>> print "a\n\nb"
+ >>> print("a\n\nb")
a
b
.. toctree::
:maxdepth: 1
-{% for nep, tags in neps.items() if tags['Type'] == 'Process' %}
+{% for nep, tags in neps.items() if tags['Status'] == 'Active' %}
{{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
where ``<n>`` is an appropriately assigned four-digit number (e.g.,
``nep-0000.rst``). The draft must use the :doc:`nep-template` file.
-Once the PR is in place, the NEP should be announced on the mailing
-list for discussion (comments on the PR itself should be restricted to
-minor editorial and technical fixes).
+Once the PR for the NEP is in place, a post should be made to the
+mailing list containing the sections up to "Backward compatibility",
+with the purpose of limiting discussion there to usage and impact.
+Discussion on the pull request will have a broader scope, also including
+details of implementation.
At the earliest convenience, the PR should be merged (regardless of
whether it is accepted during discussion). Additional PRs may be made
When a NEP is ``Accepted``, ``Rejected``, or ``Withdrawn``, the NEP should be
updated accordingly. In addition to updating the status field, at the very
least the ``Resolution`` header should be added with a link to the relevant
-post in the mailing list archives.
+thread in the mailing list archives.
NEPs can also be ``Superseded`` by a different NEP, rendering the
original obsolete. The ``Replaced-By`` and ``Replaces`` headers
=======================================
:Author: Robert Kern <robert.kern@gmail.com>
-:Status: Accepted
+:Status: Final
:Type: Standards Track
:Created: 2018-05-24
:Updated: 2019-05-21
-:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-June/078126.html
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-July/078380.html
Abstract
--------
do random picking of any element from the array. Note that in the last example
a method such as mentioned in the ``Related Questions`` section could be more
straight forward. But this approach is even more flexible, since ``rows``
-does not have to be a simple ``arange``, but could be ``intersting_times``::
+does not have to be a simple ``arange``, but could be ``interesting_times``::
>>> interesting_times = np.array([0, 4, 8, 9, 10])
>>> correct_sensors_at_it = correct_sensors[interesting_times, :]
Rationale
^^^^^^^^^
-The purpose of this aNEP is to define two interfaces -- one for handling
+The purpose of this NEP is to define two interfaces -- one for handling
'missing values', and one for handling 'masked arrays'.
An ordinary value is something like an integer or a floating point number. A
--- /dev/null
+===================================
+NEP 28 — numpy.org website redesign
+===================================
+
+:Author: Ralf Gommers <ralf.gommers@gmail.com>
+:Author: Joe LaChance <joe@boldmetrics.com>
+:Author: Shekhar Rajak <shekharrajak.1994@gmail.com>
+:Status: Accepted
+:Type: Informational
+:Created: 2019-07-16
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-August/079889.html
+
+
+Abstract
+--------
+
+NumPy is the fundamental library for numerical and scientific computing with
+Python. It is used by millions and has a large team of maintainers and
+contributors. Despite that, its `numpy.org <http://numpy.org>`_ website has
+never received the attention it needed and deserved. We hope and intend to
+change that soon. This document describes ideas and requirements for how to
+design a replacement for the current website, to better serve the needs of
+our diverse community.
+
+At a high level, what we're aiming for is:
+
+- a modern, clean look
+- an easy to deploy static site
+- a structure that's easy to navigate
+- content that addresses all types of stakeholders
+- possibly translations into multiple languages / i18n
+
+This website serves a couple of roles:
+
+- it's the entry point to the project for new users
+- it should link to the documentation (which is hosted separately, now on
+ http://docs.scipy.org/ and in the near future on http://numpy.org/doc).
+- it should address various aspects of the project (e.g. what NumPy is and
+ why you'd want to use it, community, project organization, funding,
+ relationship with NumFOCUS and possibly other organizations)
+- it should link out to other places, so every type of stakeholder
+ (beginning and advanced user, educators, packagers, funders, etc.)
+ can find their way
+
+
+Motivation and Scope
+--------------------
+
+The current numpy.org website has almost no content and its design is poor.
+This affects many users, who come there looking for information. It also
+affects many other aspects of the NumPy project, from finding new contributors
+to fundraising.
+
+The scope of the proposed redesign is the top-level numpy.org site, which
+now contains only a couple of pages and may contain on the order of ten
+pages after the redesign. Changing the documentation (user guide, reference
+guide, and some other pages in the NumPy Manual) is out of scope for
+this proposal.
+
+
+Detailed description
+--------------------
+
+User Experience
+~~~~~~~~~~~~~~~
+
+Besides the NumPy logo, there is little that can or needs to be kept from the
+current website. We will rely to a large extent on ideas and proposals by the
+designer(s) of the new website.
+
+As reference points we can use the `Jupyter website <https://jupyter.org/>`_,
+which is probably the best designed site in our ecosystem, and the
+`QuantEcon <https://quantecon.org>`_ and `Julia <https://julialang.org>`_
+sites which are well-designed too.
+
+The Website
+~~~~~~~~~~~
+
+A static site is a must. There are many high-quality static site generators.
+The current website uses Sphinx; however, that is not the best choice - it is
+hard to theme, and it results in sites that are too text-heavy because
+Sphinx's primary aim is documentation.
+
+The following should be considered when choosing a static site generator:
+
+1. *How widely used is it?* This is important when looking for help maintaining
+ or improving the site. More popular frameworks are usually also better
+   maintained, so there is less chance of bugs or obsolescence.
+2. *Ease of deployment.* Most generators meet this criterion; however, things
+   like built-in support for GitHub Pages help.
+3. *Preferences of who implements the new site.* Everyone has their own
+   preferences, and building a new site is a significant amount of work, so
+   we should take the opinions of those doing the work into account.
+
+Traffic
+```````
+
+The current site receives on the order of 500,000 unique visitors per month.
+With a redesigned site and relevant content, there is potential for visitor
+counts to reach 5-6 million -- a level similar to that of
+`scipy.org <http://scipy.org>`_ or `matplotlib.org <http://matplotlib.org>`_ --
+or more.
+
+Possible options for static site generators
+```````````````````````````````````````````
+
+1. *Jekyll.* This is a well-maintained option, with 855 GitHub contributors
+   and contributions within the last month. Jekyll is written in Ruby, and
+   has a simple CLI. Jekyll also has a large directory of
+ `themes <https://jekyllthemes.io>`__, although a majority cost money.
+ There are several themes (`serif <https://jekyllthemes.io/theme/serif>`_,
+ `uBuild <https://jekyllthemes.io/theme/ubuild-jekyll-theme>`_,
+ `Just The Docs <https://jekyllthemes.io/theme/just-the-docs>`_) that are
+ appropriate and free. Most themes are likely responsive for mobile, and
+   that should be a requirement. Jekyll uses a combination of Liquid templating
+ and YAML to render HTML, and content is written in Markdown. i18n
+ functionality is not native to Jekyll, but can be added easily.
+ One nice benefit of Jekyll is that it can be run automatically by GitHub
+ Pages, so deployment via a CI system doesn't need to be implemented.
+2. *Hugo.* This is another well-maintained option, with 554 contributors and
+   contributions within the last month. Hugo is written in Go and, similar to
+   Jekyll, has a simple CLI for generating static sites. Again,
+ similar to Jekyll, Hugo has a large directory of
+ `themes <https://themes.gohugo.io>`_. These themes appear to be free,
+ unlike some of Jekyll's themes.
+ (`Sample landing page theme <https://themes.gohugo.io/hugo-hero-theme>`_,
+ `docs theme <https://themes.gohugo.io/hugo-whisper-theme>`_). Hugo uses Jade
+ as its templating language, and content is also written in Markdown. i18n
+ functionality is native to Hugo.
+3. *Docusaurus.* Docusaurus is a responsive static site generator made by Facebook.
+ Unlike the previous options, Docusaurus doesn't come with themes, and thus we
+ would not want to use this for our landing page. This is an excellent docs
+   option written in React. Docusaurus has native support for i18n (via
+   Crowdin_), document versioning, and document search.
+
+Both Jekyll and Hugo are excellent options that should be supported into the
+future and are good choices for NumPy. Docusaurus has several bonus features
+such as versioning and search that Jekyll and Hugo don't have, but is likely
+a poor candidate for a landing page - it could be a good option for a
+high-level docs site later on though.
+
+Deployment
+~~~~~~~~~~
+
+There is no need to run a server, and doing so is, in our experience, a
+significant drain on the time of maintainers.
+
+1. *Netlify.* Using Netlify is free until 100GB of bandwidth is used. Additional
+ bandwidth costs $20/100GB. They support a global CDN system, which will keep
+ load times quick for users in other regions. Netlify also has Github integration,
+ which will allow for easy deployment. When a pull request is merged, Netlify
+ will automatically deploy the changes. DNS is simple, and HTTPS is also supported.
+2. *GitHub Pages.* GitHub Pages also has a 100GB bandwidth limit, and it is
+   unclear if additional bandwidth can be purchased. It is also unclear where
+   sites are deployed; it should be assumed that sites aren't deployed
+   globally. GitHub Pages has easy-to-use CI & DNS, similar to Netlify. HTTPS
+   is supported.
+3. *Cloudflare.* An excellent option, though additional CI is likely needed
+   for the same ease of deployment.
+
+All of the above options are appropriate for the NumPy site based on current
+traffic. Updating to a new deployment strategy, if needed, is a minor amount of
+work compared to developing the website itself. If a provider such as
+Cloudflare is chosen, additional CI may be required, such as CircleCI, to
+have a similar deployment to GitHub Pages or Netlify.
+
+Analytics
+~~~~~~~~~
+
+It's beneficial for maintainers to know how many visitors are coming to
+numpy.org. Google Analytics offers visitor counts and locations. This will
+help us support users and deploy more strategically, and help maintainers
+understand where traffic is coming from.
+
+Google Analytics is free. A script, provided by Google, must be added to the home page.
+
+Website Structure
+~~~~~~~~~~~~~~~~~
+
+We aim to keep the first version of the new website small in terms of the
+amount of content. New pages can be added later on; right now it is more
+important to get the site design right and get some essential information up.
+Note that in the second half of 2019 we expect to get 1 or 2 tech writers
+involved in the project via Google Season of Docs. They will likely help
+improve the content and its organization.
+
+We propose the following structure:
+
+0. Front page: essentials of what NumPy is (compare e.g. jupyter.org), one or
+ a couple key user stories (compare e.g. julialang.org)
+1. Install
+2. Documentation
+3. Array computing
+4. Community
+5. Learning
+6. About Us
+7. Contribute
+8. Donate
+
+There may be a few other pages, e.g. a page on performance, that are linked
+from one of the main pages.
+
+Stakeholder Content
+~~~~~~~~~~~~~~~~~~~
+
+This should have as little content as possible *within the site*. Somewhere
+on the site we should link out to content that's specific to:
+
+- beginning users (quickstart, tutorial)
+- advanced users
+- educators
+- packagers
+- package authors that depend on NumPy
+- funders (governance, roadmap)
+
+Translation (multilingual / i18n)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+NumPy has users all over the world. Most of those users are not native
+English speakers, and many don't speak English well or at all. Therefore
+having content in multiple languages would potentially address a large unmet
+need. It would likely also help make the NumPy project more diverse and
+welcoming.
+
+On the other hand, there are good reasons why few projects have a
+multi-lingual site. It's potentially a lot of extra work. Extra work for
+maintainers is costly - they're already struggling to keep up with the work
+load. Therefore we have to consider very carefully whether a multi-lingual
+site is feasible, and weigh the costs and benefits.
+
+We start with an assertion: maintaining translations of all documentation, or
+even the whole user guide, as part of the NumPy project is not feasible. One
+simply has to look at the volume of our documentation and the frequency with
+which we change it to realize that that's the case. It may, however, be
+feasible to translate just the top-level pages of the website. Those
+do not change very often, and it will be a limited amount of content (order
+of magnitude 5-10 pages of text).
+
+We propose the following requirements for adding a language:
+
+- The language must have a dedicated maintainer
+- There must be a way to validate content changes (e.g. a second
+ maintainer/reviewer, or high quality language support in a freely
+ available machine translation tool)
+- The language must have a reasonable size target audience (to be
+ assessed by the NumPy maintainers)
+
+Furthermore we propose a policy for when to remove support for a language again
+(preferably by hiding it rather than deleting content). This may be done when
+the language no longer has a maintainer, and coverage of translations falls
+below an acceptable threshold (say 80%).
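+
+To make the removal threshold concrete, here is a minimal sketch of such a
+coverage check in Python (the page counts, language codes, and the 80%
+figure are illustrative assumptions, not decisions made by this NEP)::
+
+    # Decide whether to keep showing a language, based on how many of the
+    # site's top-level pages have an up-to-date translation.
+    translated_pages = {"zh": 9, "es": 7}  # translated pages per language
+    total_pages = 10                       # top-level pages on the site
+    THRESHOLD = 0.8                        # acceptable coverage
+
+    for lang, n_translated in translated_pages.items():
+        coverage = n_translated / total_pages
+        action = "keep" if coverage >= THRESHOLD else "hide"
+        print(f"{lang}: {coverage:.0%} -> {action}")
+    # zh: 90% -> keep
+    # es: 70% -> hide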
+
+Benefits of having translations include:
+
+- Better serve many existing and potential users
+- Potentially attract a culturally and geographically more diverse set of contributors
+
+The tradeoffs are:
+
+- Cost of maintaining a more complex code base
+- Cost of making decisions about whether or not to add a new language
+- Higher cost of making content changes, which create work for language maintainers
+- Any content change should be rolled out with enough delay to have translations in place
+
+Can we define a small enough set of pages and content that it makes sense to do this?
+Probably yes.
+
+Is there an easy to use tool to maintain translations and add them to the website?
+To be discussed - it needs investigating, and may depend on the choice of static site
+generator. One potential option is Crowdin_, which is free for open source projects.
+
+
+Style and graphic design
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Beyond the goal of "a modern, clean look", we choose not to specify too much. A
+designer may have much better ideas than the authors of this proposal, hence we
+will work with the designer(s) during the implementation phase.
+
+The NumPy logo could use a touch-up. The logo is widely recognized and its colors
+and design are good; however, the look-and-feel is perhaps a little dated.
+
+
+Other aspects
+~~~~~~~~~~~~~
+
+A search box would be nice to have. The Sphinx documentation already has a
+search box, however a search box on the main site which provides search results
+for the docs, the website, and perhaps other domains that are relevant for
+NumPy would make sense.
+
+
+Backward compatibility
+----------------------
+
+Assuming a static site generator is chosen, we will migrate away from Sphinx for
+numpy.org (the website, *not including the docs*). The current deployment can
+be preserved until a future deprecation date is decided (potentially based on
+our comfort level with the new site).
+
+All site generators listed above produce HTML and JavaScript that is fully
+visible to us, so the site can continue to be maintained even if a given
+generator project ceases to be maintained.
+
+
+Alternatives
+------------
+
+Alternatives we considered for the overall design of the website:
+
+1. *Update current site.* A new Sphinx theme could be chosen. This would likely
+   take the least amount of resources initially; however, Sphinx does not have
+   the features we are looking for going forward, such as i18n, responsive
+   design, and a clean, modern look.
+ Note that updating the docs Sphinx theme is likely still a good idea - it's
+ orthogonal to this NEP though.
+2. *Create custom site.* This would take the largest amount of resources, though
+   it would likely offer additional benefits compared to a static site generator:
+   any feature could be added, at the cost of developer time.
+
+
+Discussion
+----------
+
+Mailing list thread discussing this NEP: TODO
+
+
+References and Footnotes
+------------------------
+
+.. _Crowdin: https://crowdin.com/pricing#annual
+
+Copyright
+---------
+
+This document has been placed in the public domain.
--- /dev/null
+==================================================================================
+NEP 29 — Recommend Python and NumPy version support as a community policy standard
+==================================================================================
+
+
+:Author: Thomas A Caswell <tcaswell@gmail.com>, Andreas Mueller, Brian Granger, Madicken Munk, Ralf Gommers, Matt Haberland <mhaberla@calpoly.edu>, Matthias Bussonnier <bussonniermatthias@gmail.com>, Stefan van der Walt <stefanv@berkeley.edu>
+:Status: Final
+:Type: Informational
+:Created: 2019-07-13
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-October/080128.html
+
+
+Abstract
+--------
+
+This NEP recommends that all projects across the Scientific
+Python ecosystem adopt a common "time window-based" policy for
+support of Python and NumPy versions. Standardizing a recommendation
+for project support of minimum Python and NumPy versions will improve
+downstream project planning.
+
+This is an unusual NEP in that it offers recommendations for
+community-wide policy and not for changes to NumPy itself. Since a
+common place for SPEEPs (Scientific Python Ecosystem Enhancement
+Proposals) does not exist and given NumPy's central role in the
+ecosystem, a NEP provides a visible place to document the proposed
+policy.
+
+This NEP is being put forward by maintainers of Matplotlib, scikit-learn,
+IPython, Jupyter, yt, SciPy, NumPy, and scikit-image.
+
+
+
+Detailed description
+--------------------
+
+For the purposes of this NEP we assume semantic versioning and define:
+
+*major version*
+ A release that changes the first number (e.g. X.0.0)
+
+*minor version*
+  A release that changes the second number (e.g. 1.Y.0)
+
+*patch version*
+ A release that changes the third number (e.g. 1.1.Z)
+
+
+When a project releases a new major or minor version, we recommend that
+they support at least all minor versions of Python
+introduced and released in the prior 42 months *from the
+anticipated release date* with a minimum of 2 minor versions of
+Python, and all minor versions of NumPy released in the prior 24
+months *from the anticipated release date* with a minimum of 3
+minor versions of NumPy.
+
+
+Consider the following timeline::
+
+ Jan 16 Jan 17 Jan 18 Jan 19 Jan 20
+ | | | | |
+ +++++|+++++++++++|+++++++++++|+++++++++++|+++++++++++|++++++++++++
+ | | | |
+ py 3.5.0 py 3.6.0 py 3.7.0 py 3.8.0
+ |-----------------------------------------> Feb19
+ |-----------------------------------------> Dec19
+ |-----------------------------------------> Nov20
+
+It shows the 42 month support windows for Python. A project with a
+major or minor version release in February 2019 should support Python 3.5 and newer,
+a project with a major or minor version released in December 2019 should
+support Python 3.6 and newer, and a project with a major or minor version
+release in November 2020 should support Python 3.7 and newer.
+
+The current Python release cadence is 18 months, so a 42 month window
+ensures that there will always be at least two minor versions of Python
+in the window. The window is extended 6 months beyond the anticipated
+two-release interval for Python to provide resilience against small
+fluctuations / delays in its release schedule.
+
+Because Python minor version support is based only on historical
+release dates, a 42 month time window, and a planned project release
+date, one can predict with high confidence when a project will be able
+to drop any given minor version of Python. This, in turn, could save
+months of unnecessary maintenance burden.
+
+If a project releases immediately after a minor version of Python
+drops out of the support window, there will inevitably be some
+mismatch in supported versions—but this situation should only last
+until other projects in the ecosystem make releases.
+
+Otherwise, once a project does a minor or major release, it is
+guaranteed that there will be a stable release of all other projects
+that, at the source level, support the same set of Python versions
+supported by the new release.
+
+If there is a Python 4 or a NumPy 2 this policy will have to be
+reviewed in light of the community's and projects' best interests.
+
+
+Support Table
+~~~~~~~~~~~~~
+
+============ ====== =====
+Date Python NumPy
+------------ ------ -----
+Jan 07, 2020 3.6+ 1.15+
+Jun 23, 2020 3.7+ 1.15+
+Jul 23, 2020 3.7+ 1.16+
+Jan 13, 2021 3.7+ 1.17+
+Jul 26, 2021 3.7+ 1.18+
+Dec 26, 2021 3.8+ 1.18+
+Apr 14, 2023 3.9+ 1.18+
+============ ====== =====
+
+
+Drop Schedule
+~~~~~~~~~~~~~
+
+::
+
+ On next release, drop support for Python 3.5 (initially released on Sep 13, 2015)
+ On Jan 07, 2020 drop support for Numpy 1.14 (initially released on Jan 06, 2018)
+ On Jun 23, 2020 drop support for Python 3.6 (initially released on Dec 23, 2016)
+ On Jul 23, 2020 drop support for Numpy 1.15 (initially released on Jul 23, 2018)
+ On Jan 13, 2021 drop support for Numpy 1.16 (initially released on Jan 13, 2019)
+ On Jul 26, 2021 drop support for Numpy 1.17 (initially released on Jul 26, 2019)
+ On Dec 26, 2021 drop support for Python 3.7 (initially released on Jun 27, 2018)
+ On Apr 14, 2023 drop support for Python 3.8 (initially released on Oct 14, 2019)
+
+
+Implementation
+--------------
+
+We suggest that all projects adopt the following language into their
+development guidelines:
+
+ This project supports:
+
+ - All minor versions of Python released 42 months prior to the
+ project, and at minimum the two latest minor versions.
+ - All minor versions of ``numpy`` released in the 24 months prior
+ to the project, and at minimum the last three minor versions.
+
+ In ``setup.py``, the ``python_requires`` variable should be set to
+ the minimum supported version of Python. All supported minor
+ versions of Python should be in the test matrix and have binary
+ artifacts built for the release.
+
+ Minimum Python and NumPy version support should be adjusted upward
+ on every major and minor release, but never on a patch release.
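+
+As an illustration, a hypothetical project releasing in mid-2020 might encode
+this policy in its packaging metadata as follows (a sketch only; the versions
+follow from the support table above, and the project name is made up)::
+
+    from setuptools import setup
+
+    setup(
+        name='example-project',
+        version='1.0.0',
+        # Per the support table, a release in mid-2020 supports Python 3.7+
+        python_requires='>=3.7',
+        # ... and NumPy 1.15+ (1.16+ for releases on or after Jul 23, 2020)
+        install_requires=['numpy>=1.15'],
+    )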
+
+
+Backward compatibility
+----------------------
+
+No backward compatibility issues.
+
+Alternatives
+------------
+
+Ad-Hoc version support
+~~~~~~~~~~~~~~~~~~~~~~
+
+A project could, on every release, evaluate whether to increase
+the minimum version of Python supported.
+As a major downside, an ad-hoc approach makes it hard for downstream users to predict what
+the future minimum versions will be. As there is no objective threshold
+for when the minimum version should be dropped, it is easy for these
+version support discussions to devolve into `bike shedding <https://en.wikipedia.org/wiki/Wikipedia:Avoid_Parkinson%27s_bicycle-shed_effect>`_ and acrimony.
+
+
+All CPython supported versions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The CPython supported versions of Python are listed in the Python
+Developers Guide and the Python PEPs. Supporting these is a very clear
+and conservative approach. However, it means that there exists a four-year
+lag between when a new feature is introduced into the language
+and when a project is able to use it. Additionally, for projects with
+compiled extensions this requires building many binary artifacts for
+each release.
+
+For the case of NumPy, many projects carry workarounds to bugs that
+are fixed in subsequent versions of NumPy. Being proactive about
+increasing the minimum version of NumPy allows downstream
+packages to carry fewer version-specific patches.
+
+
+
+Default version on Linux distribution
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The policy could be to support the version of Python that ships by
+default in the latest Ubuntu LTS or CentOS/RHEL release. However, we
+would still have to standardize across the community which
+distribution to follow.
+
+By following the versions supported by major Linux distributions, we
+are giving up technical control of our projects to external
+organizations that may have different motivations and concerns than we
+do.
+
+
+N minor versions of Python
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Given the current release cadence of Python, the proposed time (42
+months) is roughly equivalent to "the last two" Python minor versions.
+However, if Python changes their release cadence substantially, any
+rule based solely on the number of minor releases may need to be
+changed to remain sensible.
+
+A more fundamental problem with a policy based on number of Python
+releases is that it is hard to predict when support for a given minor
+version of Python will be dropped as that requires correctly
+predicting the release schedule of Python for the next 3-4 years. A
+time-based rule, in contrast, only depends on past events
+and the length of the support window.
+
+
+Time window from the X.Y.1 Python release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is equivalent to a support window a few months longer than one anchored
+on the X.Y.0 release, because the X.Y.1 bug-fix release typically comes a few
+months after the X.Y.0 release; thus an N month window from X.Y.1 is
+roughly equivalent to an N+3 month window from X.Y.0.
+
+The X.Y.0 release is naturally a special release. If we were to
+anchor the window on X.Y.1, we would invite the question of why
+not X.Y.M.
+
+
+Discussion
+----------
+
+
+References and Footnotes
+------------------------
+
+Code to generate support and drop schedule tables ::
+
+ from datetime import datetime, timedelta
+
+ data = """Jan 15, 2017: Numpy 1.12
+ Sep 13, 2015: Python 3.5
+ Dec 23, 2016: Python 3.6
+ Jun 27, 2018: Python 3.7
+ Jun 07, 2017: Numpy 1.13
+ Jan 06, 2018: Numpy 1.14
+ Jul 23, 2018: Numpy 1.15
+ Jan 13, 2019: Numpy 1.16
+ Jul 26, 2019: Numpy 1.17
+ Oct 14, 2019: Python 3.8
+ """
+
+ releases = []
+
+ plus42 = timedelta(days=int(365*3.5 + 1))
+ plus24 = timedelta(days=int(365*2 + 1))
+
+ for line in data.splitlines():
+ date, project_version = line.split(':')
+ project, version = project_version.strip().split(' ')
+ release = datetime.strptime(date, '%b %d, %Y')
+ if project.lower() == 'numpy':
+ drop = release + plus24
+ else:
+ drop = release + plus42
+ releases.append((drop, project, version, release))
+
+ releases = sorted(releases, key=lambda x: x[0])
+
+ minpy = '3.9+'
+ minnum = '1.18+'
+
+ toprint_drop_dates = ['']
+ toprint_support_table = []
+ for d, p, v, r in releases[::-1]:
+ df = d.strftime('%b %d, %Y')
+ toprint_drop_dates.append(
+ f'On {df} drop support for {p} {v} '
+ f'(initially released on {r.strftime("%b %d, %Y")})')
+ toprint_support_table.append(f'{df} {minpy:<6} {minnum:<5}')
+ if p.lower() == 'numpy':
+ minnum = v+'+'
+ else:
+ minpy = v+'+'
+
+ for e in toprint_drop_dates[::-1]:
+ print(e)
+
+ print('============ ====== =====')
+ print('Date Python NumPy')
+ print('------------ ------ -----')
+ for e in toprint_support_table[::-1]:
+ print(e)
+ print('============ ====== =====')
+
+
+Copyright
+---------
+
+This document has been placed in the public domain.
--- /dev/null
+======================================================
+NEP 30 — Duck Typing for NumPy Arrays - Implementation
+======================================================
+
+:Author: Peter Andreas Entschev <pentschev@nvidia.com>
+:Author: Stephan Hoyer <shoyer@google.com>
+:Status: Draft
+:Type: Standards Track
+:Created: 2019-07-31
+:Updated: 2019-07-31
+:Resolution:
+
+Abstract
+--------
+
+We propose the ``__duckarray__`` protocol, following the high-level overview
+described in NEP 22, allowing downstream libraries to return arrays of their
+defined types, in contrast to ``np.asarray``, which coerces those ``array_like``
+objects to NumPy arrays.
+
+Detailed description
+--------------------
+
+NumPy's API, including array definitions, is implemented and mimicked in
+countless other projects. By definition, many of those arrays are fairly
+similar in how they operate to the NumPy standard. The introduction of
+``__array_function__`` allowed dispatching of functions implemented by several
+of these projects directly via NumPy's API. This introduces a new requirement:
+returning the NumPy-like array itself, rather than forcing a coercion into a
+pure NumPy array.
+
+For the purpose above, NEP 22 introduced the concept of duck typing for NumPy
+arrays. The suggested solution described in that NEP allows libraries to avoid
+coercion of a NumPy-like array to a pure NumPy array where necessary, while
+NumPy-like arrays from libraries that do not wish to implement the protocol
+can still be coerced to a pure NumPy array via ``np.asarray``.
+
+Usage Guidance
+~~~~~~~~~~~~~~
+
+Code that uses ``np.duckarray`` is meant for supporting other ndarray-like objects
+that "follow the NumPy API". That is an ill-defined concept at the moment --
+every known library implements the NumPy API only partly, and many deviate
+intentionally in at least some minor ways. This cannot be easily remedied, so
+for users of ``__duckarray__`` we recommend the following strategy: check if the
+NumPy functionality used by the code that follows your use of ``__duckarray__``
+is present in Dask, CuPy and Sparse. If so, it's reasonable to expect any duck
+array to work here. If not, we suggest you indicate in your docstring what kinds
+of duck arrays are accepted, or what properties they need to have.
+
+To exemplify the usage of duck arrays, suppose one wants to take the ``mean()``
+of an array-like object ``arr``. Using NumPy alone, one could write
+``np.asarray(arr).mean()`` to achieve the intended result. However, ``arr`` may
+itself be a NumPy-like array, i.e. an object compliant (fully or partially) with
+the NumPy API, such as a CuPy, Sparse or Dask array. In the case where ``arr``
+is already compliant with the NumPy API, we would simply return it (and prevent
+it from being coerced into a pure NumPy array); otherwise, it would be coerced
+into a NumPy array.
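+
+Under this proposal, the example above could be written as follows (a sketch of
+the intended usage, assuming ``arr`` comes from a library implementing the
+``__duckarray__`` method):
+
+.. code:: python
+
+    # Returns ``arr`` itself if it implements ``__duckarray__``; otherwise
+    # falls back to coercion via ``np.asarray``.
+    np.duckarray(arr).mean()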
+
+Implementation
+--------------
+
+The implementation idea is fairly straightforward, requiring a new function
+``duckarray`` to be introduced in NumPy, and a new method ``__duckarray__`` in
+NumPy-like array classes. The new ``__duckarray__`` method shall return the
+downstream array-like object itself, typically the ``self`` object. If
+appropriate, an ``__array__`` method may be implemented that either returns a
+NumPy array or raises a ``TypeError`` with a helpful message.
+
+The new NumPy ``duckarray`` function can be implemented as follows:
+
+.. code:: python
+
+    import numpy as np
+
+    def duckarray(array_like):
+ if hasattr(array_like, '__duckarray__'):
+ return array_like.__duckarray__()
+ return np.asarray(array_like)
+
+Example for a project implementing NumPy-like arrays
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now consider a library that implements a NumPy-compatible array class called
+``NumPyLikeArray``, this class shall implement the methods described above, and
+a complete implementation would look like the following:
+
+.. code:: python
+
+ class NumPyLikeArray:
+ def __duckarray__(self):
+ return self
+
+ def __array__(self):
+            raise TypeError("NumPyLikeArray can not be converted to a numpy array. "
+ "You may want to use np.duckarray.")
+
+The implementation above exemplifies the simplest case, but the overall idea
+is that libraries will implement a ``__duckarray__`` method that returns the
+original object, and an ``__array__`` method that either creates and returns an
+appropriate NumPy array, or raises a ``TypeError`` to prevent unintentional use
+as an object in a NumPy array (if ``np.asarray`` is called on an arbitrary
+object that does not implement ``__array__``, it will create a 0-dimensional
+``object``-dtype NumPy array wrapping that object).
+
+In the case of existing libraries that don't already implement ``__array__`` but
+would like to use duck array typing, it is advised that they introduce
+both ``__array__`` and ``__duckarray__`` methods.
+
+Usage
+-----
+
+An example of how the ``__duckarray__`` protocol could be used to write a
+``stack`` function based on ``concatenate``, and the outcome it produces, can be
+seen below. The example was chosen not only to demonstrate the usage of
+the ``duckarray`` function, but also its dependency on the NumPy API, as shown
+by the checks on the array's ``shape`` attribute. Note that the example is
+merely a simplified version of NumPy's actual implementation of ``stack``
+working on the first axis, and it is assumed that Dask has implemented the
+``__duckarray__`` method.
+
+.. code:: python
+
+ def duckarray_stack(arrays):
+ arrays = [np.duckarray(arr) for arr in arrays]
+
+ shapes = {arr.shape for arr in arrays}
+ if len(shapes) != 1:
+ raise ValueError('all input arrays must have the same shape')
+
+ expanded_arrays = [arr[np.newaxis, ...] for arr in arrays]
+ return np.concatenate(expanded_arrays, axis=0)
+
+ dask_arr = dask.array.arange(10)
+ np_arr = np.arange(10)
+ np_like = list(range(10))
+
+ duckarray_stack((dask_arr, dask_arr)) # Returns dask.array
+ duckarray_stack((dask_arr, np_arr)) # Returns dask.array
+ duckarray_stack((dask_arr, np_like)) # Returns dask.array
+
+In contrast, using only ``np.asarray`` (at the time of writing of this NEP, this
+is the usual method employed by library developers to ensure arrays are
+NumPy-like) has a different outcome:
+
+.. code:: python
+
+ def asarray_stack(arrays):
+        arrays = [np.asarray(arr) for arr in arrays]
+
+ # The remaining implementation is the same as that of
+ # ``duckarray_stack`` above
+
+ asarray_stack((dask_arr, dask_arr)) # Returns np.ndarray
+ asarray_stack((dask_arr, np_arr)) # Returns np.ndarray
+ asarray_stack((dask_arr, np_like)) # Returns np.ndarray
+
+Backward compatibility
+----------------------
+
+This proposal does not raise any backward compatibility issues within NumPy,
+given that it only introduces a new function. However, downstream libraries
+that opt to introduce the ``__duckarray__`` protocol may choose to remove the
+ability to coerce their arrays to a NumPy array via the ``np.array`` or
+``np.asarray`` functions, preventing unintended coercion of such arrays to a
+pure NumPy array (as some libraries, such as CuPy and Sparse, already do).
+Code working with libraries that do not implement the protocol can still use
+``np.duckarray`` to promote ``array_like`` objects to pure NumPy arrays.
+
+Previous proposals and discussion
+---------------------------------
+
+The duck typing protocol proposed here was described at a high level in
+`NEP 22 <https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html>`_.
+
+Additionally, longer discussions about the protocol and related proposals
+took place in
+`numpy/numpy #13831 <https://github.com/numpy/numpy/issues/13831>`_.
+
+Copyright
+---------
+
+This document has been placed in the public domain.
--- /dev/null
+============================================================
+NEP 31 — Context-local and global overrides of the NumPy API
+============================================================
+
+:Author: Hameer Abbasi <habbasi@quansight.com>
+:Author: Ralf Gommers <rgommers@quansight.com>
+:Author: Peter Bell <pbell@quansight.com>
+:Status: Draft
+:Type: Standards Track
+:Created: 2019-08-22
+
+
+Abstract
+--------
+
+This NEP proposes to make all of NumPy's public API overridable via an
+extensible backend mechanism.
+
+Acceptance of this NEP means NumPy would provide global and context-local
+overrides in a separate namespace, as well as a dispatch mechanism similar
+to NEP-18 [2]_. First experiences with ``__array_function__`` show that it
+is necessary to be able to override NumPy functions that *do not take an
+array-like argument*, and hence aren't overridable via
+``__array_function__``. The most pressing need is array creation and coercion
+functions, such as ``numpy.zeros`` or ``numpy.asarray``; see e.g. NEP-30 [9]_.
+
+This NEP proposes to allow, in an opt-in fashion, overriding any part of the
+NumPy API. It is intended as a comprehensive resolution to NEP-22 [3]_, and
+obviates the need to add an ever-growing list of new protocols for each new
+type of function or object that needs to become overridable.
+
+Motivation and Scope
+--------------------
+
+The primary end-goal of this NEP is to make the following possible:
+
+.. code:: python
+
+ # On the library side
+ import numpy.overridable as unp
+
+ def library_function(array):
+ array = unp.asarray(array)
+ # Code using unumpy as usual
+ return array
+
+ # On the user side:
+ import numpy.overridable as unp
+ import uarray as ua
+ import dask.array as da
+
+ ua.register_backend(da) # Can be done within Dask itself
+
+ library_function(dask_array) # works and returns dask_array
+
+ with unp.set_backend(da):
+ library_function([1, 2, 3, 4]) # actually returns a Dask array.
+
+Here, the backend passed to ``register_backend`` or ``set_backend`` can be any
+compatible object defined either by NumPy or an external library, such as Dask
+or CuPy. Ideally, it should be the module ``dask.array`` or ``cupy`` itself.
+
+These kinds of overrides are useful for both the end-user as well as library
+authors. End-users may have written or wish to write code that they then later
+speed up or move to a different implementation, say PyData/Sparse. They can do
+this simply by setting a backend. Library authors may also wish to write code
+that is portable across array implementations, for example ``sklearn`` may wish
+to write code for a machine learning algorithm that is portable across array
+implementations while also using array creation functions.
+
+This NEP takes a holistic approach: It assumes that there are parts of
+the API that need to be overridable, and that these will grow over time. It
+provides a general framework and a mechanism to avoid a design of a new
+protocol each time this is required. This was the goal of ``uarray``: to
+allow for overrides in an API without needing the design of a new protocol.
+
+This NEP proposes the following: That ``unumpy`` [8]_ becomes the
+recommended override mechanism for the parts of the NumPy API not yet covered
+by ``__array_function__`` or ``__array_ufunc__``, and that ``uarray`` is
+vendored into a new namespace within NumPy to give users and downstream
+dependencies access to these overrides. This vendoring mechanism is similar
+to what SciPy decided to do for making ``scipy.fft`` overridable (see [10]_).
+
+The motivation behind ``uarray`` is manifold: First, there have been several
+attempts to allow dispatch of parts of the NumPy API, including (most
+prominently) the ``__array_ufunc__`` protocol in NEP-13 [4]_, and the
+``__array_function__`` protocol in NEP-18 [2]_, but this has shown the need
+for further protocols to be developed, including a protocol for coercion (see
+[5]_, [9]_). The reasons these overrides are needed have been extensively
+discussed in the references, and this NEP will not attempt to go into the
+details of why these are needed; but in short: It is necessary for library
+authors to be able to coerce arbitrary objects into arrays of their own types,
+such as CuPy needing to coerce to a CuPy array, for example, instead of
+a NumPy array. In simpler words, one needs things like ``np.asarray(...)`` or
+an alternative to "just work" and return duck-arrays.
+
+Usage and Impact
+----------------
+
+This NEP allows for global and context-local overrides, as well as
+automatic overrides à la ``__array_function__``.
+
+Here are some use-cases this NEP would enable, besides the
+first one stated in the motivation section:
+
+The first is allowing alternate dtypes to return their
+respective arrays.
+
+.. code:: python
+
+ # Returns an XND array
+ x = unp.ones((5, 5), dtype=xnd_dtype) # Or torch dtype
+
+The second is allowing overrides for parts of the API.
+This is to allow alternate and/or optimised implementations
+for ``np.linalg``, BLAS, and ``np.random``.
+
+.. code:: python
+
+ import numpy as np
+ import pyfftw # Or mkl_fft
+
+ # Makes pyfftw the default for FFT
+ np.set_global_backend(pyfftw)
+
+ # Uses pyfftw without monkeypatching
+ np.fft.fft(numpy_array)
+
+    with np.set_backend(pyfftw):  # Or mkl_fft, or numpy
+ # Uses the backend you specified
+ np.fft.fft(numpy_array)
+
+This will provide an official way for overrides to work with NumPy without
+monkeypatching or distributing a modified version of NumPy.
+
+Here are a few other use-cases, implied but not already
+stated:
+
+.. code:: python
+
+ data = da.from_zarr('myfile.zarr')
+ # result should still be dask, all things being equal
+ result = library_function(data)
+ result.to_zarr('output.zarr')
+
+This second example would work if ``magic_library`` were built
+on top of ``unumpy``.
+
+.. code:: python
+
+ from dask import array as da
+ from magic_library import pytorch_predict
+
+ data = da.from_zarr('myfile.zarr')
+ # normally here one would use e.g. data.map_overlap
+ result = pytorch_predict(data)
+ result.to_zarr('output.zarr')
+
+Some backends may depend on other backends: for example, xarray
+depends on ``numpy.fft`` when transforming a time axis into a frequency axis,
+and Dask/xarray may hold an array other than a NumPy array inside it. This would
+be handled in the following manner inside code::
+
+ with ua.set_backend(cupy), ua.set_backend(dask.array):
+ # Code that has distributed GPU arrays here
+
+Backward compatibility
+----------------------
+
+There are no backward incompatible changes proposed in this NEP.
+
+Detailed description
+--------------------
+
+Proposals
+~~~~~~~~~
+
+The only change this NEP proposes at its acceptance is to make ``unumpy`` the
+officially recommended way to override NumPy, along with making some submodules
+overridable by default via ``uarray``. ``unumpy`` will remain a separate
+repository/package, which we propose to vendor rather than depend on, in order
+to avoid a hard dependency; the separately installed ``unumpy`` package will be
+used only if it is available. In concrete terms, ``numpy.overridable`` becomes
+an alias for ``unumpy`` if it is available, with a fallback to the vendored
+version if not. ``uarray`` and ``unumpy`` will be developed primarily with the
+input of duck-array authors and secondarily of custom dtype authors, via the
+usual GitHub workflow. There are a few reasons for this:
+
+* Faster iteration in the case of bugs or issues.
+* Faster design changes, in the case of needed functionality.
+* ``unumpy`` will work with older versions of NumPy as well.
+* The user and library author opt in to the override process,
+ rather than breakages happening when it is least expected.
+ In simple terms, bugs in ``unumpy`` mean that ``numpy`` remains
+ unaffected.
+* For ``numpy.fft``, ``numpy.linalg`` and ``numpy.random``, the functions in
+ the main namespace will mirror those in the ``numpy.overridable`` namespace.
+  The reason for this is that there may exist functions in these
+ submodules that need backends, even for ``numpy.ndarray`` inputs.
+
+Advantages of ``unumpy`` over other solutions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``unumpy`` offers a number of advantages over the approach of defining a new
+protocol for every problem encountered: Whenever there is something requiring
+an override, ``unumpy`` will be able to offer a unified API with very minor
+changes. For example:
+
+* ``ufunc`` objects can be overridden via their ``__call__``, ``reduce`` and
+ other methods.
+* Other functions can be overridden in a similar fashion.
+* ``np.asduckarray`` goes away, and becomes ``np.overridable.asarray`` with a
+ backend set.
+* The same holds for array creation functions such as ``np.zeros``,
+ ``np.empty`` and so on.
+
+This also holds for the future: Making something overridable would require only
+minor changes to ``unumpy``.
+
+Another promise ``unumpy`` holds is one of default implementations. Default
+implementations can be provided for any multimethod, in terms of others. This
+allows one to override a large part of the NumPy API by defining only a small
+part of it. This eases the creation of new duck-arrays by providing
+default implementations of many functions that can be easily expressed in
+terms of others, as well as a repository of utility functions, needed by most
+duck-arrays, that help in their implementation. This would
+allow us to avoid designing entire protocols, e.g., a protocol for stacking
+and concatenating would be replaced by simply implementing ``stack`` and/or
+``concatenate`` and then providing default implementations for everything else
+in that class. The same applies for transposing, and many other functions for
+which protocols haven't been proposed, such as ``isin`` in terms of ``in1d``,
+``setdiff1d`` in terms of ``unique``, and so on.
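+
+For instance, mirroring NumPy's own definition, a default implementation of
+``isin`` in terms of ``in1d`` might look roughly like this (a sketch, assuming
+``unumpy`` exposes ``asarray``, ``in1d`` and ``reshape`` multimethods)::
+
+    import numpy.overridable as unp
+
+    def isin_default(element, test_elements, assume_unique=False, invert=False):
+        # Expressed purely in terms of other multimethods, so any backend
+        # implementing ``in1d`` (and ``reshape``) gets ``isin`` for free.
+        element = unp.asarray(element)
+        result = unp.in1d(element, test_elements,
+                          assume_unique=assume_unique, invert=invert)
+        return unp.reshape(result, element.shape)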
+
+It also allows one to override functions in a manner which
+``__array_function__`` simply cannot, such as overriding ``np.einsum`` with the
+version from the ``opt_einsum`` package, or Intel MKL overriding FFT, BLAS
+or ``ufunc`` objects. They would define a backend with the appropriate
+multimethods, and the user would select them via a ``with`` statement, or by
+registering them as a backend.
+
+The last benefit is a clear way to coerce to a given backend (via the
+``coerce`` keyword in ``ua.set_backend``), and a protocol
+for coercing not only arrays, but also ``dtype`` objects and ``ufunc`` objects
+with similar ones from other libraries. This is due to the existence of actual,
+third party dtype packages, and their desire to blend into the NumPy ecosystem
+(see [6]_). This is a separate issue compared to the C-level dtype redesign
+proposed in [7]_, it's about allowing third-party dtype implementations to
+work with NumPy, much like third-party array implementations. These can provide
+features such as, for example, units, jagged arrays or other such features that
+are outside the scope of NumPy.
+
+Mixing NumPy and ``unumpy`` in the same file
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Normally, one would want to import only one of ``unumpy`` or ``numpy``,
+importing it as ``np`` for familiarity. However, there may be situations
+where one wishes to mix NumPy and the overrides, and there are a few ways to do
+this, depending on the user's style::
+
+ from numpy import overridable as unp
+ import numpy as np
+
+or::
+
+ import numpy as np
+
+ # Use unumpy via np.overridable
+
+Duck-array coercion
+~~~~~~~~~~~~~~~~~~~
+
+There are inherent problems with returning objects that are not NumPy arrays
+from ``numpy.array`` or ``numpy.asarray``, particularly in the context of C/C++
+or Cython code that may get an object with a different memory layout than the
+one it expects. However, we believe this problem may apply not only to these
+two functions but all functions that return NumPy arrays. For this reason,
+overrides are opt-in for the user, by using the submodule ``numpy.overridable``
+rather than ``numpy``. NumPy will continue to work unaffected by anything in
+``numpy.overridable``.
+
+If the user wishes to obtain a NumPy array, there are two ways of doing it:
+
+1. Use ``numpy.asarray`` (the non-overridable version).
+2. Use ``numpy.overridable.asarray`` with the NumPy backend set and coercion
+   enabled (see the sketch below).
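+
+A sketch of the second option follows. Note that ``numpy_backend`` is a
+hypothetical placeholder for whatever backend object NumPy ends up exposing;
+``ua.set_backend`` accepts a ``coerce`` keyword for this purpose:
+
+.. code:: python
+
+    import uarray as ua
+    import numpy.overridable as unp
+
+    # ``duck_arr`` stands for any duck array, e.g. a Dask or CuPy array.
+    # ``coerce=True`` asks the backend to convert (not merely dispatch on)
+    # the input, yielding a plain NumPy array.
+    with ua.set_backend(numpy_backend, coerce=True):
+        np_arr = unp.asarray(duck_arr)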
+
+Aliases outside of the ``numpy.overridable`` namespace
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All functionality in ``numpy.random``, ``numpy.linalg`` and ``numpy.fft``
+will be aliased to their respective overridable versions inside
+``numpy.overridable``. The reason for this is that there are alternative
+implementations of RNGs (``mkl-random``), linear algebra routines (``eigen``,
+``blis``) and FFT routines (``mkl-fft``, ``pyFFTW``) that need to operate on
+``numpy.ndarray`` inputs, but still need the ability to switch behaviour.
+
+This differs from monkeypatching in a few ways:
+
+* The caller-facing signature of the function is always the same,
+ so there is at least the loose sense of an API contract. Monkeypatching
+ does not provide this ability.
+* There is the ability to switch the backend locally.
+* It has been `suggested <http://numpy-discussion.10968.n7.nabble.com/NEP-31-Context-local-and-global-overrides-of-the-NumPy-API-tp47452p47472.html>`_
+ that the reason that 1.17 hasn't landed in the Anaconda defaults channel is
+ due to the incompatibility between monkeypatching and ``__array_function__``,
+ as monkeypatching would bypass the protocol completely.
+* Statements of the form ``from numpy import x; x`` and ``np.x`` would have
+ different results depending on whether the import was made before or
+ after monkeypatching happened.
+
+All this isn't possible at all with ``__array_function__`` or
+``__array_ufunc__``.
+
+It has been formally realised (at least in part) that a backend system is
+needed for this, in the `NumPy roadmap <https://numpy.org/neps/roadmap.html#other-functionality>`_.
+
+For ``numpy.random``, it's still necessary to make the C-API fit the one
+proposed in `NEP-19 <https://numpy.org/neps/nep-0019-rng-policy.html>`_.
+This is not feasible for ``mkl-random``, because it would then need to be
+rewritten to fit that framework. The guarantees on stream
+compatibility will be the same as before, but if there's a backend that affects
+``numpy.random`` set, we make no guarantees about stream compatibility, and it
+is up to the backend author to provide their own guarantees.
+
+Providing a way for implicit dispatch
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It has been suggested that the ability to dispatch methods which do not take
+a dispatchable argument is needed, inferring the backend from another
+dispatchable instead.
+
+As a concrete example, consider the following:
+
+.. code:: python
+
+ with unumpy.determine_backend(array_like, np.ndarray):
+ unumpy.arange(len(array_like))
+
+While this does not exist yet in ``uarray``, it is trivial to add it. The need for
+this kind of code exists because one might want to have an alternative for the
+proposed ``*_like`` functions, or the ``like=`` keyword argument. The need for these
+exists because there are functions in the NumPy API that do not take a dispatchable
+argument, but there is still the need to select a backend based on a different
+dispatchable.
+
+The need for an opt-in module
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An opt-in module is needed for a few reasons:
+
+* There are parts of the API (like ``numpy.asarray``) that simply cannot be
+ overridden due to incompatibility concerns with C/Cython extensions, however,
+ one may want to coerce to a duck-array using ``asarray`` with a backend set.
+* There are possible issues around an implicit option and monkeypatching, such
+ as those mentioned above.
+
+NEP 18 notes that this may require maintenance of two separate APIs. However,
+this burden may be lessened by, for example, parametrizing all tests over
+``numpy.overridable`` separately via a fixture. This also has the side-effect
+of thoroughly testing it, unlike ``__array_function__``. We also feel that it
+provides an opportunity to separate the NumPy API contract properly from the
+implementation.
+
+Benefits to end-users and mixing backends
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Mixing backends is easy in ``uarray``; one only has to do:
+
+.. code:: python
+
+ # Explicitly say which backends you want to mix
+ ua.register_backend(backend1)
+ ua.register_backend(backend2)
+ ua.register_backend(backend3)
+
+ # Freely use code that mixes backends here.
+
+The benefits to end-users extend beyond just writing new code. Old code
+(usually in the form of scripts) can be easily ported to different backends
+by a simple import switch and a line adding the preferred backend. This way,
+users may find it easier to port existing code to GPU or distributed computing.
+
+Related Work
+------------
+
+Other override mechanisms
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* NEP-18, the ``__array_function__`` protocol. [2]_
+* NEP-13, the ``__array_ufunc__`` protocol. [4]_
+* NEP-30, the ``__duckarray__`` protocol. [9]_
+
+Existing NumPy-like array implementations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* Dask: https://dask.org/
+* CuPy: https://cupy.chainer.org/
+* PyData/Sparse: https://sparse.pydata.org/
+* Xnd: https://xnd.readthedocs.io/
+* Astropy's Quantity: https://docs.astropy.org/en/stable/units/
+
+Existing and potential consumers of alternative arrays
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* Dask: https://dask.org/
+* scikit-learn: https://scikit-learn.org/
+* xarray: https://xarray.pydata.org/
+* TensorLy: http://tensorly.org/
+
+Existing alternate dtype implementations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* ``ndtypes``: https://ndtypes.readthedocs.io/en/latest/
+* Datashape: https://datashape.readthedocs.io
+* Plum: https://plum-py.readthedocs.io/
+
+Alternate implementations of parts of the NumPy API
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* ``mkl_random``: https://github.com/IntelPython/mkl_random
+* ``mkl_fft``: https://github.com/IntelPython/mkl_fft
+* ``bottleneck``: https://github.com/pydata/bottleneck
+* ``opt_einsum``: https://github.com/dgasmith/opt_einsum
+
+Implementation
+--------------
+
+The implementation of this NEP will require the following steps:
+
+* Implementation of ``uarray`` multimethods corresponding to the
+ NumPy API, including classes for overriding ``dtype``, ``ufunc``
+ and ``array`` objects, in the ``unumpy`` repository, which are usually
+ very easy to create.
+* Moving backends from ``unumpy`` into the respective array libraries.
+
+Maintenance can be eased by testing over ``{numpy, unumpy}`` via parameterized
+tests. If a new argument is added to a method, the corresponding argument
+extractor and replacer will need to be updated within ``unumpy``.
+
+A lot of argument extractors can be re-used from the existing implementation
+of the ``__array_function__`` protocol, and the replacers can usually be
+re-used across many methods.
+
+For the parts of the namespace which are going to be overridable by default,
+the main method will need to be renamed and hidden behind a ``uarray`` multimethod.
+
+Default implementations are usually seen in the documentation using the words
+"equivalent to", and thus, are easily available.
+
+``uarray`` Primer
+~~~~~~~~~~~~~~~~~
+
+**Note:** *This section will not attempt to go into too much detail about
+uarray; that is the purpose of the uarray documentation.* [1]_
+*However, the NumPy community will have input into the design of
+uarray, via the issue tracker.*
+
+``unumpy`` is the interface that defines a set of overridable functions
+(multimethods) compatible with the NumPy API. To do this, it uses the
+``uarray`` library. ``uarray`` is a general purpose tool for creating
+multimethods that dispatch to one of multiple different possible backend
+implementations. In this sense, it is similar to the ``__array_function__``
+protocol but with the key difference that the backend is explicitly installed
+by the end-user and not coupled into the array type.
+
+Decoupling the backend from the array type gives much more flexibility to
+end-users and backend authors. For example, it is possible to:
+
+* override functions not taking arrays as arguments
+* create backends out of source from the array type
+* install multiple backends for the same array type
+
+This decoupling also means that ``uarray`` is not constrained to dispatching
+over array-like types. The backend is free to inspect the entire set of
+function arguments to determine if it can implement the function e.g. ``dtype``
+parameter dispatching.
+
+Defining backends
+^^^^^^^^^^^^^^^^^
+
+``uarray`` consists of two main protocols: ``__ua_convert__`` and
+``__ua_function__``, called in that order, along with ``__ua_domain__``.
+``__ua_convert__`` is for conversion and coercion. It has the signature
+``(dispatchables, coerce)``, where ``dispatchables`` is an iterable of
+``ua.Dispatchable`` objects and ``coerce`` is a boolean indicating whether or
+not to force the conversion. ``ua.Dispatchable`` is a simple class consisting
+of three simple values: ``type``, ``value``, and ``coercible``.
+``__ua_convert__`` returns an iterable of the converted values, or
+``NotImplemented`` in the case of failure.
+
+``__ua_function__`` has the signature ``(func, args, kwargs)`` and defines
+the actual implementation of the function. It receives the function and its
+arguments. Returning ``NotImplemented`` will cause a move to the default
+implementation of the function if one exists, and failing that, the next
+backend.
+
+Here is what will happen assuming a ``uarray`` multimethod is called (a rough
+code sketch follows the list):
+
+1. We canonicalise the arguments so any arguments without a default
+ are placed in ``*args`` and those with one are placed in ``**kwargs``.
+2. We check the list of backends.
+
+ a. If it is empty, we try the default implementation.
+
+3. We check if the backend's ``__ua_convert__`` method exists. If it exists:
+
+ a. We pass it the output of the dispatcher,
+ which is an iterable of ``ua.Dispatchable`` objects.
+ b. We feed this output, along with the arguments,
+ to the argument replacer. ``NotImplemented`` means we move to 3
+ with the next backend.
+ c. We store the replaced arguments as the new arguments.
+
+4. We feed the arguments into ``__ua_function__``, and return the output, and
+ exit if it isn't ``NotImplemented``.
+5. If the default implementation exists, we try it with the current backend.
+6. On failure, we move to 3 with the next backend. If there are no more
+ backends, we move to 7.
+7. We raise a ``ua.BackendNotImplementedError``.
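+
+The following pseudocode sketches this dispatch loop. It is illustrative only,
+not ``uarray``'s actual implementation; the attribute names ``dispatcher``,
+``replacer`` and ``default`` are assumptions standing in for the multimethod's
+dispatcher, argument replacer and optional default implementation::
+
+    import uarray as ua
+
+    def call_multimethod(method, backends, args, kwargs):
+        # Step 1 (canonicalising the arguments) is omitted for brevity.
+        if not backends and method.default is not None:
+            return method.default(*args, **kwargs)  # step 2a
+        for backend in backends:
+            if hasattr(backend, '__ua_convert__'):  # step 3
+                dispatchables = method.dispatcher(*args, **kwargs)
+                converted = backend.__ua_convert__(dispatchables, coerce=False)
+                if converted is NotImplemented:
+                    continue  # step 3b: move on to the next backend
+                # Step 3c: store the replaced arguments.
+                args, kwargs = method.replacer(args, kwargs, converted)
+            result = backend.__ua_function__(method, args, kwargs)  # step 4
+            if result is NotImplemented and method.default is not None:
+                result = method.default(*args, **kwargs)  # step 5
+            if result is not NotImplemented:
+                return result
+        # Step 7: no backend (or default) could handle the call.
+        raise ua.BackendNotImplementedError(method)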
+
+Defining overridable multimethods
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To define an overridable function (a multimethod), one needs a few things:
+
+1. A dispatcher that returns an iterable of ``ua.Dispatchable`` objects.
+2. A reverse dispatcher that replaces dispatchable values with the supplied
+ ones.
+3. A domain.
+4. Optionally, a default implementation, which can be provided in terms of
+ other multimethods.
+
+As an example, consider the following::
+
+    import numpy as np
+    import uarray as ua
+
+ def full_argreplacer(args, kwargs, dispatchables):
+ def full(shape, fill_value, dtype=None, order='C'):
+ return (shape, fill_value), dict(
+ dtype=dispatchables[0],
+ order=order
+ )
+
+ return full(*args, **kwargs)
+
+ @ua.create_multimethod(full_argreplacer, domain="numpy")
+ def full(shape, fill_value, dtype=None, order='C'):
+ return (ua.Dispatchable(dtype, np.dtype),)
+
+A large set of examples can be found in the ``unumpy`` repository [8]_.
+This simple act of overriding callables allows us to override:
+
+* Methods
+* Properties, via ``fget`` and ``fset``
+* Entire objects, via ``__get__``.
+
+Examples for NumPy
+^^^^^^^^^^^^^^^^^^
+
+A library that implements a NumPy-like API will use it in the following
+manner (as an example)::
+
+ import numpy.overridable as unp
+ _ua_implementations = {}
+
+ __ua_domain__ = "numpy"
+
+ def __ua_function__(func, args, kwargs):
+ fn = _ua_implementations.get(func, None)
+ return fn(*args, **kwargs) if fn is not None else NotImplemented
+
+ def implements(ua_func):
+ def inner(func):
+ _ua_implementations[ua_func] = func
+ return func
+
+ return inner
+
+ @implements(unp.asarray)
+ def asarray(a, dtype=None, order=None):
+        # Implementation code goes here. Either this method or
+        # __ua_convert__ must return NotImplemented for unsupported
+        # types, or those types shouldn't be marked as dispatchable.
+        ...
+
+ # Provides a default implementation for ones and zeros.
+ @implements(unp.full)
+ def full(shape, fill_value, dtype=None, order='C'):
+        # Implementation code goes here.
+        ...
+
+Alternatives
+------------
+
+The current alternative to this problem is a combination of NEP-18 [2]_,
+NEP-13 [4]_ and NEP-30 [9]_, plus the addition of more protocols (not yet
+specified). Even then, some parts of the NumPy API will remain
+non-overridable, so it's a partial alternative.
+
+The main alternative to vendoring ``unumpy`` is to simply move it into NumPy
+completely and not distribute it as a separate package. This would also achieve
+the proposed goals, however we prefer to keep it a separate package for now,
+for reasons already stated above.
+
+The third alternative is to move ``unumpy`` into the NumPy organisation and
+develop it as a NumPy project. This will also achieve the stated goals, and is
+also a possibility that can be considered by this NEP. However, the act of
+doing an extra ``pip install`` or ``conda install`` may discourage some users
+from adopting this method.
+
+An alternative to requiring opt-in is to *not* override ``np.asarray``
+and ``np.array``, make the rest of the NumPy API surface overridable, and
+instead provide ``np.duckarray`` and ``np.asduckarray``
+as duck-array friendly alternatives that use the respective overrides. However,
+this has the downside of adding a minor overhead to NumPy calls.
+
+Discussion
+----------
+
+* ``uarray`` blogpost: https://labs.quansight.org/blog/2019/07/uarray-update-api-changes-overhead-and-comparison-to-__array_function__/
+* The discussion section of NEP-18: https://numpy.org/neps/nep-0018-array-function-protocol.html#discussion
+* NEP-22: https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html
+* Dask issue #4462: https://github.com/dask/dask/issues/4462
+* PR #13046: https://github.com/numpy/numpy/pull/13046
+* Dask issue #4883: https://github.com/dask/dask/issues/4883
+* Issue #13831: https://github.com/numpy/numpy/issues/13831
+* Discussion PR 1: https://github.com/hameerabbasi/numpy/pull/3
+* Discussion PR 2: https://github.com/hameerabbasi/numpy/pull/4
+* Discussion PR 3: https://github.com/numpy/numpy/pull/14389
+
+
+References and Footnotes
+------------------------
+
+.. [1] uarray, A general dispatch mechanism for Python: https://uarray.readthedocs.io
+
+.. [2] NEP 18 — A dispatch mechanism for NumPy’s high level array functions: https://numpy.org/neps/nep-0018-array-function-protocol.html
+
+.. [3] NEP 22 — Duck typing for NumPy arrays – high level overview: https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html
+
+.. [4] NEP 13 — A Mechanism for Overriding Ufuncs: https://numpy.org/neps/nep-0013-ufunc-overrides.html
+
+.. [5] Reply to Adding to the non-dispatched implementation of NumPy methods: http://numpy-discussion.10968.n7.nabble.com/Adding-to-the-non-dispatched-implementation-of-NumPy-methods-tp46816p46874.html
+
+.. [6] Custom Dtype/Units discussion: http://numpy-discussion.10968.n7.nabble.com/Custom-Dtype-Units-discussion-td43262.html
+
+.. [7] The epic dtype cleanup plan: https://github.com/numpy/numpy/issues/2899
+
+.. [8] unumpy: NumPy, but implementation-independent: https://unumpy.readthedocs.io
+
+.. [9] NEP 30 — Duck Typing for NumPy Arrays - Implementation: https://www.numpy.org/neps/nep-0030-duck-array-protocol.html
+
+.. [10] http://scipy.github.io/devdocs/fft.html#backend-control
+
+
+Copyright
+---------
+
+This document has been placed in the public domain.
--- /dev/null
+==================================================
+NEP 32 — Remove the financial functions from NumPy
+==================================================
+
+:Author: Warren Weckesser <warren.weckesser@gmail.com>
+:Status: Accepted
+:Type: Standards Track
+:Created: 2019-08-30
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-September/080074.html
+
+
+Abstract
+--------
+
+We propose deprecating and ultimately removing the financial functions [1]_
+from NumPy. The functions will be moved to an independent repository,
+and provided to the community as a separate package with the name
+``numpy_financial``.
+
+
+Motivation and scope
+--------------------
+
+The NumPy financial functions [1]_ are the 10 functions ``fv``, ``ipmt``,
+``irr``, ``mirr``, ``nper``, ``npv``, ``pmt``, ``ppmt``, ``pv`` and ``rate``.
+The functions provide elementary financial calculations such as future value,
+net present value, etc. These functions were added to NumPy in 2008 [2]_.
+
+In May, 2009, a request by Joe Harrington to add a function called ``xirr`` to
+the financial functions triggered a long thread about these functions [3]_.
+One important point that came up in that thread is that a "real" financial
+library must be able to handle real dates. The NumPy financial functions do
+not work with actual dates or calendars. The preference for a more capable
+library independent of NumPy was expressed several times in that thread.
+
+In June, 2009, D. L. Goldsmith expressed concerns about the correctness of the
+implementations of some of the financial functions [4]_. It was suggested then
+to move the financial functions out of NumPy to an independent package.
+
+In a GitHub issue in 2013 [5]_, Nathaniel Smith suggested moving the financial
+functions from the top-level namespace to ``numpy.financial``. He also
+suggested giving the functions better names. Responses at that time included
+the suggestion to deprecate them and move them from NumPy to a separate
+package. This issue is still open.
+
+Later in 2013 [6]_, it was suggested on the mailing list that these functions
+be removed from NumPy.
+
+The arguments for the removal of these functions from NumPy:
+
+* They are too specialized for NumPy.
+* They are not actually useful for "real world" financial calculations, because
+ they do not handle real dates and calendars.
+* The definition of "correctness" for some of these functions seems to be a
+ matter of convention, and the current NumPy developers do not have the
+ background to judge their correctness.
+* There has been little interest among past and present NumPy developers
+ in maintaining these functions.
+
+The main arguments for keeping the functions in NumPy are:
+
+* Removing these functions will be disruptive for some users. Current users
+ will have to add the new ``numpy_financial`` package to their dependencies,
+ and then modify their code to use the new package.
+* The functions provided, while not "industrial strength", are apparently
+ similar to functions provided by spreadsheets and some calculators. Having
+ them available in NumPy makes it easier for some developers to migrate their
+ software to Python and NumPy.
+
+It is clear from comments in the mailing list discussions and in the GitHub
+issues that many current NumPy developers believe the benefits of removing
+the functions outweigh the costs. For example, from [5]_::
+
+ The financial functions should probably be part of a separate package
+ -- Charles Harris
+
+ If there's a better package we can point people to we could just deprecate
+ them and then remove them entirely... I'd be fine with that too...
+ -- Nathaniel Smith
+
+ +1 to deprecate them. If no other package exists, it can be created if
+ someone feels the need for that.
+ -- Ralf Gommers
+
+ I feel pretty strongly that we should deprecate these. If nobody on numpy’s
+ core team is interested in maintaining them, then it is purely a drag on
+ development for NumPy.
+ -- Stephan Hoyer
+
+And from the 2013 mailing list discussion, about removing the functions from
+NumPy::
+
+ I am +1 as well, I don't think they should have been included in the first
+ place.
+ -- David Cournapeau
+
+But not everyone was in favor of removal::
+
+ The fin routines are tiny and don't require much maintenance once
+ written. If we made an effort (putting up pages with examples of common
+ financial calculations and collecting those under a topical web page,
+ then linking to that page from various places and talking it up), I
+ would think they could attract users looking for a free way to play with
+ financial scenarios. [...]
+ So, I would say we keep them. If ours are not the best, we should bring
+ them up to snuff.
+ -- Joe Harrington
+
+For an idea of the maintenance burden of the financial functions, one can
+look for all the GitHub issues [7]_ and pull requests [8]_ that have the tag
+``component: numpy.lib.financial``.
+
+One method for measuring the effect of removing these functions is to find
+all the packages on GitHub that use them. Such a search can be performed
+with the ``python-api-inspect`` service [9]_. A search for all uses of the
+NumPy financial functions finds just eight repositories. (See the comments
+in [5]_ for the actual SQL query.)
+
+
+Implementation
+--------------
+
+* Create a new Python package, ``numpy_financial``, to be maintained in the
+ top-level NumPy github organization. This repository will contain the
+ definitions and unit tests for the financial functions. The package will
+ be added to PyPI so it can be installed with ``pip``.
+* Deprecate the financial functions in the ``numpy`` namespace, beginning in
+ NumPy version 1.18. Remove the financial functions from NumPy version 1.20.
+
+
+Backward compatibility
+----------------------
+
+The removal of these functions breaks backward compatibility, as explained
+earlier. The effects are mitigated by providing the ``numpy_financial``
+library.
+
+
+Alternatives
+------------
+
+The following alternatives were mentioned in [5]_:
+
+* *Maintain the functions as they are (i.e. do nothing).*
+ A review of the history makes clear that this is not the preference of many
+ NumPy developers. A recurring comment is that the functions simply do not
+ belong in NumPy. When that sentiment is combined with the history of bug
+ reports and the ongoing questions about the correctness of the functions, the
+ conclusion is that the cleanest solution is deprecation and removal.
+* *Move the functions from the numpy namespace to numpy.financial.*
+ This was the initial suggestion in [5]_. Such a change does not address the
+ maintenance issues, and doesn't change the misfit that many developers see
+ between these functions and NumPy. It causes disruption for the current
+ users of these functions without addressing what many developers see as the
+ fundamental problem.
+
+
+Discussion
+----------
+
+Links to past mailing list discussions, and to relevant GitHub issues and pull
+requests, have already been given. The announcement of this NEP was made on
+the NumPy-Discussion mailing list on 3 September 2019 [10]_, and on the
+PyData mailing list on 8 September 2019 [11]_. The formal proposal to accept
+the NEP was made on 19 September 2019 [12]_; a notification was also sent to
+PyData (same thread as [11]_). There have been no substantive objections.
+
+
+References and footnotes
+------------------------
+
+.. [1] Financial functions,
+ https://numpy.org/doc/1.17/reference/routines.financial.html
+
+.. [2] Numpy-discussion mailing list, "Simple financial functions for NumPy",
+ https://mail.python.org/pipermail/numpy-discussion/2008-April/032353.html
+
+.. [3] Numpy-discussion mailing list, "add xirr to numpy financial functions?",
+ https://mail.python.org/pipermail/numpy-discussion/2009-May/042645.html
+
+.. [4] Numpy-discussion mailing list, "Definitions of pv, fv, nper, pmt, and rate",
+ https://mail.python.org/pipermail/numpy-discussion/2009-June/043188.html
+
+.. [5] Get financial functions out of main namespace,
+ https://github.com/numpy/numpy/issues/2880
+
+.. [6] Numpy-discussion mailing list, "Deprecation of financial routines",
+ https://mail.python.org/pipermail/numpy-discussion/2013-August/067409.html
+
+.. [7] ``component: numpy.lib.financial`` issues,
+ https://github.com/numpy/numpy/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22component%3A+numpy.lib.financial%22+
+
+.. [8] ``component: numpy.lib.financial`` pull requests,
+ https://github.com/numpy/numpy/pulls?utf8=%E2%9C%93&q=is%3Apr+label%3A%22component%3A+numpy.lib.financial%22+
+
+.. [9] Quansight-Labs/python-api-inspect,
+ https://github.com/Quansight-Labs/python-api-inspect/
+
+.. [10] Numpy-discussion mailing list, "NEP 32: Remove the financial functions
+ from NumPy"
+ https://mail.python.org/pipermail/numpy-discussion/2019-September/079965.html
+
+.. [11] PyData mailing list (pydata@googlegroups.com), "NumPy proposal to
+ remove the financial functions."
+ https://mail.google.com/mail/u/0/h/1w0mjgixc4rpe/?&th=16d5c38be45f77c4&q=nep+32&v=c&s=q
+
+.. [12] Numpy-discussion mailing list, "Proposal to accept NEP 32: Remove the
+ financial functions from NumPy"
+ https://mail.python.org/pipermail/numpy-discussion/2019-September/080074.html
+
+Copyright
+---------
+
+This document has been placed in the public domain.
--- /dev/null
+===========================================================
+NEP 34 — Disallow inferring ``dtype=object`` from sequences
+===========================================================
+
+:Author: Matti Picus
+:Status: Accepted
+:Type: Standards Track
+:Created: 2019-10-10
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-October/080200.html
+
+Abstract
+--------
+
+When users create arrays with sequences-of-sequences, they sometimes err in
+matching the lengths of the nested sequences_, commonly called "ragged
+arrays". Here we will refer to them as ragged nested sequences. Creating such
+arrays via ``np.array([<ragged_nested_sequence>])`` with no ``dtype`` keyword
+argument will today default to an ``object``-dtype array. This NEP proposes
+changing that behaviour to raise a ``ValueError`` instead.
+
+Motivation and Scope
+--------------------
+
+Users who specify lists-of-lists when creating a `numpy.ndarray` via
+``np.array`` may mistakenly pass in lists of different lengths. Currently we
+accept this input and automatically create an array with ``dtype=object``. This
+can be confusing, since it is rarely what is desired. Changing the automatic
+dtype detection to never return ``object`` for ragged nested sequences (defined as a
+recursive sequence of sequences, where not all the sequences on the same
+level have the same length) will force users who actually wish to create
+``object`` arrays to specify that explicitly. Note that lists, tuples,
+and ``np.ndarray`` objects are all sequences [0]_. See for instance `issue 5303`_.
+
+Usage and Impact
+----------------
+
+After this change, array creation with ragged nested sequences must explicitly
+define a dtype:
+
+ >>> np.array([[1, 2], [1]])
+ ValueError: cannot guess the desired dtype from the input
+
+ >>> np.array([[1, 2], [1]], dtype=object)
+ # succeeds, with no change from current behaviour
+
+The deprecation will affect any call that internally calls ``np.asarray``. For
+instance, the ``assert_equal`` family of functions calls ``np.asarray``, so
+users will have to change code like::
+
+    np.testing.assert_equal(a, [[1, 2], 3])
+
+to::
+
+    np.testing.assert_equal(a, np.array([[1, 2], 3], dtype=object))
+
+Detailed description
+--------------------
+
+To explicitly set the shape of the object array, since it is sometimes hard to
+determine what shape is desired, one could use:
+
+ >>> arr = np.empty(correct_shape, dtype=object)
+ >>> arr[...] = values
+
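+For instance, to build a length-2 object array holding two lists of different
+lengths (the values here are chosen purely for illustration):
+
+    >>> arr = np.empty(2, dtype=object)
+    >>> arr[...] = [[1, 2], [1]]
+    >>> arr
+    array([list([1, 2]), list([1])], dtype=object)
+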
+We will also reject mixed sequences of non-sequences and sequences; for
+instance, both of the following will be rejected:
+
+ >>> arr = np.array([np.arange(10), [10]])
+ >>> arr = np.array([[range(3), range(3), range(3)], [range(3), 0, 0]])
+
+Related Work
+------------
+
+`PR 14341`_ tried to raise an error when ragged nested sequences were specified
+with a numeric dtype (``np.array([[1], [2, 3]], dtype=int)``), but failed due
+to false positives, for instance ``np.array([1, np.array([5])], dtype=int)``.
+
+.. _`PR 14341`: https://github.com/numpy/numpy/pull/14341
+
+Implementation
+--------------
+
+The code to be changed is inside ``PyArray_GetArrayParamsFromObject`` and the
+internal ``discover_dimensions`` function. See `PR 14794`_.
+
+Backward compatibility
+----------------------
+
+Anyone depending on creating object arrays from ragged nested sequences will
+need to modify their code. There will be a deprecation period during which the
+current behaviour will emit a ``DeprecationWarning``.
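+
+One way for downstream projects to find affected call sites early is to turn
+that warning into an error in their test suites; a minimal sketch, assuming the
+warning is a ``DeprecationWarning`` as described above::
+
+    import warnings
+
+    with warnings.catch_warnings():
+        warnings.simplefilter("error", DeprecationWarning)
+        np.array([[1, 2], [1]])   # raises during the deprecation period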
+
+Alternatives
+------------
+
+- We could continue with the current situation.
+
+- It was also suggested to add a kwarg ``depth`` to array creation, or perhaps
+ to add another array creation API function ``ragged_array_object``. The goal
+ was to eliminate the ambiguity in creating an object array from ``array([[1,
+ 2], [1]], dtype=object)``: should the returned array have a shape of
+ ``(1,)``, or ``(2,)``? This NEP does not deal with that issue, and only
+ deprecates the use of ``array`` with no ``dtype=object`` for ragged nested
+ sequences. Users of ragged nested sequences may face another deprecation
+ cycle in the future. Rationale: we expect that there are very few users who
+ intend to use ragged arrays like that; it was never an intended use case of
+ NumPy arrays. Such users are likely better off with `another library`_ or
+ just using lists of lists.
+
+- It was also suggested to deprecate all automatic creation of ``object``-dtype
+ arrays, which would require adding an explicit ``dtype=object`` for something
+ like ``np.array([Decimal(10), Decimal(10)])``. This too is out of scope for
+ the current NEP. Rationale: it is harder to assess the impact of this larger
+ change, and we are not sure how many users it would affect.
+
+Discussion
+----------
+
+Comments on `issue 5303`_ indicate that this behaviour was considered
+unintended as far back as 2014. Suggestions to change it have been made in the
+ensuing years, but none
+have stuck. The WIP implementation in `PR 14794`_ seems to point to the
+viability of this approach.
+
+References and Footnotes
+------------------------
+
+.. _`issue 5303`: https://github.com/numpy/numpy/issues/5303
+.. _sequences: https://docs.python.org/3.7/glossary.html#term-sequence
+.. _`PR 14794`: https://github.com/numpy/numpy/pull/14794
+.. _`another library`: https://github.com/scikit-hep/awkward-array
+
+.. [0] ``np.ndarray`` objects are not recursed into; rather, their shape is used
+ directly. This will not emit warnings::
+
+ ragged = np.array([[1], [1, 2, 3]], dtype=object)
+ np.array([ragged, ragged]) # no dtype needed
+
+Copyright
+---------
+
+This document has been placed in the public domain.
-=============================
-NEP Template and Instructions
-=============================
+=================================
+NEP X — Template and Instructions
+=================================
:Author: <list of authors' real names and optionally, email addresses>
:Status: <Draft | Active | Accepted | Deferred | Rejected | Withdrawn | Final | Superseded>
:Created: <date created on, in yyyy-mm-dd format>
:Resolution: <url> (required for Accepted | Rejected | Withdrawn)
+
Abstract
--------
The abstract should be a short description of what the NEP will achieve.
+Note that the dash in the title is an em-dash ("—"), not a hyphen ("-").
+
+Motivation and Scope
+--------------------
+
+This section describes the need for the proposed change. It should describe
+the existing problem, who it affects, what it is trying to solve, and why.
+This section should explicitly address the scope of and key requirements for
+the proposed change.
+
+Usage and Impact
+----------------
+
+This section describes how users of NumPy will use features described in this
+NEP. It should consist mainly of code examples that wouldn't be possible
+without acceptance and implementation of this NEP, as well as the impact the
+proposed changes would have on the ecosystem. It should be written from the
+perspective of NumPy users and the benefits it will provide them; as such, it
+should include implementation details only if necessary to explain the
+functionality.
+
+Backward compatibility
+----------------------
+
+This section describes the ways in which the NEP breaks backward compatibility.
+
+The mailing list post will contain the NEP up to and including this section.
+Its purpose is to provide a high-level summary to users who are not interested
+in detailed technical discussion, but may have opinions around, e.g., usage and
+impact.
Detailed description
--------------------
-This section describes the need for the NEP. It should describe the existing
-problem that it is trying to solve and why this NEP makes the situation better.
-It should include examples of how the new functionality would be used and
-perhaps some use cases.
+This section should provide a detailed description of the proposed change.
+It should include examples of how the new functionality would be used,
+intended use-cases and pseudo-code illustrating its use.
+
+
+Related Work
+------------
+
+This section should list relevant and/or similar technologies, possibly in other
+libraries. It does not need to be comprehensive, just list the major examples of
+prior and relevant art.

Implementation
--------------

This section lists the major steps required to implement the NEP. Where
possible, it should be noted where one step is dependent on another, and which
-steps may be optionally omitted. Where it makes sense, each step should
-include a link related pull requests as the implementation progresses.
+steps may be optionally omitted. Where it makes sense, each step should
+include a link to related pull requests as the implementation progresses.
Any pull requests or development branches containing work on this NEP should
be linked to from here. (A NEP does not need to be implemented in a single
pull request if it makes sense to implement it in discrete phases).
-Backward compatibility
-----------------------
-
-This section describes the ways in which the NEP breaks backward compatibility.
-
-
Alternatives
------------
- One of these should probably be the default for text data. The current
behavior on Python 3 is neither efficient nor user friendly.
-- `np.int` should not be platform dependent
+- ``np.dtype(int)`` should not be platform dependent
- Better coercion for string + number
Performance
+++ /dev/null
-from __future__ import division, absolute_import, print_function
-
-import floatint.floatint as ff
-import numpy as np
-
-# Setting items via np.array is hard because the parser doesn't
-# always stop at tuples, so the setitem code can be called with
-# scalars on a wrongly shaped array.
-# But we can get a view as an ndarray of the given type:
-g = np.array([1, 2, 3, 4, 5, 6, 7, 8]).view(ff.floatint_type)
-
-# Now, the elements will be the scalar type associated
-# with the ndarray.
-print(g[0])
-print(type(g[1]))
-
-# Now, you need to register ufuncs and more arrfuncs to do useful things...
+++ /dev/null
-
-#include "Python.h"
-#include "structmember.h" /* for offset of macro if needed */
-#include "numpy/arrayobject.h"
-
-
-/* Use a Python float as the canonical type being added
-*/
-
-typedef struct _floatint {
- PyObject_HEAD
- npy_int32 first;
- npy_int32 last;
-} PyFloatIntObject;
-
-static PyTypeObject PyFloatInt_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /*ob_size*/
- "floatint.floatint", /*tp_name*/
- sizeof(PyFloatIntObject), /*tp_basicsize*/
-};
-
-static PyArray_ArrFuncs _PyFloatInt_Funcs;
-
-#define _ALIGN(type) offsetof(struct {char c; type v;},v)
-
-/* The scalar-type */
-
-static PyArray_Descr _PyFloatInt_Dtype = {
- PyObject_HEAD_INIT(NULL)
- &PyFloatInt_Type,
- 'f',
- '0',
- '=',
- 0,
- 0,
- sizeof(double),
- _ALIGN(double),
- NULL,
- NULL,
- NULL,
- &_PyFloatInt_Funcs
-};
-
-static void
-twoint_copyswap(void *dst, void *src, int swap, void *arr)
-{
- if (src != NULL) {
- memcpy(dst, src, sizeof(double));
- }
-
- if (swap) {
- register char *a, *b, c;
- a = (char *)dst;
- b = a + 7;
- c = *a; *a++ = *b; *b-- = c;
- c = *a; *a++ = *b; *b-- = c;
- c = *a; *a++ = *b; *b-- = c;
- c = *a; *a++ = *b; *b = c;
- }
-}
-
-static PyObject *
-twoint_getitem(char *ip, PyArrayObject *ap) {
- npy_int32 a[2];
-
- if ((ap==NULL) || PyArray_ISBEHAVED_RO(ap)) {
- a[0] = *((npy_int32 *)ip);
- a[1] = *((npy_int32 *)ip + 1);
- }
- else {
- ap->descr->f->copyswap(a, ip, !PyArray_ISNOTSWAPPED(ap), ap);
- }
- return Py_BuildValue("(ii)", a[0], a[1]);
-}
-
-static int
-twoint_setitem(PyObject *op, char *ov, PyArrayObject *ap) {
- npy_int32 a[2];
-
- if (!PyTuple_Check(op)) {
- PyErr_SetString(PyExc_TypeError, "must be a tuple");
- return -1;
- }
- if (!PyArg_ParseTuple(op, "ii", a, a+1)) return -1;
-
- if (ap == NULL || PyArray_ISBEHAVED(ap)) {
- memcpy(ov, a, sizeof(double));
- }
- else {
- ap->descr->f->copyswap(ov, a, !PyArray_ISNOTSWAPPED(ap), ap);
- }
- return 0;
-}
-
-static PyArray_Descr * _register_dtype(void)
-{
- int userval;
- PyArray_InitArrFuncs(&_PyFloatInt_Funcs);
- /* Add copyswap,
- nonzero, getitem, setitem*/
- _PyFloatInt_Funcs.copyswap = twoint_copyswap;
- _PyFloatInt_Funcs.getitem = (PyArray_GetItemFunc *)twoint_getitem;
- _PyFloatInt_Funcs.setitem = (PyArray_SetItemFunc *)twoint_setitem;
- _PyFloatInt_Dtype.ob_type = &PyArrayDescr_Type;
-
- userval = PyArray_RegisterDataType(&_PyFloatInt_Dtype);
- return PyArray_DescrFromType(userval);
-}
-
-
-/* Initialization function for the module (*must* be called init<name>) */
-
-PyMODINIT_FUNC initfloatint(void) {
- PyObject *m, *d;
- PyArray_Descr *dtype;
-
- /* Create the module and add the functions */
- m = Py_InitModule("floatint", NULL);
-
- /* Import the array objects */
- import_array();
-
-
- /* Initialize the new float type */
-
- /* Add some symbolic constants to the module */
- d = PyModule_GetDict(m);
-
- if (PyType_Ready(&PyFloat_Type) < 0) return;
- PyFloatInt_Type.tp_base = &PyFloat_Type;
- /* This is only needed because we are sub-typing the
- Float type and must pre-set some function pointers
- to get PyType_Ready to fill in the rest.
- */
- PyFloatInt_Type.tp_alloc = PyType_GenericAlloc;
- PyFloatInt_Type.tp_new = PyFloat_Type.tp_new;
- PyFloatInt_Type.tp_dealloc = PyFloat_Type.tp_dealloc;
- PyFloatInt_Type.tp_free = PyObject_Del;
- if (PyType_Ready(&PyFloatInt_Type) < 0) return;
- /* End specific code */
-
-
- dtype = _register_dtype();
- Py_XINCREF(dtype);
- if (dtype != NULL) {
- PyDict_SetItemString(d, "floatint_type", (PyObject *)dtype);
- }
- Py_INCREF(&PyFloatInt_Type);
- PyDict_SetItemString(d, "floatint", (PyObject *)&PyFloatInt_Type);
- return;
-}
+++ /dev/null
-from __future__ import division, absolute_import, print_function
+++ /dev/null
-from __future__ import division, print_function
-
-from numpy.distutils.core import setup
-
-def configuration(parent_package = '', top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('floatint', parent_package, top_path)
-
- config.add_extension('floatint',
- sources = ['floatint.c'])
- return config
-
-setup(configuration=configuration)
**Dictionary (keys "names", "titles", and "formats")**
- This will be converted to a ``PyArray_VOID`` type with corresponding
+ This will be converted to a ``NPY_VOID`` type with corresponding
fields parameter (the formats list will be converted to actual
``PyArray_Descr *`` objects).
**Objects (anything with an .itemsize and .fields attribute)**
 If it's an instance of (a sub-class of) void type, then a new
``PyArray_Descr*`` structure is created corresponding to its
- typeobject (and ``PyArray_VOID``) typenumber. If the type is
+ typeobject (and ``NPY_VOID``) typenumber. If the type is
registered, then the registered type-number is used.
- Otherwise a new ``PyArray_VOID PyArray_Descr*`` structure is created
+ Otherwise a new ``NPY_VOID PyArray_Descr*`` structure is created
 with its ``->elsize`` and ``->fields`` members filled in appropriately.
The itemsize attribute must return a number > 0. The fields
+++ /dev/null
-==========================
-NumPy 1.10.0 Release Notes
-==========================
-
-This release supports Python 2.6 - 2.7 and 3.2 - 3.5.
-
-
-Highlights
-==========
-* numpy.distutils now supports parallel compilation via the --parallel/-j
- argument passed to setup.py build
-* numpy.distutils now supports additional customization via site.cfg to
- control compilation parameters, i.e. runtime libraries, extra
- linking/compilation flags.
-* Addition of *np.linalg.multi_dot*: compute the dot product of two or more
- arrays in a single function call, while automatically selecting the fastest
- evaluation order.
-* The new function `np.stack` provides a general interface for joining a
- sequence of arrays along a new axis, complementing `np.concatenate` for
- joining along an existing axis.
-* Addition of `nanprod` to the set of nanfunctions.
-* Support for the '@' operator in Python 3.5.
-
-Dropped Support
-===============
-
-* The _dotblas module has been removed. CBLAS Support is now in
- Multiarray.
-* The testcalcs.py file has been removed.
-* The polytemplate.py file has been removed.
-* npy_PyFile_Dup and npy_PyFile_DupClose have been removed from
- npy_3kcompat.h.
-* splitcmdline has been removed from numpy/distutils/exec_command.py.
-* try_run and get_output have been removed from
- numpy/distutils/command/config.py
-* The a._format attribute is no longer supported for array printing.
-* Keywords ``skiprows`` and ``missing`` removed from np.genfromtxt.
-* Keyword ``old_behavior`` removed from np.correlate.
-
-Future Changes
-==============
-
-* In array comparisons like ``arr1 == arr2``, many corner cases
- involving strings or structured dtypes that used to return scalars
- now issue ``FutureWarning`` or ``DeprecationWarning``, and in the
- future will be changed to either perform elementwise comparisons or
- raise an error.
-* In ``np.lib.split`` an empty array in the result always had dimension
- ``(0,)`` no matter the dimensions of the array being split. In Numpy 1.11
- that behavior will be changed so that the dimensions will be preserved. A
- ``FutureWarning`` for this change has been in place since Numpy 1.9 but,
- due to a bug, sometimes no warning was raised and the dimensions were
- already preserved.
-* The SafeEval class will be removed in Numpy 1.11.
-* The alterdot and restoredot functions will be removed in Numpy 1.11.
-
-See below for more details on these changes.
-
-Compatibility notes
-===================
-
-Default casting rule change
----------------------------
-Default casting for inplace operations has changed to ``'same_kind'``. For
-instance, if n is an array of integers, and f is an array of floats, then
-``n += f`` will result in a ``TypeError``, whereas in previous Numpy
-versions the floats would be silently cast to ints. In the unlikely case
-that the example code is not an actual bug, it can be updated in a backward
-compatible way by rewriting it as ``np.add(n, f, out=n, casting='unsafe')``.
-The old ``'unsafe'`` default has been deprecated since Numpy 1.7.
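-
-A short illustration of the new behaviour and of the explicit opt-in (the
-arrays here are purely illustrative)::
-
-    n = np.arange(3)                         # integer array
-    f = np.linspace(0, 1, 3)                 # float array
-    # n += f                                 # now raises TypeError
-    np.add(n, f, out=n, casting='unsafe')    # explicit opt-in to the old cast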
-
-numpy version string
---------------------
-The numpy version string for development builds has been changed from
-``x.y.z.dev-githash`` to ``x.y.z.dev0+githash`` (note the +) in order to comply
-with PEP 440.
-
-relaxed stride checking
------------------------
-NPY_RELAXED_STRIDE_CHECKING is now true by default.
-
-UPDATE: In 1.10.2 the default value of NPY_RELAXED_STRIDE_CHECKING was
-changed to false for back compatibility reasons. More time is needed before
-it can be made the default. As part of the roadmap a deprecation of
-dimension changing views of f_contiguous not c_contiguous arrays was also
-added.
-
-Concatenation of 1d arrays along any but ``axis=0`` raises ``IndexError``
--------------------------------------------------------------------------
-Using axis != 0 has raised a DeprecationWarning since NumPy 1.7, it now
-raises an error.
-
-*np.ravel*, *np.diagonal* and *np.diag* now preserve subtypes
--------------------------------------------------------------
-There was inconsistent behavior between *x.ravel()* and *np.ravel(x)*, as
-well as between *x.diagonal()* and *np.diagonal(x)*, with the methods
-preserving subtypes while the functions did not. This has been fixed and
-the functions now behave like the methods, preserving subtypes except in
-the case of matrices. Matrices are special cased for backward
-compatibility and still return 1-D arrays as before. If you need to
-preserve the matrix subtype, use the methods instead of the functions.
-
-*rollaxis* and *swapaxes* always return a view
-----------------------------------------------
-Previously, a view was returned except when no change was made in the order
-of the axes, in which case the input array was returned. A view is now
-returned in all cases.
-
-*nonzero* now returns base ndarrays
------------------------------------
-Previously, an inconsistency existed between 1-D inputs (returning a
-base ndarray) and higher dimensional ones (which preserved subclasses).
-Behavior has been unified, and the return will now be a base ndarray.
-Subclasses can still override this behavior by providing their own
-*nonzero* method.
-
-C API
------
-The changes to *swapaxes* also apply to the *PyArray_SwapAxes* C function,
-which now returns a view in all cases.
-
-The changes to *nonzero* also apply to the *PyArray_Nonzero* C function,
-which now returns a base ndarray in all cases.
-
-The dtype structure (PyArray_Descr) has a new member at the end to cache
-its hash value. This shouldn't affect any well-written applications.
-
-The change to the concatenation function's DeprecationWarning also affects
-PyArray_ConcatenateArrays.
-
-recarray field return types
----------------------------
-Previously the returned types for recarray fields accessed by attribute and by
-index were inconsistent, and fields of string type were returned as chararrays.
-Now, fields accessed by either attribute or indexing will return an ndarray for
-fields of non-structured type, and a recarray for fields of structured type.
-Notably, this affects recarrays containing strings with whitespace, as trailing
-whitespace is trimmed from chararrays but kept in ndarrays of string type.
-Also, the dtype.type of nested structured fields is now inherited.
-
-recarray views
---------------
-Viewing an ndarray as a recarray now automatically converts the dtype to
-np.record. See new record array documentation. Additionally, viewing a recarray
-with a non-structured dtype no longer converts the result's type to ndarray -
-the result will remain a recarray.
-
-'out' keyword argument of ufuncs now accepts tuples of arrays
--------------------------------------------------------------
-When using the 'out' keyword argument of a ufunc, a tuple of arrays, one per
-ufunc output, can be provided. For ufuncs with a single output a single array
-is also a valid 'out' keyword argument. The previous behavior, in which a
-single array provided as 'out' would be used as the first output of a ufunc
-with multiple outputs, is deprecated and will result in a
-`DeprecationWarning` now and an error in the future.
-
-byte-array indices now raise an IndexError
--------------------------------------------
-Indexing an ndarray using a byte-string in Python 3 now raises an IndexError
-instead of a ValueError.
-
-Masked arrays containing objects with arrays
---------------------------------------------
-For such (rare) masked arrays, getting a single masked item no longer returns a
-corrupted masked array, but a fully masked version of the item.
-
-Median warns and returns nan when invalid values are encountered
-----------------------------------------------------------------
-Similar to *mean*, *median* and *percentile* now emit a ``RuntimeWarning`` and
-return `NaN` in slices where a `NaN` is present.
-To compute the median or percentile while ignoring invalid values use the
-new `nanmedian` or `nanpercentile` functions.
-
-Functions available from numpy.ma.testutils have changed
---------------------------------------------------------
-All functions from numpy.testing were once available from
-numpy.ma.testutils but not all of them were redefined to work with masked
-arrays. Most of those functions have now been removed from
-numpy.ma.testutils with a small subset retained in order to preserve
-backward compatibility. In the long run this should help avoid mistaken use
-of the wrong functions, but it may cause import problems for some.
-
-
-New Features
-============
-
-Reading extra flags from site.cfg
----------------------------------
-Previously, customization of the compilation of dependency libraries and numpy
-itself was only accomplishable via code changes in the distutils package.
-Now numpy.distutils reads in the following extra flags from each group of the
-*site.cfg*:
-
-* ``runtime_library_dirs/rpath``, sets runtime library directories to override
- ``LD_LIBRARY_PATH``
-* ``extra_compile_args``, add extra flags to the compilation of sources
-* ``extra_link_args``, add extra flags when linking libraries
-
-This should, at least partially, complete user customization.
-
-*np.cbrt* to compute cube root for real floats
-----------------------------------------------
-*np.cbrt* wraps the C99 cube root function *cbrt*.
-Compared to *np.power(x, 1./3.)* it is well defined for negative real floats
-and a bit faster.
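-
-For example, where ``np.power(-8.0, 1./3.)`` returns ``nan``::
-
-    >>> np.cbrt(-8.0)
-    -2.0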
-
-numpy.distutils now allows parallel compilation
------------------------------------------------
-By passing *--parallel=n* or *-j n* to *setup.py build* the compilation of
-extensions is now performed in *n* parallel processes.
-The parallelization is limited to files within one extension so projects using
-Cython will not profit because it builds extensions from single files.
-
-*genfromtxt* has a new ``max_rows`` argument
---------------------------------------------
-A ``max_rows`` argument has been added to *genfromtxt* to limit the
-number of rows read in a single call. Using this functionality, it is
-possible to read in multiple arrays stored in a single file by making
-repeated calls to the function.
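-
-A sketch of reading two arrays stored back to back in a single file (the file
-name and layout here are illustrative)::
-
-    with open("data.txt") as f:
-        first = np.genfromtxt(f, max_rows=3)   # first three rows
-        second = np.genfromtxt(f)              # remaining rows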
-
-New function *np.broadcast_to* for invoking array broadcasting
---------------------------------------------------------------
-*np.broadcast_to* manually broadcasts an array to a given shape according to
-numpy's broadcasting rules. The functionality is similar to broadcast_arrays,
-which in fact has been rewritten to use broadcast_to internally, but only a
-single array is necessary.
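-
-For example::
-
-    >>> x = np.array([1, 2, 3])
-    >>> np.broadcast_to(x, (3, 3))
-    array([[1, 2, 3],
-           [1, 2, 3],
-           [1, 2, 3]])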
-
-New context manager *clear_and_catch_warnings* for testing warnings
--------------------------------------------------------------------
-When Python emits a warning, it records that this warning has been emitted in
-the module that caused the warning, in a module attribute
-``__warningregistry__``. Once this has happened, it is not possible to emit
-the warning again, unless you clear the relevant entry in
-``__warningregistry__``. This makes it hard and fragile to test warnings,
-because if your test comes after another that has already caused the warning,
-you will not be able to emit the warning or test it. The context manager
-``clear_and_catch_warnings`` clears warnings from the module registry on entry
-and resets them on exit, meaning that warnings can be re-raised.
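-
-A sketch of its use in a test, where ``my_module`` stands in for the module
-under test::
-
-    import warnings
-    from numpy.testing import clear_and_catch_warnings
-    import my_module
-
-    with clear_and_catch_warnings(modules=[my_module]):
-        warnings.simplefilter('always')
-        my_module.function_that_warns()   # emitted even if seen before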
-
-*cov* has new ``fweights`` and ``aweights`` arguments
------------------------------------------------------
-The ``fweights`` and ``aweights`` arguments add new functionality to
-covariance calculations by applying two types of weighting to observation
-vectors. An array of ``fweights`` indicates the number of repeats of each
-observation vector, and an array of ``aweights`` provides their relative
-importance or probability.
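-
-For example, with three observation vectors (one per column)::
-
-    x = np.array([[0., 1., 2.],
-                  [2., 1., 0.]])
-    np.cov(x, fweights=[1, 2, 1])           # middle observation counted twice
-    np.cov(x, aweights=[0.5, 0.25, 0.25])   # relative importance of each column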
-
-Support for the '@' operator in Python 3.5+
--------------------------------------------
-Python 3.5 adds support for a matrix multiplication operator '@' proposed
-in PEP465. Preliminary support for that has been implemented, and an
-equivalent function ``matmul`` has also been added for testing purposes and
-use in earlier Python versions. The function is preliminary and the order
-and number of its optional arguments can be expected to change.
-
-New argument ``norm`` to fft functions
---------------------------------------
-The default normalization has the direct transforms unscaled and the inverse
-transforms are scaled by :math:`1/n`. It is possible to obtain unitary
-transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
-`None`) so that both direct and inverse transforms will be scaled by
-:math:`1/\\sqrt{n}`.
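-
-For example::
-
-    a = np.ones(4)
-    np.fft.fft(a)                  # array([ 4.+0.j,  0.+0.j,  0.+0.j,  0.+0.j])
-    np.fft.fft(a, norm="ortho")    # scaled by 1/sqrt(4), so the first entry is 2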
-
-
-Improvements
-============
-
-*np.digitize* using binary search
----------------------------------
-*np.digitize* is now implemented in terms of *np.searchsorted*. This means
-that a binary search is used to bin the values, which scales much better
-for larger numbers of bins than the previous linear search. It also removes
-the requirement for the input array to be 1-dimensional.
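-
-For example, 2-D input is now accepted::
-
-    bins = np.array([0.0, 1.0, 2.5, 4.0])
-    np.digitize([[0.2, 3.0], [1.5, 5.0]], bins)
-    # array([[1, 3],
-    #        [2, 4]])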
-
-*np.poly* now casts integer inputs to float
--------------------------------------------
-*np.poly* will now cast 1-dimensional input arrays of integer type to double
-precision floating point, to prevent integer overflow when computing the monic
-polynomial. It is still possible to obtain higher precision results by
-passing in an array of object type, filled e.g. with Python ints.
-
-*np.interp* can now be used with periodic functions
----------------------------------------------------
-*np.interp* now has a new parameter *period* that supplies the period of the
-input data *xp*. In such case, the input data is properly normalized to the
-given period and one end point is added to each extremity of *xp* in order to
-close the previous and the next period cycles, resulting in the correct
-interpolation behavior.
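-
-For example, with angular data in degrees::
-
-    xp = [0, 90, 180, 270]
-    fp = [0., 1., 0., -1.]
-    np.interp(405, xp, fp, period=360)   # same as interpolating at 45 -> 0.5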
-
-*np.pad* supports more input types for ``pad_width`` and ``constant_values``
-----------------------------------------------------------------------------
-The ``constant_values`` parameter now accepts NumPy arrays and float values.
-NumPy arrays are supported as input for ``pad_width``, and an exception is
-raised if its values are not of integral type.
-
-*np.argmax* and *np.argmin* now support an ``out`` argument
------------------------------------------------------------
-The ``out`` parameter was added to *np.argmax* and *np.argmin* for consistency
-with *ndarray.argmax* and *ndarray.argmin*. The new parameter behaves exactly
-as it does in those methods.
-
-More system C99 complex functions detected and used
----------------------------------------------------
-All of the functions in ``complex.h`` are now detected. There are new
-fallback implementations of the following functions.
-
-* npy_ctan,
-* npy_cacos, npy_casin, npy_catan
-* npy_ccosh, npy_csinh, npy_ctanh,
-* npy_cacosh, npy_casinh, npy_catanh
-
-As a result of these improvements, there will be some small changes in
-returned values, especially for corner cases.
-
-*np.loadtxt* support for the strings produced by the ``float.hex`` method
--------------------------------------------------------------------------
-The strings produced by ``float.hex`` look like ``0x1.921fb54442d18p+1``,
-so this is not the hex used to represent unsigned integer types.
-
-*np.isclose* properly handles minimal values of integer dtypes
---------------------------------------------------------------
-In order to properly handle minimal values of integer types, *np.isclose* will
-now cast to the float dtype during comparisons. This aligns its behavior with
-what was provided by *np.allclose*.
-
-*np.allclose* uses *np.isclose* internally.
--------------------------------------------
-*np.allclose* now uses *np.isclose* internally and inherits the ability to
-compare NaNs as equal by setting ``equal_nan=True``. Subclasses, such as
-*np.ma.MaskedArray*, are also preserved now.
-
-*np.genfromtxt* now handles large integers correctly
-----------------------------------------------------
-*np.genfromtxt* now correctly handles integers larger than ``2**31-1`` on
-32-bit systems and larger than ``2**63-1`` on 64-bit systems (it previously
-crashed with an ``OverflowError`` in these cases). Integers larger than
-``2**63-1`` are converted to floating-point values.
-
-*np.load*, *np.save* have pickle backward compatibility flags
--------------------------------------------------------------
-
-The functions *np.load* and *np.save* have additional keyword
-arguments for controlling backward compatibility of pickled Python
-objects. This enables Numpy on Python 3 to load npy files containing
-object arrays that were generated on Python 2.
-
-MaskedArray support for more complicated base classes
------------------------------------------------------
-Built-in assumptions that the baseclass behaved like a plain array are being
-removed. In particular, setting and getting elements and ranges will respect
-baseclass overrides of ``__setitem__`` and ``__getitem__``, and arithmetic
-will respect overrides of ``__add__``, ``__sub__``, etc.
-
-Changes
-=======
-
-dotblas functionality moved to multiarray
------------------------------------------
-The cblas versions of dot, inner, and vdot have been integrated into
-the multiarray module. In particular, vdot is now a multiarray function,
-which it was not before.
-
-stricter check of gufunc signature compliance
----------------------------------------------
-Inputs to generalized universal functions are now more strictly checked
-against the function's signature: all core dimensions are now required to
-be present in input arrays; core dimensions with the same label must have
-the exact same size; and output core dimension's must be specified, either
-by a same label input core dimension or by a passed-in output array.
-
-views returned from *np.einsum* are writeable
----------------------------------------------
-Views returned by *np.einsum* will now be writeable whenever the input
-array is writeable.
-
-*np.argmin* skips NaT values
-----------------------------
-
-*np.argmin* now skips NaT values in datetime64 and timedelta64 arrays,
-making it consistent with *np.min*, *np.argmax* and *np.max*.
-
-
-Deprecations
-============
-
-Array comparisons involving strings or structured dtypes
---------------------------------------------------------
-
-Normally, comparison operations on arrays perform elementwise
-comparisons and return arrays of booleans. But in some corner cases,
-especially involving strings or structured dtypes, NumPy has
-historically returned a scalar instead. For example::
-
- ### Current behaviour
-
- np.arange(2) == "foo"
- # -> False
-
- np.arange(2) < "foo"
- # -> True on Python 2, error on Python 3
-
- np.ones(2, dtype="i4,i4") == np.ones(2, dtype="i4,i4,i4")
- # -> False
-
-Continuing work started in 1.9, in 1.10 these comparisons will now
-raise ``FutureWarning`` or ``DeprecationWarning``, and in the future
-they will be modified to behave more consistently with other
-comparison operations, e.g.::
-
- ### Future behaviour
-
- np.arange(2) == "foo"
- # -> array([False, False])
-
- np.arange(2) < "foo"
- # -> error, strings and numbers are not orderable
-
- np.ones(2, dtype="i4,i4") == np.ones(2, dtype="i4,i4,i4")
- # -> [False, False]
-
-SafeEval
---------
-The SafeEval class in numpy/lib/utils.py is deprecated and will be removed
-in the next release.
-
-alterdot, restoredot
---------------------
-The alterdot and restoredot functions no longer do anything, and are
-deprecated.
-
-pkgload, PackageLoader
-----------------------
-These ways of loading packages are now deprecated.
-
-bias, ddof arguments to corrcoef
---------------------------------
-
-The values for the ``bias`` and ``ddof`` arguments to the ``corrcoef``
-function canceled in the division implied by the correlation coefficient and
-so had no effect on the returned values.
-
-We now deprecate these arguments to ``corrcoef`` and the masked array version
-``ma.corrcoef``.
-
-Because we are deprecating the ``bias`` argument to ``ma.corrcoef``, we also
-deprecate the use of the ``allow_masked`` argument as a positional argument,
-as its position will change with the removal of ``bias``. ``allow_masked``
-will in due course become a keyword-only argument.
-
-dtype string representation changes
------------------------------------
-Since 1.6, creating a dtype object from its string representation, e.g.
-``'f4'``, would issue a deprecation warning if the size did not correspond
-to an existing type, and default to creating a dtype of the default size
-for the type. Starting with this release, this will now raise a ``TypeError``.
-
-The only exception is object dtypes, where both ``'O4'`` and ``'O8'`` will
-still issue a deprecation warning. This platform-dependent representation
-will raise an error in the next release.
-
-In preparation for this upcoming change, the string representation of an
-object dtype, i.e. ``np.dtype(object).str``, no longer includes the item
-size, i.e. will return ``'|O'`` instead of ``'|O4'`` or ``'|O8'`` as
-before.
+++ /dev/null
-==========================
-NumPy 1.10.1 Release Notes
-==========================
-
-This release deals with a few build problems that showed up in 1.10.0. Most
-users would not have seen these problems. The differences are:
-
-* Compiling with msvc9 or msvc10 for 32 bit Windows now requires SSE2.
- This was the easiest fix for what looked to be some miscompiled code when
- SSE2 was not used. If you need to compile for 32 bit Windows systems
- without SSE2 support, mingw32 should still work.
-
-* Make compiling with VS2008 python2.7 SDK easier
-
-* Change Intel compiler options so that code will also be generated to
- support systems without SSE4.2.
-
-* Some _config test functions needed an explicit integer return in
- order to avoid the openSUSE rpmlinter erring out.
-
-* We ran into a problem with PyPI not allowing reuse of filenames and a
- resulting proliferation of *.*.*.postN releases. Not only were the names
- getting out of hand, some packages were unable to work with the postN
- suffix.
-
-
-Numpy 1.10.1 supports Python 2.6 - 2.7 and 3.2 - 3.5.
-
-
-Commits:
-
-45a3d84 DEP: Remove warning for `full` when dtype is set.
-0c1a5df BLD: import setuptools to allow compile with VS2008 python2.7 sdk
-04211c6 BUG: mask nan to 1 in ordered compare
-826716f DOC: Document the reason msvc requires SSE2 on 32 bit platforms.
-49fa187 BLD: enable SSE2 for 32-bit msvc 9 and 10 compilers
-dcbc4cc MAINT: remove Wreturn-type warnings from config checks
-d6564cb BLD: do not build exclusively for SSE4.2 processors
-15cb66f BLD: do not build exclusively for SSE4.2 processors
-c38bc08 DOC: fix var. reference in percentile docstring
-78497f4 DOC: Sync 1.10.0-notes.rst in 1.10.x branch with master.
-
+++ /dev/null
-==========================
-NumPy 1.10.2 Release Notes
-==========================
-
-This release deals with a number of bugs that turned up in 1.10.1 and
-adds various build and release improvements.
-
-Numpy 1.10.2 supports Python 2.6 - 2.7 and 3.2 - 3.5.
-
-
-Compatibility notes
-===================
-
-Relaxed stride checking is no longer the default
-------------------------------------------------
-There were back compatibility problems involving views changing the dtype of
-multidimensional Fortran arrays that need to be dealt with over a longer
-timeframe.
-
-Fix swig bug in ``numpy.i``
----------------------------
-Relaxed stride checking revealed a bug in ``array_is_fortran(a)``, that was
-using PyArray_ISFORTRAN to check for Fortran contiguity instead of
-PyArray_IS_F_CONTIGUOUS. You may want to regenerate swigged files using the
-updated numpy.i
-
-Deprecate views changing dimensions in fortran order
-----------------------------------------------------
-This deprecates assignment of a new descriptor to the dtype attribute of
-a non-C-contiguous array if it results in changing the shape. This
-effectively bars viewing a multidimensional Fortran array using a dtype
-that changes the element size along the first axis.
-
-The reason for the deprecation is that, when relaxed strides checking is
-enabled, arrays that are both C and Fortran contiguous are always treated
-as C contiguous, which breaks some code that depended on the two being mutually
-exclusive for non-scalar arrays of ndim > 1. This deprecation prepares the
-way to always enable relaxed stride checking.
-
-
-Issues Fixed
-============
-
-* gh-6019 Masked array repr fails for structured array with multi-dimensional column.
-* gh-6462 Median of empty array produces IndexError.
-* gh-6467 Performance regression for record array access.
-* gh-6468 numpy.interp uses 'left' value even when x[0]==xp[0].
-* gh-6475 np.allclose returns a memmap when one of its arguments is a memmap.
-* gh-6491 Error in broadcasting stride_tricks array.
-* gh-6495 Unrecognized command line option '-ffpe-summary' in gfortran.
-* gh-6497 Failure of reduce operation on recarrays.
-* gh-6498 Mention change in default casting rule in 1.10 release notes.
-* gh-6530 The partition function errors out on empty input.
-* gh-6532 numpy.inner return wrong inaccurate value sometimes.
-* gh-6563 Intent(out) broken in recent versions of f2py.
-* gh-6569 Cannot run tests after 'python setup.py build_ext -i'
-* gh-6572 Error in broadcasting stride_tricks array component.
-* gh-6575 BUG: Split produces empty arrays with wrong number of dimensions
-* gh-6590 Fortran Array problem in numpy 1.10.
-* gh-6602 Random __all__ missing choice and dirichlet.
-* gh-6611 ma.dot no longer always returns a masked array in 1.10.
-* gh-6618 NPY_FORTRANORDER in make_fortran() in numpy.i
-* gh-6636 Memory leak in nested dtypes in numpy.recarray
-* gh-6641 Subsetting recarray by fields yields a structured array.
-* gh-6667 ma.make_mask handles ma.nomask input incorrectly.
-* gh-6675 Optimized blas detection broken in master and 1.10.
-* gh-6678 Getting unexpected error from: X.dtype = complex (or Y = X.view(complex))
-* gh-6718 f2py test fail in pip installed numpy-1.10.1 in virtualenv.
-* gh-6719 Error compiling Cython file: Pythonic division not allowed without gil.
-* gh-6771 Numpy.rec.fromarrays losing dtype metadata between versions 1.9.2 and 1.10.1
-* gh-6781 The travis-ci script in maintenance/1.10.x needs fixing.
-* gh-6807 Windows testing errors for 1.10.2
-
-
-Merged PRs
-==========
-
-The following PRs have been merged into 1.10.2. When the PR is a backport,
-the PR number for the original PR against master is listed.
-
-* gh-5773 MAINT: Hide testing helper tracebacks when using them with pytest.
-* gh-6094 BUG: Fixed a bug with string representation of masked structured arrays.
-* gh-6208 MAINT: Speedup field access by removing unneeded safety checks.
-* gh-6460 BUG: Replacing the os.environ.clear by less invasive procedure.
-* gh-6470 BUG: Fix AttributeError in numpy distutils.
-* gh-6472 MAINT: Use Python 3.5 instead of 3.5-dev for travis 3.5 testing.
-* gh-6474 REL: Update Paver script for sdist and auto-switch test warnings.
-* gh-6478 BUG: Fix Intel compiler flags for OS X build.
-* gh-6481 MAINT: LIBPATH with spaces is now supported Python 2.7+ and Win32.
-* gh-6487 BUG: Allow nested use of parameters in definition of arrays in f2py.
-* gh-6488 BUG: Extend common blocks rather than overwriting in f2py.
-* gh-6499 DOC: Mention that default casting for inplace operations has changed.
-* gh-6500 BUG: Recarrays viewed as subarrays don't convert to np.record type.
-* gh-6501 REL: Add "make upload" command for built docs, update "make dist".
-* gh-6526 BUG: Fix use of __doc__ in setup.py for -OO mode.
-* gh-6527 BUG: Fix the IndexError when taking the median of an empty array.
-* gh-6537 BUG: Make ma.atleast_* with scalar argument return arrays.
-* gh-6538 BUG: Fix ma.masked_values does not shrink mask if requested.
-* gh-6546 BUG: Fix inner product regression for non-contiguous arrays.
-* gh-6553 BUG: Fix partition and argpartition error for empty input.
-* gh-6556 BUG: Error in broadcast_arrays with as_strided array.
-* gh-6558 MAINT: Minor update to "make upload" doc build command.
-* gh-6562 BUG: Disable view safety checks in recarray.
-* gh-6567 BUG: Revert some import * fixes in f2py.
-* gh-6574 DOC: Release notes for Numpy 1.10.2.
-* gh-6577 BUG: Fix for #6569, allowing build_ext --inplace
-* gh-6579 MAINT: Fix mistake in doc upload rule.
-* gh-6596 BUG: Fix swig for relaxed stride checking.
-* gh-6606 DOC: Update 1.10.2 release notes.
-* gh-6614 BUG: Add choice and dirichlet to numpy.random.__all__.
-* gh-6621 BUG: Fix swig make_fortran function.
-* gh-6628 BUG: Make allclose return python bool.
-* gh-6642 BUG: Fix memleak in _convert_from_dict.
-* gh-6643 ENH: make recarray.getitem return a recarray.
-* gh-6653 BUG: Fix ma dot to always return masked array.
-* gh-6668 BUG: ma.make_mask should always return nomask for nomask argument.
-* gh-6686 BUG: Fix a bug in assert_string_equal.
-* gh-6695 BUG: Fix removing tempdirs created during build.
-* gh-6697 MAINT: Fix spurious semicolon in macro definition of PyArray_FROM_OT.
-* gh-6698 TST: test np.rint bug for large integers.
-* gh-6717 BUG: Readd fallback CBLAS detection on linux.
-* gh-6721 BUG: Fix for #6719.
-* gh-6726 BUG: Fix bugs exposed by relaxed stride rollback.
-* gh-6757 BUG: link cblas library if cblas is detected.
-* gh-6756 TST: only test f2py, not f2py2.7 etc, fixes #6718.
-* gh-6747 DEP: Deprecate changing shape of non-C-contiguous array via descr.
-* gh-6775 MAINT: Include from __future__ boilerplate in some files missing it.
-* gh-6780 BUG: metadata is not copied to base_dtype.
-* gh-6783 BUG: Fix travis ci testing for new google infrastructure.
-* gh-6785 BUG: Quick and dirty fix for interp.
-* gh-6813 TST,BUG: Make test_mvoid_multidim_print work for 32 bit systems.
-* gh-6817 BUG: Disable 32-bit msvc9 compiler optimizations for npy_rint.
-* gh-6819 TST: Fix test_mvoid_multidim_print failures on Python 2.x for Windows.
-
-Initial support for mingwpy was reverted as it was causing problems for
-non-windows builds.
-
-* gh-6536 BUG: Revert gh-5614 to fix non-windows build problems
-
-A fix for np.lib.split was reverted because it resulted in "fixing"
-behavior that will be present in the Numpy 1.11 and that was already
-present in Numpy 1.9. See the discussion of the issue at gh-6575 for
-clarification.
-
-* gh-6576 BUG: Revert gh-6376 to fix split behavior for empty arrays.
-
-Relaxed stride checking was reverted. There were back compatibility
-problems involving views changing the dtype of multidimensional Fortran
-arrays that need to be dealt with over a longer timeframe.
-
-* gh-6735 MAINT: Make no relaxed stride checking the default for 1.10.
-
-
-Notes
-=====
-A bug in the Numpy 1.10.1 release resulted in exceptions being raised for
-``RuntimeWarning`` and ``DeprecationWarning`` in projects depending on Numpy.
-That has been fixed.
+++ /dev/null
-==========================
-NumPy 1.10.3 Release Notes
-==========================
-
-N/A: this release did not happen due to various screwups involving PyPI.
+++ /dev/null
-==========================
-NumPy 1.10.4 Release Notes
-==========================
-
-This release is a bugfix source release motivated by a segfault regression.
-No windows binaries are provided for this release, as there appear to be
-bugs in the toolchain we use to generate those files. Hopefully that
-problem will be fixed for the next release. In the meantime, we suggest
-using one of the providers of windows binaries.
-
-Compatibility notes
-===================
-
-* The trace function now calls the trace method on subclasses of ndarray,
- except for matrix, for which the current behavior is preserved. This is
- to help with the units package of AstroPy and hopefully will not cause
- problems.
-
-Issues Fixed
-============
-
-* gh-6922 BUG: numpy.recarray.sort segfaults on Windows.
-* gh-6937 BUG: busday_offset does the wrong thing with modifiedpreceding roll.
-* gh-6949 BUG: Type is lost when slicing a subclass of recarray.
-
-Merged PRs
-==========
-
-The following PRs have been merged into 1.10.4. When the PR is a backport,
-the PR number for the original PR against master is listed.
-
-* gh-6840 TST: Update travis testing script in 1.10.x
-* gh-6843 BUG: Fix use of python 3 only FileNotFoundError in test_f2py.
-* gh-6884 REL: Update pavement.py and setup.py to reflect current version.
-* gh-6916 BUG: Fix test_f2py so it runs correctly in runtests.py.
-* gh-6924 BUG: Fix segfault gh-6922.
-* gh-6942 Fix datetime roll='modifiedpreceding' bug.
-* gh-6943 DOC,BUG: Fix some latex generation problems.
-* gh-6950 BUG trace is not subclass aware, np.trace(ma) != ma.trace().
-* gh-6952 BUG recarray slices should preserve subclass.
+++ /dev/null
-==========================
-NumPy 1.11.0 Release Notes
-==========================
-
-This release supports Python 2.6 - 2.7 and 3.2 - 3.5 and contains a number
-of enhancements and improvements. Note also the build system changes listed
-below as they may have subtle effects.
-
-No Windows (TM) binaries are provided for this release due to a broken
-toolchain. One of the providers of Python packages for Windows (TM) is your
-best bet.
-
-
-Highlights
-==========
-
-Details of these improvements can be found below.
-
-* The datetime64 type is now timezone naive.
-* A dtype parameter has been added to ``randint``.
-* Improved detection of two arrays possibly sharing memory.
-* Automatic bin size estimation for ``np.histogram``.
-* Speed optimization of A @ A.T and dot(A, A.T).
-* New function ``np.moveaxis`` for reordering array axes.
-
-
-Build System Changes
-====================
-
-* Numpy now uses ``setuptools`` for its builds instead of plain distutils.
- This fixes usage of ``install_requires='numpy'`` in the ``setup.py`` files of
- projects that depend on Numpy (see gh-6551). It potentially affects the way
- that build/install methods for Numpy itself behave though. Please report any
- unexpected behavior on the Numpy issue tracker.
-* Bento build support and related files have been removed.
-* Single file build support and related files have been removed.
-
-
-Future Changes
-==============
-
-The following changes are scheduled for Numpy 1.12.0.
-
-* Support for Python 2.6, 3.2, and 3.3 will be dropped.
-* Relaxed stride checking will become the default. See the 1.8.0 release
- notes for a more extended discussion of what this change implies.
-* The behavior of the datetime64 "not a time" (NaT) value will be changed
- to match that of floating point "not a number" (NaN) values: all
- comparisons involving NaT will return False, except for NaT != NaT which
- will return True.
-* Indexing with floats will raise IndexError,
- e.g., a[0, 0.0].
-* Indexing with non-integer array_like will raise ``IndexError``,
- e.g., ``a['1', '2']``
-* Indexing with multiple ellipsis will raise ``IndexError``,
- e.g., ``a[..., ...]``.
-* Non-integers used as index values will raise ``TypeError``,
- e.g., in ``reshape``, ``take``, and specifying reduce axis.
-
-
-In a future release the following changes will be made.
-
-* The ``rand`` function exposed in ``numpy.testing`` will be removed. That
- function is left over from early Numpy and was implemented using the
- Python random module. The random number generators from ``numpy.random``
- should be used instead.
-* The ``ndarray.view`` method will only allow c_contiguous arrays to be
- viewed using a dtype of different size causing the last dimension to
- change. That differs from the current behavior where arrays that are
- f_contiguous but not c_contiguous can be viewed as a dtype type of
- different size causing the first dimension to change.
-* Slicing a ``MaskedArray`` will return views of both data **and** mask.
- Currently the mask is copy-on-write and changes to the mask in the slice do
- not propagate to the original mask. See the FutureWarnings section below for
- details.
-
-
-Compatibility notes
-===================
-
-datetime64 changes
-------------------
-In prior versions of NumPy the experimental datetime64 type always stored
-times in UTC. By default, creating a datetime64 object from a string or
-printing it would convert from or to local time::
-
- # old behavior
-    >>> np.datetime64('2000-01-01T00:00:00')
- numpy.datetime64('2000-01-01T00:00:00-0800') # note the timezone offset -08:00
-
-
-A consensus of datetime64 users agreed that this behavior is undesirable
-and at odds with how datetime64 is usually used (e.g., by `pandas
-<http://pandas.pydata.org>`__). For most use cases, a timezone naive datetime
-type is preferred, similar to the ``datetime.datetime`` type in the Python
-standard library. Accordingly, datetime64 no longer assumes that input is in
-local time, nor does it print local times::
-
-    >>> np.datetime64('2000-01-01T00:00:00')
- numpy.datetime64('2000-01-01T00:00:00')
-
-For backwards compatibility, datetime64 still parses timezone offsets, which
-it handles by converting to UTC. However, the resulting datetime is timezone
-naive::
-
- >>> np.datetime64('2000-01-01T00:00:00-08')
- DeprecationWarning: parsing timezone aware datetimes is deprecated;
- this will raise an error in the future
- numpy.datetime64('2000-01-01T08:00:00')
-
-As a corollary to this change, we no longer prohibit casting between datetimes
-with date units and datetimes with time units. With timezone naive datetimes,
-the rule for casting from dates to times is no longer ambiguous.
-
-``linalg.norm`` return type changes
------------------------------------
-The return type of the ``linalg.norm`` function is now floating point without
-exception. Some of the norm types previously returned integers.
-
-polynomial fit changes
-----------------------
-The various fit functions in the numpy polynomial package no longer accept
-non-integers for degree specification.
-
-*np.dot* now raises ``TypeError`` instead of ``ValueError``
------------------------------------------------------------
-This behaviour mimics that of other functions such as ``np.inner``. If the two
-arguments cannot be cast to a common type, it could have raised a ``TypeError``
-or ``ValueError`` depending on their order. ``np.dot`` will now always
-raise a ``TypeError``.
-
-FutureWarning to changed behavior
----------------------------------
-
-* In ``np.lib.split`` an empty array in the result always had dimension
- ``(0,)`` no matter the dimensions of the array being split. This
- has been changed so that the dimensions will be preserved. A
- ``FutureWarning`` for this change has been in place since Numpy 1.9 but,
- due to a bug, sometimes no warning was raised and the dimensions were
- already preserved.
-
-``%`` and ``//`` operators
---------------------------
-These operators are implemented with the ``remainder`` and ``floor_divide``
-functions respectively. Those functions are now based around ``fmod`` and are
-computed together so as to be compatible with each other and with the Python
-versions for float types. The results should be marginally more accurate or
-outright bug fixes compared to the previous results, but they may
-differ significantly in cases where roundoff makes a difference in the integer
-returned by ``floor_divide``. Some corner cases also change, for instance, NaN
-is always returned for both functions when the divisor is zero,
-``divmod(1.0, inf)`` returns ``(0.0, 1.0)`` except on MSVC 2008, and
-``divmod(-1.0, inf)`` returns ``(-1.0, inf)``.
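-
-For example, matching the Python float semantics described above::
-
-    np.floor_divide(1.0, np.inf)   # 0.0
-    np.remainder(1.0, np.inf)      # 1.0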
-
-C API
------
-
-Removed the ``check_return`` and ``inner_loop_selector`` members of
-the ``PyUFuncObject`` struct (replacing them with ``reserved`` slots
-to preserve struct layout). These were never used for anything, so
-it's unlikely that any third-party code is using them either, but we
-mention it here for completeness.
-
-
-object dtype detection for old-style classes
---------------------------------------------
-
-In python 2, objects which are instances of old-style user-defined classes no
-longer automatically count as 'object' type in the dtype-detection handler.
-Instead, as in python 3, they may potentially count as sequences, but only if
-they define both a `__len__` and a `__getitem__` method. This fixes a segfault
-and inconsistency between python 2 and 3.
-
-New Features
-============
-
-* ``np.histogram`` now provides plugin estimators for automatically
- estimating the optimal number of bins. Passing one of ['auto', 'fd',
- 'scott', 'rice', 'sturges'] as the argument to 'bins' results in the
- corresponding estimator being used.
-
-* A benchmark suite using `Airspeed Velocity
- <https://asv.readthedocs.io/>`__ has been added, converting the
- previous vbench-based one. You can run the suite locally via ``python
- runtests.py --bench``. For more details, see ``benchmarks/README.rst``.
-
-* A new function ``np.shares_memory`` that can check exactly whether two
- arrays have memory overlap is added. ``np.may_share_memory`` also now has
- an option to spend more effort to reduce false positives.
-
-* ``SkipTest`` and ``KnownFailureException`` exception classes are exposed
- in the ``numpy.testing`` namespace. Raise them in a test function to mark
- the test to be skipped or mark it as a known failure, respectively.
-
-* ``f2py.compile`` has a new ``extension`` keyword parameter that allows the
-  Fortran extension to be specified for generated temp files. For instance,
-  the files can be specified to be ``*.f90``. The ``verbose`` argument is
-  also now honored; it was previously ignored.
-
-* A ``dtype`` parameter has been added to ``np.random.randint``.
-  Random ndarrays of the following types can now be generated:
-
- - ``np.bool``,
- - ``np.int8``, ``np.uint8``,
- - ``np.int16``, ``np.uint16``,
- - ``np.int32``, ``np.uint32``,
- - ``np.int64``, ``np.uint64``,
- - ``np.int_``, ``np.intp``
-
- The specification is by precision rather than by C type. Hence, on some
- platforms ``np.int64`` may be a ``long`` instead of ``long long`` even if
- the specified dtype is ``long long`` because the two may have the same
- precision. The resulting type depends on which C type numpy uses for the
- given precision. The byteorder specification is also ignored, the
- generated arrays are always in native byte order.
-
-* A new ``np.moveaxis`` function allows for moving one or more array axes
-  to a new position by explicitly providing source and destination axes.
-  This function should be easier to use than the current ``rollaxis``
-  function, as well as providing more functionality (see the short example
-  after this list).
-
-* The ``deg`` parameter of the various ``numpy.polynomial`` fits has been
- extended to accept a list of the degrees of the terms to be included in
- the fit, the coefficients of all other terms being constrained to zero.
- The change is backward compatible, passing a scalar ``deg`` will behave
- as before.
-
-* A divmod function for float types modeled after the Python version has
- been added to the npy_math library.
-
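-A short example of ``np.moveaxis``, as referenced in the list above (the
-shape-only output is version independent)::
-
-    >>> import numpy as np
-    >>> x = np.zeros((3, 4, 5))
-    >>> np.moveaxis(x, 0, -1).shape
-    (4, 5, 3)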
-
-Improvements
-============
-
-``np.gradient`` now supports an ``axis`` argument
--------------------------------------------------
-The ``axis`` parameter was added to ``np.gradient`` for consistency. It
-allows specifying over which axes the gradient is calculated.
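-
-For example (illustrative output)::
-
-    >>> import numpy as np
-    >>> f = np.arange(6, dtype=float).reshape(2, 3)
-    >>> np.gradient(f, axis=1)
-    array([[ 1.,  1.,  1.],
-           [ 1.,  1.,  1.]])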
-
-``np.lexsort`` now supports arrays with object data-type
---------------------------------------------------------
-The function now internally calls the generic ``npy_amergesort`` when the
-type does not implement a merge-sort kind of ``argsort`` method.
-
-``np.ma.core.MaskedArray`` now supports an ``order`` argument
--------------------------------------------------------------
-When constructing a new ``MaskedArray`` instance, it can be configured with
-an ``order`` argument analogous to the one when calling ``np.ndarray``. The
-addition of this argument allows for the proper processing of an ``order``
-argument in several MaskedArray-related utility functions such as
-``np.ma.core.array`` and ``np.ma.core.asarray``.
-
-Memory and speed improvements for masked arrays
------------------------------------------------
-Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses
-``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and
-avoids a large memory peak. Another optimization was done to avoid a memory
-peak and unnecessary computations when printing a masked array.
-
-``ndarray.tofile`` now uses fallocate on Linux
-----------------------------------------------
-The function now uses the fallocate system call to reserve sufficient
-disk space on file systems that support it.
-
-Optimizations for operations of the form ``A.T @ A`` and ``A @ A.T``
---------------------------------------------------------------------
-Previously, ``gemm`` BLAS operations were used for all matrix products. Now,
-if the matrix product is between a matrix and its transpose, it will use
-``syrk`` BLAS operations for a performance boost. This optimization has been
-extended to ``@``, ``numpy.dot``, ``numpy.inner``, and ``numpy.matmul``.
-
-**Note:** Requires the transposed and non-transposed matrices to share data.
-
-``np.testing.assert_warns`` can now be used as a context manager
-----------------------------------------------------------------
-This matches the behavior of ``assert_raises``.
-
-Speed improvement for np.random.shuffle
----------------------------------------
-``np.random.shuffle`` is now much faster for 1d ndarrays.
-
-
-Changes
-=======
-
-Pyrex support was removed from ``numpy.distutils``
---------------------------------------------------
-The method ``build_src.generate_a_pyrex_source`` will remain available; it
-has been monkeypatched by users to support Cython instead of Pyrex. It is
-recommended to switch to a better supported method of building Cython
-extensions though.
-
-``np.broadcast`` can now be called with a single argument
----------------------------------------------------------
-The resulting object in that case will simply mimic iteration over
-a single array. This change obsoletes distinctions like::
-
-    if len(x) == 1:
-        shape = x[0].shape
-    else:
-        shape = np.broadcast(*x).shape
-
-Instead, ``np.broadcast`` can be used in all cases.
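-
-For instance (shape-only output, version independent)::
-
-    >>> import numpy as np
-    >>> np.broadcast(np.arange(3)).shape
-    (3,)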
-
-``np.trace`` now respects array subclasses
-------------------------------------------
-This behaviour mimics that of other functions such as ``np.diagonal`` and
-ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give
-the same result.
-
-``np.dot`` now raises ``TypeError`` instead of ``ValueError``
--------------------------------------------------------------
-This behaviour mimics that of other functions such as ``np.inner``. If the two
-arguments cannot be cast to a common type, it could previously raise a
-``TypeError`` or ``ValueError`` depending on their order. Now, ``np.dot``
-always raises a ``TypeError``.
-
-``linalg.norm`` return type changes
------------------------------------
-The ``linalg.norm`` function now does all its computations in floating point
-and returns floating point results. This change fixes bugs due to integer
-overflow and the failure of ``abs`` with signed integers of minimum value,
-e.g., ``int8(-128)``. For consistency, floats are used even where an integer
-might work.
-
-
-Deprecations
-============
-
-Views of arrays in Fortran order
---------------------------------
-The ``F_CONTIGUOUS`` flag was used to signal that views using a dtype that
-changed the element size would change the first index. This was always
-problematic for arrays that were both ``F_CONTIGUOUS`` and ``C_CONTIGUOUS``
-because ``C_CONTIGUOUS`` took precedence. Relaxed stride checking results in
-more such dual contiguous arrays and breaks some existing code as a result.
-Note that this also affects changing the dtype by assigning to the dtype
-attribute of an array. The aim of this deprecation is to restrict views to
-``C_CONTIGUOUS`` arrays at some future time. A workaround that is backward
-compatible is to use ``a.T.view(...).T`` instead. A parameter may also be
-added to the view method to explicitly ask for Fortran order views, but
-that will not be backward compatible.
-
-Invalid arguments for array ordering
-------------------------------------
-It is currently possible to pass in arguments for the ``order``
-parameter in methods like ``array.flatten`` or ``array.ravel``
-that are not one of the following: 'C', 'F', 'A', 'K' (note that
-all of these values are case insensitive and may also be given as
-unicode strings). Such behavior will not be allowed in future releases.
-
-Random number generator in the ``testing`` namespace
-----------------------------------------------------
-The Python standard library random number generator was previously exposed
-in the ``testing`` namespace as ``testing.rand``. Using this generator is
-not recommended and it will be removed in a future release. Use generators
-from the ``numpy.random`` namespace instead.
-
-Random integer generation on a closed interval
-----------------------------------------------
-In accordance with the Python C API, which gives preference to the half-open
-interval over the closed one, ``np.random.random_integers`` is being
-deprecated in favor of calling ``np.random.randint``, which has been
-enhanced with the ``dtype`` parameter as described under "New Features".
-However, ``np.random.random_integers`` will not be removed anytime soon.
-
-
-FutureWarnings
-==============
-
-Assigning to slices/views of ``MaskedArray``
---------------------------------------------
-Currently a slice of a masked array contains a view of the original data and a
-copy-on-write view of the mask. Consequently, any changes to the slice's mask
-will result in a copy of the original mask being made and that new mask being
-changed rather than the original. For example, if we make a slice of the
-original like so, ``view = original[:]``, then modifications to the data in one
-array will affect the data of the other but, because the mask will be copied
-during assignment operations, changes to the mask will remain local. A similar
-situation occurs when explicitly constructing a masked array using
-``MaskedArray(data, mask)``, the returned array will contain a view of ``data``
-but the mask will be a copy-on-write view of ``mask``.
-
-In the future, these cases will be normalized so that the data and mask arrays
-are treated the same way and modifications to either will propagate between
-views. In 1.11, numpy will issue a ``MaskedArrayFutureWarning`` warning
-whenever user code modifies the mask of a view that in the future may cause
-values to propagate back to the original. To silence these warnings and make
-your code robust against the upcoming changes, you have two options: if you
-want to keep the current behavior, call ``masked_view.unshare_mask()`` before
-modifying the mask. If you want to get the future behavior early, use
-``masked_view._sharedmask = False``. However, note that setting the
-``_sharedmask`` attribute will break following explicit calls to
-``masked_view.unshare_mask()``.
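-
-A minimal sketch of the two options (the variable names are illustrative)::
-
-    >>> import numpy as np
-    >>> original = np.ma.masked_array([1, 2, 3], mask=[False, True, False])
-    >>> masked_view = original[:]
-    >>> _ = masked_view.unshare_mask()   # option 1: keep the current behavior
-    >>> masked_view._sharedmask = False  # option 2: adopt the future behavior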
+++ /dev/null
-==========================
-NumPy 1.11.1 Release Notes
-==========================
-
-Numpy 1.11.1 supports Python 2.6 - 2.7 and 3.2 - 3.5. It fixes bugs and
-regressions found in Numpy 1.11.0 and includes several build related
-improvements. Wheels for Linux, Windows, and OS X can be found on PyPI.
-
-Fixes Merged
-============
-
-- #7506 BUG: Make sure numpy imports on python 2.6 when nose is unavailable.
-- #7530 BUG: Floating exception with invalid axis in np.lexsort.
-- #7535 BUG: Extend glibc complex trig functions blacklist to glibc < 2.18.
-- #7551 BUG: Allow graceful recovery for no compiler.
-- #7558 BUG: Constant padding expected wrong type in constant_values.
-- #7578 BUG: Fix OverflowError in Python 3.x. in swig interface.
-- #7590 BLD: Fix configparser.InterpolationSyntaxError.
-- #7597 BUG: Make np.ma.take work on scalars.
-- #7608 BUG: linalg.norm(): Don't convert object arrays to float.
-- #7638 BLD: Correct C compiler customization in system_info.py.
-- #7654 BUG: ma.median of 1d array should return a scalar.
-- #7656 BLD: Remove hardcoded Intel compiler flag -xSSE4.2.
-- #7660 BUG: Temporary fix for str(mvoid) for object field types.
-- #7665 BUG: Fix incorrect printing of 1D masked arrays.
-- #7670 BUG: Correct initial index estimate in histogram.
-- #7671 BUG: Boolean assignment no GIL release when transfer needs API.
-- #7676 BUG: Fix handling of right edge of final histogram bin.
-- #7680 BUG: Fix np.clip bug NaN handling for Visual Studio 2015.
-- #7724 BUG: Fix segfaults in np.random.shuffle.
-- #7731 MAINT: Change mkl_info.dir_env_var from MKL to MKLROOT.
-- #7737 BUG: Fix issue on OS X with Python 3.x, npymath.ini not installed.
+++ /dev/null
-==========================
-NumPy 1.11.2 Release Notes
-==========================
-
-Numpy 1.11.2 supports Python 2.6 - 2.7 and 3.2 - 3.5. It fixes bugs and
-regressions found in Numpy 1.11.1 and includes several build related
-improvements. Wheels for Linux, Windows, and OS X can be found on PyPI.
-
-Pull Requests Merged
-====================
-
-Fixes overridden by later merges and release notes updates are omitted.
-
-- #7736 BUG: Many functions silently drop 'keepdims' kwarg.
-- #7738 ENH: Add extra kwargs and update doc of many MA methods.
-- #7778 DOC: Update Numpy 1.11.1 release notes.
-- #7793 BUG: MaskedArray.count treats negative axes incorrectly.
-- #7816 BUG: Fix array too big error for wide dtypes.
-- #7821 BUG: Make sure npy_mul_with_overflow_<type> detects overflow.
-- #7824 MAINT: Allocate fewer bytes for empty arrays.
-- #7847 MAINT,DOC: Fix some imp module uses and update f2py.compile docstring.
-- #7849 MAINT: Fix remaining uses of deprecated Python imp module.
-- #7851 BLD: Fix ATLAS version detection.
-- #7896 BUG: Construct ma.array from np.array which contains padding.
-- #7904 BUG: Fix float16 type not being called due to wrong ordering.
-- #7917 BUG: Production install of numpy should not require nose.
-- #7919 BLD: Fixed MKL detection for recent versions of this library.
-- #7920 BUG: Fix for issue #7835 (ma.median of 1d).
-- #7932 BUG: Monkey-patch _msvccompile.gen_lib_option like other compilers.
-- #7939 BUG: Check for HAVE_LDOUBLE_DOUBLE_DOUBLE_LE in npy_math_complex.
-- #7953 BUG: Guard against buggy comparisons in generic quicksort.
-- #7954 BUG: Use keyword arguments to initialize Extension base class.
-- #7955 BUG: Make sure numpy globals keep identity after reload.
-- #7972 BUG: MSVCCompiler grows 'lib' & 'include' env strings exponentially.
-- #8005 BLD: Remove __NUMPY_SETUP__ from builtins at end of setup.py.
-- #8010 MAINT: Remove leftover imp module imports.
-- #8020 BUG: Fix return of np.ma.count if keepdims is True and axis is None.
-- #8024 BUG: Fix numpy.ma.median.
-- #8031 BUG: Fix np.ma.median with only one non-masked value.
-- #8044 BUG: Fix bug in NpyIter buffering with discontinuous arrays.
+++ /dev/null
-==========================
-NumPy 1.11.3 Release Notes
-==========================
-
-Numpy 1.11.3 fixes a bug that leads to file corruption when very large files
-opened in append mode are used in ``ndarray.tofile``. It supports Python
-versions 2.6 - 2.7 and 3.2 - 3.5. Wheels for Linux, Windows, and OS X can be
-found on PyPI.
-
-
-Contributors to maintenance/1.11.3
-==================================
-
-A total of 2 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-- Charles Harris
-- Pavel Potocek +
-
-Pull Requests Merged
-====================
-
-- `#8341 <https://github.com/numpy/numpy/pull/8341>`__: BUG: Fix ndarray.tofile large file corruption in append mode.
-- `#8346 <https://github.com/numpy/numpy/pull/8346>`__: TST: Fix tests in PR #8341 for NumPy 1.11.x
-
+++ /dev/null
-.. 1.12.0:
-
-==========================
-NumPy 1.12.0 Release Notes
-==========================
-
-This release supports Python 2.7 and 3.4 - 3.6.
-
-Highlights
-==========
-The NumPy 1.12.0 release contains a large number of fixes and improvements, but
-few that stand out above all others. That makes picking out the highlights
-somewhat arbitrary but the following may be of particular interest or indicate
-areas likely to have future consequences.
-
-* Order of operations in ``np.einsum`` can now be optimized for large speed improvements.
-* New ``signature`` argument to ``np.vectorize`` for vectorizing with core dimensions.
-* The ``keepdims`` argument was added to many functions.
-* New context manager for testing warnings.
-* Support for BLIS in ``numpy.distutils``.
-* Much improved support for PyPy (not yet finished).
-
-Dropped Support
-===============
-
-* Support for Python 2.6, 3.2, and 3.3 has been dropped.
-
-
-Added Support
-=============
-
-* Support for PyPy 2.7 v5.6.0 has been added. While not complete (nditer
- ``updateifcopy`` is not supported yet), this is a milestone for PyPy's
- C-API compatibility layer.
-
-
-Build System Changes
-====================
-
-* Library order is preserved, instead of being reordered to match that of
- the directories.
-
-
-Deprecations
-============
-
-Assignment of ndarray object's ``data`` attribute
--------------------------------------------------
-Assigning the 'data' attribute is an inherently unsafe operation as pointed
-out in gh-7083. Such a capability will be removed in the future.
-
-Unsafe int casting of the num attribute in ``linspace``
--------------------------------------------------------
-``np.linspace`` now raises a ``DeprecationWarning`` when ``num`` cannot be
-safely interpreted as an integer.
-
-Insufficient bit width parameter to ``binary_repr``
----------------------------------------------------
-If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
-represent the number in base 2 (positive) or 2's complement (negative) form,
-the function used to silently ignore the parameter and return a representation
-using the minimal number of bits needed for the form in question. Such behavior
-is now considered unsafe from a user perspective and will raise an error in the
-future.
-
-
-Future Changes
-==============
-
-* In 1.13 NAT will always compare False except for ``NAT != NAT``,
-  which will be True. In short, NAT will behave like NaN.
-* In 1.13 ``np.average`` will preserve subclasses, to match the behavior of most
- other numpy functions such as np.mean. In particular, this means calls which
- returned a scalar may return a 0-d subclass object instead.
-
-Multiple-field manipulation of structured arrays
-------------------------------------------------
-In 1.13 the behavior of structured arrays involving multiple fields will change
-in two ways:
-
-First, indexing a structured array with multiple fields (e.g.,
-``arr[['f1', 'f3']]``) will return a view into the original array in 1.13,
-instead of a copy. Note the returned view will have extra padding bytes
-corresponding to intervening fields in the original array, unlike the copy in
-1.12, which will affect code such as ``arr[['f1', 'f3']].view(newdtype)``.
-
-Second, for numpy versions 1.6 to 1.12 assignment between structured arrays
-occurs "by field name": Fields in the destination array are set to the
-identically-named field in the source array or to 0 if the source does not have
-a field::
-
- >>> a = np.array([(1,2),(3,4)], dtype=[('x', 'i4'), ('y', 'i4')])
- >>> b = np.ones(2, dtype=[('z', 'i4'), ('y', 'i4'), ('x', 'i4')])
- >>> b[:] = a
- >>> b
- array([(0, 2, 1), (0, 4, 3)],
- dtype=[('z', '<i4'), ('y', '<i4'), ('x', '<i4')])
-
-In 1.13 assignment will instead occur "by position": The Nth field of the
-destination will be set to the Nth field of the source regardless of field
-name. The old behavior can be obtained by using indexing to reorder the fields
-before assignment, e.g., ``b[['x', 'y']] = a[['y', 'x']]``.
-
-
-Compatibility notes
-===================
-
-DeprecationWarning to error
----------------------------
-
-* Indexing with floats raises ``IndexError``,
-  e.g., ``a[0, 0.0]``.
-* Indexing with non-integer array_like raises ``IndexError``,
-  e.g., ``a['1', '2']``.
-* Indexing with multiple ellipsis raises ``IndexError``,
-  e.g., ``a[..., ...]``.
-* Non-integers used as index values raise ``TypeError``,
-  e.g., in ``reshape``, ``take``, and specifying reduce axis.
-
-FutureWarning to changed behavior
----------------------------------
-
-* ``np.full`` now returns an array of the fill-value's dtype if no dtype is
- given, instead of defaulting to float.
-* ``np.average`` will emit a warning if the argument is a subclass of ndarray,
- as the subclass will be preserved starting in 1.13. (see Future Changes)
-
-``power`` and ``**`` raise errors for integer to negative integer powers
-------------------------------------------------------------------------
-The previous behavior depended on whether numpy scalar integers or numpy
-integer arrays were involved.
-
-For arrays
-
-* Zero to negative integer powers returned the least integral value.
-* Both 1, -1 to negative integer powers returned correct values.
-* The remaining integers returned zero when raised to negative integer powers.
-
-For scalars
-
-* Zero to negative integer powers returned the least integral value.
-* Both 1, -1 to negative integer powers returned correct values.
-* The remaining integers sometimes returned zero, sometimes the
- correct float depending on the integer type combination.
-
-All of these cases now raise a ``ValueError`` except for those integer
-combinations whose common type is float, for instance uint64 and int8. It was
-felt that a simple rule was the best way to go rather than have special
-exceptions for the integer units (1 and -1). If you need negative powers,
-use an inexact type.
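-
-For example, using the new ``np.float_power`` ufunc described under New
-Features below (illustrative)::
-
-    >>> import numpy as np
-    >>> np.float_power(2, -2)
-    0.25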
-
-Relaxed stride checking is the default
---------------------------------------
-This will have some impact on code that assumed that ``F_CONTIGUOUS`` and
-``C_CONTIGUOUS`` were mutually exclusive and could be set to determine the
-default order for arrays that are now both.
-
-The ``np.percentile`` 'midpoint' interpolation method fixed for exact indices
------------------------------------------------------------------------------
-The 'midpoint' interpolator now gives the same result as 'lower' and 'higher' when
-the two coincide. The previous behavior of returning 'lower' + 0.5 is fixed.
-
-``keepdims`` kwarg is passed through to user-class methods
-----------------------------------------------------------
-numpy functions that take a ``keepdims`` kwarg now pass the value
-through to the corresponding methods on ndarray sub-classes. Previously the
-``keepdims`` keyword would be silently dropped. These functions now have
-the following behavior:
-
-1. If user does not provide ``keepdims``, no keyword is passed to the underlying
- method.
-2. Any user-provided value of ``keepdims`` is passed through as a keyword
- argument to the method.
-
-This will raise in the case where the method does not support a
-``keepdims`` kwarg and the user explicitly passes in ``keepdims``.
-
-The following functions are changed: ``sum``, ``product``,
-``sometrue``, ``alltrue``, ``any``, ``all``, ``amax``, ``amin``,
-``prod``, ``mean``, ``std``, ``var``, ``nanmin``, ``nanmax``,
-``nansum``, ``nanprod``, ``nanmean``, ``nanmedian``, ``nanvar``,
-``nanstd``
-
-``bitwise_and`` identity changed
---------------------------------
-The previous identity was 1; it is now -1. See the entry in Improvements for
-more explanation.
-
-ma.median warns and returns nan when unmasked invalid values are encountered
-----------------------------------------------------------------------------
-Similar to the unmasked median, the masked median ``ma.median`` now emits a
-``RuntimeWarning`` and returns ``NaN`` in slices where an unmasked ``NaN`` is
-present.
-
-Greater consistency in ``assert_almost_equal``
-----------------------------------------------
-The precision check for scalars has been changed to match that for arrays. It
-is now::
-
- abs(actual - desired) < 1.5 * 10**(-decimal)
-
-Note that this is looser than previously documented, but agrees with the
-previous implementation used in ``assert_array_almost_equal``. Due to the
-change in implementation some very delicate tests may fail that did not
-fail before.
-
-``NoseTester`` behaviour of warnings during testing
----------------------------------------------------
-When ``raise_warnings="develop"`` is given, all uncaught warnings will now
-be considered a test failure. Previously only selected ones were raised.
-Warnings which are not caught or raised (mostly when in release mode)
-will be shown once during the test cycle similar to the default python
-settings.
-
-``assert_warns`` and ``deprecated`` decorator more specific
------------------------------------------------------------
-The ``assert_warns`` function and context manager are now more specific
-to the given warning category. This increased specificity leads to them
-being handled according to the outer warning settings. This means that
-no warning may be raised in cases where a wrong category warning is given
-and ignored outside the context. Alternatively the increased specificity
-may mean that warnings that were incorrectly ignored will now be shown
-or raised. See also the new ``suppress_warnings`` context manager.
-The same is true for the ``deprecated`` decorator.
-
-C API
------
-No changes.
-
-
-New Features
-============
-
-Writeable keyword argument for ``as_strided``
----------------------------------------------
-``np.lib.stride_tricks.as_strided`` now has a ``writeable``
-keyword argument. It can be set to False when no write operation
-to the returned array is expected to avoid accidental
-unpredictable writes.
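-
-For example (illustrative)::
-
-    >>> import numpy as np
-    >>> from numpy.lib.stride_tricks import as_strided
-    >>> x = np.arange(4)
-    >>> y = as_strided(x, shape=(2,), strides=x.strides, writeable=False)
-    >>> y.flags.writeable
-    False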
-
-``axes`` keyword argument for ``rot90``
----------------------------------------
-The ``axes`` keyword argument in ``rot90`` determines the plane in which the
-array is rotated. It defaults to ``axes=(0,1)`` as in the original function.
-
-Generalized ``flip``
---------------------
-``flipud`` and ``fliplr`` reverse the elements of an array along axis=0 and
-axis=1 respectively. The newly added ``flip`` function reverses the elements of
-an array along any given axis.
-
-* ``np.count_nonzero`` now has an ``axis`` parameter, allowing
- non-zero counts to be generated on more than just a flattened
- array object.
-
-BLIS support in ``numpy.distutils``
------------------------------------
-Building against the BLAS implementation provided by the BLIS library is now
-supported. See the ``[blis]`` section in ``site.cfg.example`` (in the root of
-the numpy repo or source distribution).
-
-Hook in ``numpy/__init__.py`` to run distribution-specific checks
------------------------------------------------------------------
-Binary distributions of numpy may need to run specific hardware checks or load
-specific libraries during numpy initialization. For example, if we are
-distributing numpy with a BLAS library that requires SSE2 instructions, we
-would like to check that the machine on which numpy is running has SSE2 in
-order to give an informative error.
-
-Add a hook in ``numpy/__init__.py`` to import a ``numpy/_distributor_init.py``
-file that will remain empty (bar a docstring) in the standard numpy source,
-but that can be overwritten by people making binary distributions of numpy.
-
-New nanfunctions ``nancumsum`` and ``nancumprod`` added
--------------------------------------------------------
-Nan-functions ``nancumsum`` and ``nancumprod`` have been added to
-compute ``cumsum`` and ``cumprod`` by ignoring nans.
-
-``np.interp`` can now interpolate complex values
-------------------------------------------------
-``np.lib.interp(x, xp, fp)`` now allows the interpolated array ``fp``
-to be complex and will interpolate at ``complex128`` precision.
-
-New polynomial evaluation function ``polyvalfromroots`` added
--------------------------------------------------------------
-The new function ``polyvalfromroots`` evaluates a polynomial at given points
-from the roots of the polynomial. This is useful for higher order polynomials,
-where expansion into polynomial coefficients is inaccurate at machine
-precision.
-
-New array creation function ``geomspace`` added
------------------------------------------------
-The new function ``geomspace`` generates a geometric sequence. It is similar
-to ``logspace``, but with start and stop specified directly:
-``geomspace(start, stop)`` behaves the same as
-``logspace(log10(start), log10(stop))``.
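-
-For example (illustrative output; whitespace in the repr may differ)::
-
-    >>> import numpy as np
-    >>> np.geomspace(1, 1000, num=4)
-    array([    1.,    10.,   100.,  1000.])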
-
-New context manager for testing warnings
-----------------------------------------
-A new context manager ``suppress_warnings`` has been added to the testing
-utils. This context manager is designed to help reliably test warnings,
-specifically to reliably filter or ignore them. Ignoring warnings
-by using an "ignore" filter in Python versions before 3.4.x can quickly
-result in these (or similar) warnings not being tested reliably.
-
-The context manager allows filtering (as well as recording) warnings similarly
-to the ``catch_warnings`` context, but allows for easier specificity.
-Also, printing warnings that have not been filtered or nesting the
-context manager will work as expected. Additionally, it is possible
-to use the context manager as a decorator, which can be useful when
-multiple tests need to hide the same warning.
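-
-A minimal usage sketch::
-
-    >>> import warnings
-    >>> from numpy.testing import suppress_warnings
-    >>> with suppress_warnings() as sup:
-    ...     sup.filter(RuntimeWarning)  # ignore RuntimeWarnings in this block
-    ...     warnings.warn("example", RuntimeWarning)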
-
-New masked array functions ``ma.convolve`` and ``ma.correlate`` added
----------------------------------------------------------------------
-These functions wrap the non-masked versions, but propagate through masked
-values. There are two different propagation modes. The default causes masked
-values to contaminate the result with masks, but the other mode only outputs
-masks if there is no alternative.
-
-New ``float_power`` ufunc
--------------------------
-The new ``float_power`` ufunc is like the ``power`` function except all
-computation is done in a minimum precision of float64. There was a long
-discussion on the numpy mailing list of how to treat integers to negative
-integer powers and a popular proposal was that the ``__pow__`` operator should
-always return results of at least float64 precision. The ``float_power``
-function implements that option. Note that it does not support object arrays.
-
-``np.loadtxt`` now supports a single integer as ``usecols`` argument
---------------------------------------------------------------------
-Instead of using ``usecols=(n,)`` to read the nth column of a file
-it is now possible to use ``usecols=n``. Also the error message is
-more user friendly when a non-integer is passed as a column index.
-
-Improved automated bin estimators for ``histogram``
----------------------------------------------------
-Added 'doane' and 'sqrt' estimators to ``histogram`` via the ``bins``
-argument. Added support for range-restricted histograms with automated
-bin estimation.
-
-``np.roll`` can now roll multiple axes at the same time
--------------------------------------------------------
-The ``shift`` and ``axis`` arguments to ``roll`` are now broadcast against each
-other, and each specified axis is shifted accordingly.
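-
-For example (illustrative)::
-
-    >>> import numpy as np
-    >>> x = np.arange(4).reshape(2, 2)
-    >>> np.roll(x, shift=(1, 1), axis=(0, 1))
-    array([[3, 2],
-           [1, 0]])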
-
-The ``__complex__`` method has been implemented for ndarrays
----------------------------------------------------------------
-Calling ``complex()`` on a size-1 array will now cast to a Python
-complex.
-
-``pathlib.Path`` objects now supported
---------------------------------------
-The standard ``np.load``, ``np.save``, ``np.loadtxt``, ``np.savez``, and similar
-functions can now take ``pathlib.Path`` objects as an argument instead of a
-filename or open file object.
-
-New ``bits`` attribute for ``np.finfo``
----------------------------------------
-This makes ``np.finfo`` consistent with ``np.iinfo`` which already has that
-attribute.
-
-New ``signature`` argument to ``np.vectorize``
-----------------------------------------------
-This argument allows for vectorizing user defined functions with core
-dimensions, in the style of NumPy's
-:ref:`generalized universal functions<c-api.generalized-ufuncs>`. This allows
-for vectorizing a much broader class of functions. For example, an arbitrary
-distance metric that combines two vectors to produce a scalar could be
-vectorized with ``signature='(n),(n)->()'``. See ``np.vectorize`` for full
-details.
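-
-For instance, a simple inner-product-like function over the last axis
-(illustrative output)::
-
-    >>> import numpy as np
-    >>> pairwise = np.vectorize(lambda a, b: np.sum(a * b),
-    ...                         signature='(n),(n)->()')
-    >>> pairwise(np.arange(6).reshape(2, 3), np.ones(3))
-    array([  3.,  12.])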
-
-Emit py3kwarnings for division of integer arrays
-------------------------------------------------
-To help people migrate their code bases from Python 2 to Python 3, the
-python interpreter has a handy option -3, which issues warnings at runtime.
-One of its warnings is for integer division::
-
- $ python -3 -c "2/3"
-
- -c:1: DeprecationWarning: classic int division
-
-In Python 3, the new integer division semantics also apply to numpy arrays.
-With this version, numpy will emit a similar warning::
-
- $ python -3 -c "import numpy as np; np.array(2)/np.array(3)"
-
- -c:1: DeprecationWarning: numpy: classic int division
-
-numpy.sctypes now includes bytes on Python3 too
------------------------------------------------
-Previously, it included str (bytes) and unicode on Python2, but only str
-(unicode) on Python3.
-
-
-Improvements
-============
-
-``bitwise_and`` identity changed
---------------------------------
-The previous identity was 1 with the result that all bits except the LSB were
-masked out when the reduce method was used. The new identity is -1, which
-should work properly on twos complement machines as all bits will be set to
-one.
-
-Generalized Ufuncs will now unlock the GIL
-------------------------------------------
-Generalized Ufuncs, including most of the linalg module, will now unlock
-the Python global interpreter lock.
-
-Caches in `np.fft` are now bounded in total size and item count
----------------------------------------------------------------
-The caches in `np.fft` that speed up successive FFTs of the same length can no
-longer grow without bounds. They have been replaced with LRU (least recently
-used) caches that automatically evict no longer needed items if either the
-memory size or item count limit has been reached.
-
-Improved handling of zero-width string/unicode dtypes
------------------------------------------------------
-Fixed several interfaces that explicitly disallowed arrays with zero-width
-string dtypes (i.e. ``dtype('S0')`` or ``dtype('U0')``), and fixed several
-bugs where such dtypes were not handled properly. In particular, changed
-``ndarray.__new__`` to not implicitly convert ``dtype('S0')`` to
-``dtype('S1')`` (and likewise for unicode) when creating new arrays.
-
-Integer ufuncs vectorized with AVX2
------------------------------------
-If the CPU supports it at runtime, the basic integer ufuncs now use AVX2
-instructions. This feature is currently only available when compiled with GCC.
-
-Order of operations optimization in ``np.einsum``
---------------------------------------------------
-``np.einsum`` now supports the ``optimize`` argument which will optimize the
-order of contraction. For example, ``np.einsum`` would complete the chain dot
-example ``np.einsum('ij,jk,kl->il', a, b, c)`` in a single pass which would
-scale like ``N^4``; however, when ``optimize=True`` ``np.einsum`` will create
-an intermediate array to reduce this scaling to ``N^3`` or effectively
-``np.dot(a, b).dot(c)``. Usage of intermediate tensors to reduce scaling has
-been applied to the general einsum summation notation. See ``np.einsum_path``
-for more details.
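-
-For example (shape-only output, version independent)::
-
-    >>> import numpy as np
-    >>> a = b = c = np.ones((8, 8))
-    >>> np.einsum('ij,jk,kl->il', a, b, c, optimize=True).shape
-    (8, 8)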
-
-quicksort has been changed to an introsort
-------------------------------------------
-The quicksort kind of ``np.sort`` and ``np.argsort`` is now an introsort, which
-is regular quicksort that switches to heapsort when not enough progress is
-made. This retains the good quicksort performance while changing the worst case
-runtime from ``O(N^2)`` to ``O(N*log(N))``.
-
-``ediff1d`` improved performance and subclass handling
-------------------------------------------------------
-The ``ediff1d`` function uses an array instead of a flat iterator for the
-subtraction. When ``to_begin`` or ``to_end`` is not None, the subtraction is
-performed in place to eliminate a copy operation. A side effect is that
-certain subclasses are handled better, namely ``astropy.Quantity``, since the
-complete array is created, wrapped, and then begin and end values are set,
-instead of using concatenate.
-
-Improved precision of ``ndarray.mean`` for float16 arrays
----------------------------------------------------------
-The computation of the mean of float16 arrays is now carried out in float32 for
-improved precision. This should be useful in packages such as Theano
-where the precision of float16 is adequate and its smaller footprint is
-desirable.
-
-
-Changes
-=======
-
-All array-like methods are now called with keyword arguments in fromnumeric.py
-------------------------------------------------------------------------------
-Internally, many array-like methods in fromnumeric.py were being called with
-positional arguments instead of keyword arguments as their external signatures
-were doing. This caused a complication in the downstream 'pandas' library
-that encountered an issue with 'numpy' compatibility. Now, all array-like
-methods in this module are called with keyword arguments instead.
-
-Operations on np.memmap objects return numpy arrays in most cases
------------------------------------------------------------------
-Previously operations on a memmap object would misleadingly return a memmap
-instance even if the result was actually not memmapped. For example,
-``arr + 1`` or ``arr + arr`` would return memmap instances, although no memory
-from the output array is memmapped. Version 1.12 returns ordinary numpy arrays
-from these operations.
-
-Also, reduction of a memmap (e.g. ``.sum(axis=None)``) now returns a numpy
-scalar instead of a 0d memmap.
-
-stacklevel of warnings increased
---------------------------------
-The stacklevel for python based warnings was increased so that most warnings
-will report the offending line of the user code instead of the line at which
-the warning itself is raised. Passing of stacklevel is now tested to ensure
-that new warnings will receive the ``stacklevel`` argument.
-
-This causes warnings with the "default" or "module" filter to be shown once
-for every offending user code line or user module instead of only once. On
-python versions before 3.4, this can cause warnings to appear that were falsely
-ignored before, which may be surprising especially in test suites.
+++ /dev/null
-==========================
-NumPy 1.12.1 Release Notes
-==========================
-
-NumPy 1.12.1 supports Python 2.7 and 3.4 - 3.6 and fixes bugs and regressions
-found in NumPy 1.12.0. In particular, the regression in f2py constant parsing
-is fixed. Wheels for Linux, Windows, and OS X can be found on PyPI.
-
-Bugs Fixed
-==========
-
-* BUG: Fix wrong future nat warning and equiv type logic error...
-* BUG: Fix wrong masked median for some special cases
-* DOC: Place np.average in inline code
-* TST: Work around isfinite inconsistency on i386
-* BUG: Guard against replacing constants without '_' spec in f2py.
-* BUG: Fix mean for float 16 non-array inputs for 1.12
-* BUG: Fix calling python api with error set and minor leaks for...
-* BUG: Make iscomplexobj compatible with custom dtypes again
-* BUG: Fix undefined behaviour induced by bad __array_wrap__
-* BUG: Fix MaskedArray.__setitem__
-* BUG: PPC64el machines are POWER for Fortran in f2py
-* BUG: Look up methods on MaskedArray in `_frommethod`
-* BUG: Remove extra digit in binary_repr at limit
-* BUG: Fix deepcopy regression for empty arrays.
-* BUG: Fix ma.median for empty ndarrays
+++ /dev/null
-==========================
-NumPy 1.13.0 Release Notes
-==========================
-
-This release supports Python 2.7 and 3.4 - 3.6.
-
-
-Highlights
-==========
-
- * Operations like ``a + b + c`` will reuse temporaries on some platforms,
- resulting in less memory use and faster execution.
- * Inplace operations check if inputs overlap outputs and create temporaries
- to avoid problems.
- * New ``__array_ufunc__`` attribute provides improved ability for classes to
- override default ufunc behavior.
- * New ``np.block`` function for creating blocked arrays.
-
-
-New functions
-=============
-
-* New ``np.positive`` ufunc.
-* New ``np.divmod`` ufunc provides more efficient divmod.
-* New ``np.isnat`` ufunc tests for NaT special values.
-* New ``np.heaviside`` ufunc computes the Heaviside function.
-* New ``np.isin`` function, improves on ``in1d``.
-* New ``np.block`` function for creating blocked arrays.
-* New ``PyArray_MapIterArrayCopyIfOverlap`` added to NumPy C-API.
-
-See below for details.
-
-
-Deprecations
-============
-
-* Calling ``np.fix``, ``np.isposinf``, and ``np.isneginf`` with ``f(x, y=out)``
- is deprecated - the argument should be passed as ``f(x, out=out)``, which
- matches other ufunc-like interfaces.
-* Use of the C-API ``NPY_CHAR`` type number deprecated since version 1.7 will
- now raise deprecation warnings at runtime. Extensions built with older f2py
- versions need to be recompiled to remove the warning.
-* ``np.ma.argsort``, ``np.ma.minimum.reduce``, and ``np.ma.maximum.reduce``
- should be called with an explicit `axis` argument when applied to arrays with
- more than 2 dimensions, as the default value of this argument (``None``) is
- inconsistent with the rest of numpy (``-1``, ``0``, and ``0``, respectively).
-* ``np.ma.MaskedArray.mini`` is deprecated, as it almost duplicates the
-  functionality of ``np.ma.MaskedArray.min``. Exactly equivalent behaviour
-  can be obtained with ``np.ma.minimum.reduce``.
-* The single-argument form of ``np.ma.minimum`` and ``np.ma.maximum`` is
-  deprecated. ``np.ma.minimum(x)`` should now be spelt
-  ``np.ma.minimum.reduce(x)``, which is consistent with how this would be
-  done with ``np.minimum``.
-* Calling ``ndarray.conjugate`` on non-numeric dtypes is deprecated (it
- should match the behavior of ``np.conjugate``, which throws an error).
-* Calling ``expand_dims`` when the ``axis`` keyword does not satisfy
- ``-a.ndim - 1 <= axis <= a.ndim``, where ``a`` is the array being reshaped,
- is deprecated.
-
-
-Future Changes
-==============
-
-* Assignment between structured arrays with different field names will change
- in NumPy 1.14. Previously, fields in the dst would be set to the value of the
- identically-named field in the src. In numpy 1.14 fields will instead be
- assigned 'by position': The n-th field of the dst will be set to the n-th
- field of the src array. Note that the ``FutureWarning`` raised in NumPy 1.12
- incorrectly reported this change as scheduled for NumPy 1.13 rather than
- NumPy 1.14.
-
-
-Build System Changes
-====================
-
-* ``numpy.distutils`` now automatically determines C-file dependencies with
- GCC compatible compilers.
-
-
-Compatibility notes
-===================
-
-Error type changes
-------------------
-
-* ``numpy.hstack()`` now throws ``ValueError`` instead of ``IndexError`` when
- input is empty.
-* Functions taking an axis argument, when that argument is out of range, now
- throw ``np.AxisError`` instead of a mixture of ``IndexError`` and
- ``ValueError``. For backwards compatibility, ``AxisError`` subclasses both of
- these.
-
-Tuple object dtypes
--------------------
-
-Support has been removed for certain obscure dtypes that were unintentionally
-allowed, of the form ``(old_dtype, new_dtype)``, where either of the dtypes
-is or contains the ``object`` dtype. As an exception, dtypes of the form
-``(object, [('name', object)])`` are still supported due to evidence of
-existing use.
-
-DeprecationWarning to error
----------------------------
-See Changes section for more detail.
-
-* ``partition``, TypeError when non-integer partition index is used.
-* ``NpyIter_AdvancedNew``, ValueError when ``oa_ndim == 0`` and ``op_axes`` is NULL
-* ``negative(bool_)``, TypeError when negative applied to booleans.
-* ``subtract(bool_, bool_)``, TypeError when subtracting boolean from boolean.
-* ``np.equal, np.not_equal``, object identity doesn't override failed comparison.
-* ``np.equal, np.not_equal``, object identity doesn't override non-boolean comparison.
-* Deprecated boolean indexing behavior dropped. See Changes below for details.
-* Deprecated ``np.alterdot()`` and ``np.restoredot()`` removed.
-
-FutureWarning to changed behavior
----------------------------------
-See Changes section for more detail.
-
-* ``numpy.average`` preserves subclasses
-* ``array == None`` and ``array != None`` do element-wise comparison.
-* ``np.equal, np.not_equal``, object identity doesn't override comparison result.
-
-dtypes are now always true
---------------------------
-
-Previously ``bool(dtype)`` would fall back to the default python
-implementation, which checked if ``len(dtype) > 0``. Since ``dtype`` objects
-implement ``__len__`` as the number of record fields, ``bool`` of scalar dtypes
-would evaluate to ``False``, which was unintuitive. Now ``bool(dtype) == True``
-for all dtypes.
-
-``__getslice__`` and ``__setslice__`` are no longer needed in ``ndarray`` subclasses
-------------------------------------------------------------------------------------
-When subclassing np.ndarray in Python 2.7, it is no longer *necessary* to
-implement ``__*slice__`` on the derived class, as ``__*item__`` will intercept
-these calls correctly.
-
-Any code that did implement these will work exactly as before. Code that
-invokes ``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``)
-will now issue a DeprecationWarning - ``.__getitem__(slice(start, end))``
-should be used instead.
-
-Indexing MaskedArrays/Constants with ``...`` (ellipsis) now returns MaskedArray
--------------------------------------------------------------------------------
-This behavior mirrors that of np.ndarray, and accounts for nested arrays in
-MaskedArrays of object dtype, and ellipsis combined with other forms of
-indexing.
-
-C API changes
-=============
-
-GUfuncs on empty arrays and NpyIter axis removal
-------------------------------------------------
-It is now allowed to remove a zero-sized axis from NpyIter, which may mean
-that code removing axes from NpyIter has to add an additional check when
-accessing the removed dimensions later on.
-
-The largest followup change is that gufuncs are now allowed to have zero-sized
-inner dimensions. This means that a gufunc now has to anticipate an empty
-inner dimension, while this was previously not possible and an error was
-raised instead.
-
-For most gufuncs no change should be necessary. However, it is now possible
-for gufuncs with a signature such as ``(..., N, M) -> (..., M)`` to return
-a valid result if ``N=0`` without further wrapping code.
-
-``PyArray_MapIterArrayCopyIfOverlap`` added to NumPy C-API
-----------------------------------------------------------
-Similar to ``PyArray_MapIterArray`` but with an additional ``copy_if_overlap``
-argument. If ``copy_if_overlap != 0``, checks if input has memory overlap with
-any of the other arrays and makes copies as appropriate to avoid problems if
-the input is modified during the iteration. See the documentation for more
-details.
-
-
-New Features
-============
-
-``__array_ufunc__`` added
--------------------------
-This is the renamed and redesigned ``__numpy_ufunc__``. Any class, ndarray
-subclass or not, can define this method or set it to ``None`` in order to
-override the behavior of NumPy's ufuncs. This works quite similarly to Python's
-``__mul__`` and other binary operation routines. See the documentation for a
-more detailed description of the implementation and behavior of this new
-option. The API is provisional, we do not yet guarantee backward compatibility
-as modifications may be made pending feedback. See `NEP 13`_ and
-documentation_ for more details.
-
-.. _`NEP 13`: http://www.numpy.org/neps/nep-0013-ufunc-overrides.html
-.. _documentation: https://github.com/numpy/numpy/blob/master/doc/source/reference/arrays.classes.rst
-
-New ``positive`` ufunc
-----------------------
-This ufunc corresponds to unary ``+``, but unlike ``+`` on an ndarray it will
-raise an error if array values do not support numeric operations.
-
-New ``divmod`` ufunc
---------------------
-This ufunc corresponds to the Python builtin ``divmod``, and is used to
-implement ``divmod`` when called on numpy arrays. ``np.divmod(x, y)``
-calculates a result equivalent to ``(np.floor_divide(x, y), np.remainder(x, y))``
-but is approximately twice as fast as calling the functions separately.
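-
-For example (illustrative)::
-
-    >>> import numpy as np
-    >>> np.divmod(np.arange(5), 3)
-    (array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))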
-
-``np.isnat`` ufunc tests for NaT special datetime and timedelta values
-----------------------------------------------------------------------
-The new ufunc ``np.isnat`` finds the positions of special NaT values
-within datetime and timedelta arrays. This is analogous to ``np.isnan``.
-
-``np.heaviside`` ufunc computes the Heaviside function
-------------------------------------------------------
-The new function ``np.heaviside(x, h0)`` (a ufunc) computes the Heaviside
-function:
-
-.. code::
-
- { 0 if x < 0,
- heaviside(x, h0) = { h0 if x == 0,
- { 1 if x > 0.
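-
-For example (illustrative output)::
-
-    >>> import numpy as np
-    >>> np.heaviside([-1.5, 0, 2.0], 0.5)
-    array([ 0. ,  0.5,  1. ])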
-
-``np.block`` function for creating blocked arrays
--------------------------------------------------
-Add a new ``block`` function to the current stacking functions ``vstack``,
-``hstack``, and ``stack``. This allows concatenation across multiple axes
-simultaneously, with a similar syntax to array creation, but where elements
-can themselves be arrays. For instance::
-
- >>> A = np.eye(2) * 2
- >>> B = np.eye(3) * 3
- >>> np.block([
- ... [A, np.zeros((2, 3))],
- ... [np.ones((3, 2)), B ]
- ... ])
- array([[ 2., 0., 0., 0., 0.],
- [ 0., 2., 0., 0., 0.],
- [ 1., 1., 3., 0., 0.],
- [ 1., 1., 0., 3., 0.],
- [ 1., 1., 0., 0., 3.]])
-
-While primarily useful for block matrices, this works for arbitrary dimensions
-of arrays.
-
-It is similar to Matlab's square bracket notation for creating block matrices.
-
-``isin`` function, improving on ``in1d``
-----------------------------------------
-The new function ``isin`` tests whether each element of an N-dimensional
-array is present anywhere within a second array. It is an enhancement
-of ``in1d`` that preserves the shape of the first array.
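-
-For example (illustrative; the repr of boolean arrays may differ between
-versions)::
-
-    >>> import numpy as np
-    >>> element = np.array([[0, 2], [4, 6]])
-    >>> np.isin(element, [1, 2, 4, 8])
-    array([[False,  True],
-           [ True, False]], dtype=bool)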
-
-Temporary elision
------------------
-On platforms providing the ``backtrace`` function NumPy will try to avoid
-creating temporaries in expressions involving basic numeric types.
-For example ``d = a + b + c`` is transformed to ``d = a + b; d += c`` which can
-improve performance for large arrays as less memory bandwidth is required to
-perform the operation.
-
-``axis`` argument for ``unique``
---------------------------------
-In an N-dimensional array, the user can now choose the axis along which to look
-for duplicate N-1-dimensional elements using ``numpy.unique``. The original
-behaviour is recovered if ``axis=None`` (default).
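-
-For example (illustrative)::
-
-    >>> import numpy as np
-    >>> a = np.array([[1, 0], [1, 0], [2, 3]])
-    >>> np.unique(a, axis=0)
-    array([[1, 0],
-           [2, 3]])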
-
-``np.gradient`` now supports unevenly spaced data
--------------------------------------------------
-Users can now specify a not-constant spacing for data.
-In particular ``np.gradient`` can now take:
-
-1. A single scalar to specify a sample distance for all dimensions.
-2. N scalars to specify a constant sample distance for each dimension.
- i.e. ``dx``, ``dy``, ``dz``, ...
-3. N arrays to specify the coordinates of the values along each dimension of F.
-   The length of the array must match the size of the corresponding dimension.
-4. Any combination of N scalars/arrays with the meaning of 2. and 3.
-
-This means that, e.g., it is now possible to do the following::
-
- >>> f = np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float)
- >>> dx = 2.
- >>> y = [1., 1.5, 3.5]
- >>> np.gradient(f, dx, y)
- [array([[ 1. , 1. , -0.5], [ 1. , 1. , -0.5]]),
- array([[ 2. , 2. , 2. ], [ 2. , 1.7, 0.5]])]
-
-Support for returning arrays of arbitrary dimensions in ``apply_along_axis``
-----------------------------------------------------------------------------
-Previously, only scalars or 1D arrays could be returned by the function passed
-to ``apply_along_axis``. Now, it can return an array of any dimensionality
-(including 0D), and the shape of this array replaces the axis of the array
-being iterated over.
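-
-For example, a function returning a 2d array for each 1d slice (shape-only
-output, version independent)::
-
-    >>> import numpy as np
-    >>> np.apply_along_axis(np.diag, -1, np.ones((2, 3))).shape
-    (2, 3, 3)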
-
-``.ndim`` property added to ``dtype`` to complement ``.shape``
---------------------------------------------------------------
-For consistency with ``ndarray`` and ``broadcast``, ``d.ndim`` is a shorthand
-for ``len(d.shape)``.
-
-Support for tracemalloc in Python 3.6
--------------------------------------
-NumPy now supports memory tracing with tracemalloc_ module of Python 3.6 or
-newer. Memory allocations from NumPy are placed into the domain defined by
-``numpy.lib.tracemalloc_domain``.
-Note that NumPy allocation will not show up in tracemalloc_ of earlier Python
-versions.
-
-.. _tracemalloc: https://docs.python.org/3/library/tracemalloc.html
-
-NumPy may be built with relaxed stride checking debugging
----------------------------------------------------------
-Setting NPY_RELAXED_STRIDES_DEBUG=1 in the environment when relaxed stride
-checking is enabled will cause NumPy to be compiled with the affected strides
-set to the maximum value of npy_intp in order to help detect invalid usage of
-the strides in downstream projects. When enabled, invalid usage often results
-in an error being raised, but the exact type of error depends on the details of
-the code. TypeError and OverflowError have been observed in the wild.
-
-It was previously the case that this option was disabled for releases and
-enabled in master and changing between the two required editing the code. It is
-now disabled by default but can be enabled for test builds.
-
-
-Improvements
-============
-
-Ufunc behavior for overlapping inputs
--------------------------------------
-
-Operations where ufunc input and output operands have memory overlap
-produced undefined results in previous NumPy versions, due to data
-dependency issues. In NumPy 1.13.0, results from such operations are
-now defined to be the same as for equivalent operations where there is
-no memory overlap.
-
-Operations affected now make temporary copies, as needed to eliminate
-data dependency. As detecting these cases is computationally
-expensive, a heuristic is used, which may in rare cases result in
-needless temporary copies. For operations where the data dependency
-is simple enough for the heuristic to analyze, temporary copies will
-not be made even if the arrays overlap, if it can be deduced copies
-are not necessary. As an example, ``np.add(a, b, out=a)`` will not
-involve copies.
-
-To illustrate a previously undefined operation::
-
- >>> x = np.arange(16).astype(float)
- >>> np.add(x[1:], x[:-1], out=x[1:])
-
-In NumPy 1.13.0 the last line is guaranteed to be equivalent to::
-
- >>> np.add(x[1:].copy(), x[:-1].copy(), out=x[1:])
-
-A similar operation with simple non-problematic data dependence is::
-
- >>> x = np.arange(16).astype(float)
- >>> np.add(x[1:], x[:-1], out=x[:-1])
-
-It will continue to produce the same results as in previous NumPy
-versions, and will not involve unnecessary temporary copies.
-
-The change applies also to in-place binary operations, for example::
-
- >>> x = np.random.rand(500, 500)
- >>> x += x.T
-
-This statement is now guaranteed to be equivalent to ``x[...] = x + x.T``,
-whereas in previous NumPy versions the results were undefined.
-
-Partial support for 64-bit f2py extensions with MinGW
------------------------------------------------------
-Extensions that incorporate Fortran libraries can now be built using the free
-MinGW_ toolset, also under Python 3.5. This works best for extensions that only
-do calculations and use the runtime modestly (reading and writing from files,
-for instance). Note that this does not remove the need for Mingwpy; if you make
-extensive use of the runtime, you will most likely run into issues_. Instead,
-it should be regarded as a band-aid until Mingwpy is fully functional.
-
-Extensions can also be compiled using the MinGW toolset using the runtime
-library from the (moveable) WinPython 3.4 distribution, which can be useful for
-programs with a PySide1/Qt4 front-end.
-
-.. _MinGW: https://sf.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/6.2.0/threads-win32/seh/
-
-.. _issues: https://mingwpy.github.io/issues.html
-
-Performance improvements for ``packbits`` and ``unpackbits``
-------------------------------------------------------------
-The functions ``numpy.packbits`` with boolean input and ``numpy.unpackbits`` have
-been optimized to be significantly faster for contiguous data.
-
-Fix for PPC long double floating point information
---------------------------------------------------
-In previous versions of NumPy, the ``finfo`` function returned invalid
-information about the `double double`_ format of the ``longdouble`` float type
-on Power PC (PPC). The invalid values resulted from the failure of the NumPy
-algorithm to deal with the variable number of digits in the significand
-that are a feature of `PPC long doubles`_. This release by-passes the failing
-algorithm by using heuristics to detect the presence of the PPC double double
-format. A side-effect of using these heuristics is that the ``finfo``
-function is faster than previous releases.
-
-.. _PPC long doubles: https://www.ibm.com/support/knowledgecenter/en/ssw_aix_71/com.ibm.aix.genprogc/128bit_long_double_floating-point_datatype.htm
-
-.. _double double: https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
-
-Better default repr for ``ndarray`` subclasses
-----------------------------------------------
-Subclasses of ndarray with no ``repr`` specialization now correctly indent
-their data and type lines.
-
-More reliable comparisons of masked arrays
-------------------------------------------
-Comparisons of masked arrays were buggy for masked scalars and failed for
-structured arrays with dimension higher than one. Both problems are now
-solved. In the process, it was ensured that, when computing the result for a
-structured array, masked fields are properly ignored, i.e., the result is
-equal if all fields that are non-masked in both arrays are equal. This makes
-the behaviour identical to comparing an unstructured masked array and then
-doing ``.all()`` over some axis.
-
-np.matrix with boolean elements can now be created using the string syntax
-----------------------------------------------------------------------------
-``np.matrix`` previously failed whenever one attempted to use it with booleans,
-e.g., ``np.matrix('True')``. Now, this works as expected.
-
-More ``linalg`` operations now accept empty vectors and matrices
-----------------------------------------------------------------
-All of the following functions in ``np.linalg`` now work when given input
-arrays with a 0 in the last two dimensions: ``det``, ``slogdet``, ``pinv``,
-``eigvals``, ``eigvalsh``, ``eig``, ``eigh``.
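-
-As a minimal illustration (a sketch; the determinant of an empty matrix is the
-empty product, and exact scalar types may vary)::
-
- >>> np.linalg.det(np.zeros((0, 0)))
- 1.0
- >>> np.linalg.slogdet(np.zeros((0, 0)))
- (1.0, 0.0)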
-
-Bundled version of LAPACK is now 3.2.2
---------------------------------------
-NumPy comes bundled with a minimal implementation of LAPACK for systems without
-a LAPACK library installed, under the name of ``lapack_lite``. This has been
-upgraded from LAPACK 3.0.0 (June 30, 1999) to LAPACK 3.2.2 (June 30, 2010). See
-the `LAPACK changelogs`_ for details on all the changes this entails.
-
-While no new features are exposed through ``numpy``, this fixes some bugs
-regarding "workspace" sizes, and in some places may use faster algorithms.
-
-.. _`LAPACK changelogs`: http://www.netlib.org/lapack/release_notes.html#_4_history_of_lapack_releases
-
-``reduce`` of ``np.hypot`` and ``np.logical_xor`` allowed in more cases
-------------------------------------------------------------------------------
-This now works on empty arrays, returning 0, and can reduce over multiple axes.
-Previously, a ``ValueError`` was thrown in these cases.
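-
-For example (a sketch; the identity of ``hypot`` is 0 and that of
-``logical_xor`` is ``False``)::
-
- >>> np.hypot.reduce(np.array([], dtype=float))
- 0.0
- >>> np.logical_xor.reduce(np.zeros((2, 3), dtype=bool), axis=(0, 1))
- False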
-
-Better ``repr`` of object arrays
---------------------------------
-Object arrays that contain themselves no longer cause a recursion error.
-
-Object arrays that contain ``list`` objects are now printed in a way that makes
-clear the difference between a 2d object array, and a 1d object array of lists.
-
-Changes
-=======
-
-``argsort`` on masked arrays takes the same default arguments as ``sort``
--------------------------------------------------------------------------
-By default, ``argsort`` now places the masked values at the end of the sorted
-array, in the same way that ``sort`` already did. Additionally, the
-``end_with`` argument is added to ``argsort``, for consistency with ``sort``.
-Note that this argument is not added at the end, so it breaks any code that
-passed ``fill_value`` as a positional argument.
-
-``average`` now preserves subclasses
-------------------------------------
-For ndarray subclasses, ``numpy.average`` will now return an instance of the
-subclass, matching the behavior of most other NumPy functions such as ``mean``.
-As a consequence, calls that previously returned a scalar may now return a
-subclass array scalar.
-
-``array == None`` and ``array != None`` do element-wise comparison
-------------------------------------------------------------------
-Previously these operations returned scalars ``False`` and ``True`` respectively.
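-
-A minimal sketch of the new behavior, shown for an object array (repr as
-printed by NumPy 1.13)::
-
- >>> a = np.array([1, None, 3], dtype=object)
- >>> a == None
- array([False,  True, False], dtype=bool)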
-
-``np.equal, np.not_equal`` for object arrays ignores object identity
---------------------------------------------------------------------
-Previously, these functions always treated identical objects as equal. This
-shortcut overrode comparison failures, comparisons of objects that did not
-return booleans (such as np.arrays), and comparisons of objects whose results
-differed from object identity (such as NaNs).
-
-Boolean indexing changes
-------------------------
-* Boolean array-likes (such as lists of python bools) are always treated as
- boolean indexes.
-
-* Boolean scalars (including python ``True``) are legal boolean indexes and
- never treated as integers.
-
-* Boolean indexes must match the dimension of the axis that they index.
-
-* Boolean indexes used on the lhs of an assignment must match the dimensions of
- the rhs.
-
-* Boolean indexing into scalar arrays returns a new 1-d array, as the example
- below shows. This means that ``array(1)[array(True)]`` gives ``array([1])``
- and not the original array.
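-
-A sketch of the scalar-array case::
-
- >>> np.array(1)[np.array(True)]
- array([1])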
-
-``np.random.multivariate_normal`` behavior with bad covariance matrix
----------------------------------------------------------------------
-
-It is now possible to adjust the behavior of the function when dealing
-with the covariance matrix by using two new keyword arguments:
-
-* ``tol`` can be used to specify a tolerance to use when checking that
- the covariance matrix is positive semidefinite.
-
-* ``check_valid`` can be used to configure what the function will do in the
- presence of a matrix that is not positive semidefinite. Valid options are
- ``ignore``, ``warn`` and ``raise``. The default value, ``warn``, keeps the
- behavior used in previous releases.
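-
-A hedged sketch of the new keywords (the covariance below has a negative
-eigenvalue, so it is not positive semidefinite)::
-
- >>> mean = [0, 0]
- >>> cov = [[1, 2], [2, 1]]  # not positive semidefinite
- >>> samples = np.random.multivariate_normal(mean, cov, check_valid='warn')
- >>> np.random.multivariate_normal(mean, cov, check_valid='raise')  # ValueError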
-
-``assert_array_less`` compares ``np.inf`` and ``-np.inf`` now
--------------------------------------------------------------
-Previously, ``np.testing.assert_array_less`` ignored all infinite values. This
-was not the expected behavior, either according to the documentation or
-intuitively. Now, -inf < x < inf is considered ``True`` for any real number x,
-and all other cases fail.
-
-``assert_array_`` and masked arrays ``assert_equal`` hide fewer warnings
--------------------------------------------------------------------------
-Some warnings that were previously hidden by the ``assert_array_``
-functions are no longer hidden. In most cases the warnings should be
-correct and, should they occur, will require changes to the tests using
-these functions.
-For the masked array ``assert_equal`` version, warnings may occur when
-comparing NaT values. The function presently does not handle NaT or NaN
-specifically, so should a warning show up due to this change, it may be
-best to avoid the function for those values at this time.
-
-``offset`` attribute value in ``memmap`` objects
-------------------------------------------------
-The ``offset`` attribute in a ``memmap`` object is now set to the
-offset into the file. This is a behaviour change only for offsets
-greater than ``mmap.ALLOCATIONGRANULARITY``.
-
-``np.real`` and ``np.imag`` return scalars for scalar inputs
-------------------------------------------------------------
-Previously, ``np.real`` and ``np.imag`` used to return array objects when
-provided a scalar input, which was inconsistent with other functions like
-``np.angle`` and ``np.conj``.
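-
-A minimal illustration of the new scalar return values::
-
- >>> np.real(1.0)
- 1.0
- >>> np.imag(1 + 2j)
- 2.0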
-
-The polynomial convenience classes cannot be passed to ufuncs
--------------------------------------------------------------
-The ABCPolyBase class, from which the convenience classes are derived, sets
-``__array_ufunc__ = None`` in order to opt out of ufuncs. If a polynomial
-convenience class instance is passed as an argument to a ufunc, a ``TypeError``
-will now be raised.
-
-Output arguments to ufuncs can be tuples also for ufunc methods
----------------------------------------------------------------
-For calls to ufuncs, it was already possible, and recommended, to use an
-``out`` argument with a tuple for ufuncs with multiple outputs. This has now
-been extended to output arguments in the ``reduce``, ``accumulate``, and
-``reduceat`` methods. This is mostly for compatibility with ``__array_ufunc__``;
-there are no ufuncs yet that have more than one output.
+++ /dev/null
-==========================
-NumPy 1.13.1 Release Notes
-==========================
-
-This is a bugfix release for problems found in 1.13.0. The major changes are
-fixes for the new memory overlap detection and temporary elision as well as
-reversion of the removal of the boolean binary ``-`` operator. Users of 1.13.0
-should upgrade.
-
-The Python versions supported are 2.7 and 3.4 - 3.6. Note that the Python 3.6
-wheels available from PIP are built against 3.6.1, hence will not work when
-used with 3.6.0 due to Python bug 29943_. NumPy 1.13.2 will be released shortly
-after Python 3.6.2 is out to fix that problem. If you are using 3.6.0 the
-workaround is to upgrade to 3.6.1 or use an earlier Python version.
-
-.. _29943: https://bugs.python.org/issue29943
-
-
-Pull requests merged
-====================
-A total of 19 pull requests were merged for this release.
-
-* #9240 DOC: BLD: fix lots of Sphinx warnings/errors.
-* #9255 Revert "DEP: Raise TypeError for subtract(bool, bool)."
-* #9261 BUG: don't elide into readonly and updateifcopy temporaries for...
-* #9262 BUG: fix missing keyword rename for common block in numpy.f2py
-* #9263 BUG: handle resize of 0d array
-* #9267 DOC: update f2py front page and some doc build metadata.
-* #9299 BUG: Fix Intel compilation on Unix.
-* #9317 BUG: fix wrong ndim used in empty where check
-* #9319 BUG: Make extensions compilable with MinGW on Py2.7
-* #9339 BUG: Prevent crash if ufunc doc string is null
-* #9340 BUG: umath: un-break ufunc where= when no out= is given
-* #9371 DOC: Add isnat/positive ufunc to documentation
-* #9372 BUG: Fix error in fromstring function from numpy.core.records...
-* #9373 BUG: ')' is printed at the end pointer of the buffer in numpy.f2py.
-* #9374 DOC: Create NumPy 1.13.1 release notes.
-* #9376 BUG: Prevent hang traversing ufunc userloop linked list
-* #9377 DOC: Use x1 and x2 in the heaviside docstring.
-* #9378 DOC: Add $PARAMS to the isnat docstring
-* #9379 DOC: Update the 1.13.1 release notes
-
-
-Contributors
-============
-A total of 12 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Andras Deak +
-* Bob Eldering +
-* Charles Harris
-* Daniel Hrisca +
-* Eric Wieser
-* Joshua Leahy +
-* Julian Taylor
-* Michael Seifert
-* Pauli Virtanen
-* Ralf Gommers
-* Roland Kaufmann
-* Warren Weckesser
+++ /dev/null
-==========================
-NumPy 1.13.2 Release Notes
-==========================
-
-This is a bugfix release for some problems found since 1.13.1. The most
-important fixes are for CVE-2017-12852 and temporary elision. Users of earlier
-versions of 1.13 should upgrade.
-
-The Python versions supported are 2.7 and 3.4 - 3.6. The Python 3.6 wheels
-available from PIP are built with Python 3.6.2 and should be compatible with
-all previous versions of Python 3.6. The Windows wheels are now built
-with OpenBLAS instead of ATLAS, which should improve the performance of the linear
-algebra functions.
-
-Contributors
-============
-
-A total of 12 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Allan Haldane
-* Brandon Carter
-* Charles Harris
-* Eric Wieser
-* Iryna Shcherbina +
-* James Bourbeau +
-* Jonathan Helmus
-* Julian Taylor
-* Matti Picus
-* Michael Lamparski +
-* Michael Seifert
-* Ralf Gommers
-
-Pull requests merged
-====================
-
-A total of 20 pull requests were merged for this release.
-
-* #9390 BUG: Return the poly1d coefficients array directly
-* #9555 BUG: Fix regression in 1.13.x in distutils.mingw32ccompiler.
-* #9556 BUG: Fix true_divide when dtype=np.float64 specified.
-* #9557 DOC: Fix some rst markup in numpy/doc/basics.py.
-* #9558 BLD: Remove -xhost flag from IntelFCompiler.
-* #9559 DOC: Removes broken docstring example (source code, png, pdf)...
-* #9580 BUG: Add hypot and cabs functions to WIN32 blacklist.
-* #9732 BUG: Make scalar function elision check if temp is writeable.
-* #9736 BUG: Various fixes to np.gradient
-* #9742 BUG: Fix np.pad for CVE-2017-12852
-* #9744 BUG: Check for exception in sort functions, add tests
-* #9745 DOC: Add whitespace after "versionadded::" directive so it actually...
-* #9746 BUG: Memory leak in np.dot of size 0
-* #9747 BUG: Adjust gfortran version search regex
-* #9757 BUG: Cython 0.27 breaks NumPy on Python 3.
-* #9764 BUG: Ensure `_npy_scaled_cexp{,f,l}` is defined when needed.
-* #9765 BUG: PyArray_CountNonzero does not check for exceptions
-* #9766 BUG: Fixes histogram monotonicity check for unsigned bin values
-* #9767 BUG: Ensure consistent result dtype of count_nonzero
-* #9771 BUG, MAINT: Fix mtrand for Cython 0.27.
+++ /dev/null
-==========================
-NumPy 1.13.3 Release Notes
-==========================
-
-This is a bugfix release for some problems found since 1.13.1. The most
-important fixes are for CVE-2017-12852 and temporary elision. Users of earlier
-versions of 1.13 should upgrade.
-
-The Python versions supported are 2.7 and 3.4 - 3.6. The Python 3.6 wheels
-available from PIP are built with Python 3.6.2 and should be compatible with
-all previous versions of Python 3.6. It was cythonized with Cython 0.26.1,
-which should be free of the bugs found in 0.27 while also being compatible with
-Python 3.7-dev. The Windows wheels were built with OpenBLAS instead of ATLAS,
-which should improve the performance of the linear algebra functions.
-
-The NumPy 1.13.3 release is a re-release of 1.13.2, which suffered from a
-bug in Cython 0.27.0.
-
-Contributors
-============
-
-A total of 12 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Allan Haldane
-* Brandon Carter
-* Charles Harris
-* Eric Wieser
-* Iryna Shcherbina +
-* James Bourbeau +
-* Jonathan Helmus
-* Julian Taylor
-* Matti Picus
-* Michael Lamparski +
-* Michael Seifert
-* Ralf Gommers
-
-Pull requests merged
-====================
-
-A total of 22 pull requests were merged for this release.
-
-* #9390 BUG: Return the poly1d coefficients array directly
-* #9555 BUG: Fix regression in 1.13.x in distutils.mingw32ccompiler.
-* #9556 BUG: Fix true_divide when dtype=np.float64 specified.
-* #9557 DOC: Fix some rst markup in numpy/doc/basics.py.
-* #9558 BLD: Remove -xhost flag from IntelFCompiler.
-* #9559 DOC: Removes broken docstring example (source code, png, pdf)...
-* #9580 BUG: Add hypot and cabs functions to WIN32 blacklist.
-* #9732 BUG: Make scalar function elision check if temp is writeable.
-* #9736 BUG: Various fixes to np.gradient
-* #9742 BUG: Fix np.pad for CVE-2017-12852
-* #9744 BUG: Check for exception in sort functions, add tests
-* #9745 DOC: Add whitespace after "versionadded::" directive so it actually...
-* #9746 BUG: Memory leak in np.dot of size 0
-* #9747 BUG: Adjust gfortran version search regex
-* #9757 BUG: Cython 0.27 breaks NumPy on Python 3.
-* #9764 BUG: Ensure `_npy_scaled_cexp{,f,l}` is defined when needed.
-* #9765 BUG: PyArray_CountNonzero does not check for exceptions
-* #9766 BUG: Fixes histogram monotonicity check for unsigned bin values
-* #9767 BUG: Ensure consistent result dtype of count_nonzero
-* #9771 BUG, MAINT: Fix mtrand for Cython 0.27.
-* #9772 DOC: Create the 1.13.2 release notes.
-* #9794 DOC: Create 1.13.3 release notes.
+++ /dev/null
-==========================
-NumPy 1.14.0 Release Notes
-==========================
-
-Numpy 1.14.0 is the result of seven months of work and contains a large number
-of bug fixes and new features, along with several changes with potential
-compatibility issues. The major changes that users will notice are the
-stylistic changes in the way numpy arrays and scalars are printed, changes
-that will affect doctests. See below for details on how to preserve the
-old-style printing when needed.
-
-A major decision affecting future development concerns the schedule for
-dropping Python 2.7 support in the runup to 2020. The decision has been made to
-support 2.7 for all releases made in 2018, with the last release being
-designated a long term release with support for bug fixes extending through
-2019. In 2019 support for 2.7 will be dropped in all new releases. More details
-can be found in `NEP 12`_.
-
-This release supports Python 2.7 and 3.4 - 3.6.
-
-.. _`NEP 12`: http://www.numpy.org/neps/nep-0014-dropping-python2.7-proposal.html
-
-
-Highlights
-==========
-
-* The `np.einsum` function uses BLAS when possible
-
-* ``genfromtxt``, ``loadtxt``, ``fromregex`` and ``savetxt`` can now handle
- files with arbitrary encodings supported by Python.
-
-* Major improvements to printing of NumPy arrays and scalars.
-
-
-New functions
-=============
-
-* ``parametrize``: decorator added to numpy.testing
-
-* ``chebinterpolate``: Interpolate function at Chebyshev points.
-
-* ``format_float_positional`` and ``format_float_scientific`` : format
- floating-point scalars unambiguously with control of rounding and padding.
-
-* ``PyArray_ResolveWritebackIfCopy`` and ``PyArray_SetWritebackIfCopyBase``,
- new C-API functions useful in achieving PyPy compatibility.
-
-
-Deprecations
-============
-
-* Using ``np.bool_`` objects in place of integers is deprecated. Previously
- ``operator.index(np.bool_)`` was legal and allowed constructs such as
- ``[1, 2, 3][np.True_]``. That was misleading, as it behaved differently from
- ``np.array([1, 2, 3])[np.True_]``.
-
-* Truth testing of an empty array is deprecated. To check if an array is not
- empty, use ``array.size > 0``.
-
-* Calling ``np.bincount`` with ``minlength=None`` is deprecated.
- ``minlength=0`` should be used instead.
-
-* Calling ``np.fromstring`` with the default value of the ``sep`` argument is
- deprecated. When that argument is not provided, a broken version of
- ``np.frombuffer`` is used that silently accepts unicode strings and -- after
- encoding them as either utf-8 (python 3) or the default encoding
- (python 2) -- treats them as binary data. If reading binary data is
- desired, ``np.frombuffer`` should be used directly.
-
-* The ``style`` option of array2string is deprecated in non-legacy printing mode.
-
-* ``PyArray_SetUpdateIfCopyBase`` has been deprecated. For NumPy versions >= 1.14
- use ``PyArray_SetWritebackIfCopyBase`` instead, see `C API changes` below for
- more details.
-
-* The use of ``UPDATEIFCOPY`` arrays is deprecated, see `C API changes` below
- for details. We will not be dropping support for those arrays, but they are
- not compatible with PyPy.
-
-
-Future Changes
-==============
-
-* ``np.issubdtype`` will stop downcasting dtype-like arguments.
- It might be expected that ``issubdtype(np.float32, 'float64')`` and
- ``issubdtype(np.float32, np.float64)`` mean the same thing - however, there
- was an undocumented special case that translated the former into
- ``issubdtype(np.float32, np.floating)``, giving the surprising result of True.
-
- This translation now gives a warning that explains what translation is
- occurring. In the future, the translation will be disabled, and the first
- example will be made equivalent to the second.
-
-* ``np.linalg.lstsq`` default for ``rcond`` will be changed. The ``rcond``
- parameter to ``np.linalg.lstsq`` will change its default to machine precision
- times the largest of the input array dimensions. A FutureWarning is issued
- when ``rcond`` is not passed explicitly.
-
-* ``a.flat.__array__()`` will return a writeable copy of ``a`` when ``a`` is
- non-contiguous. Previously it returned an UPDATEIFCOPY array when ``a`` was
- writeable. Currently it returns a non-writeable copy. See gh-7054 for a
- discussion of the issue.
-
-* Unstructured void array's ``.item`` method will return a bytes object. In the
- future, calling ``.item()`` on arrays or scalars of ``np.void`` datatype will
- return a ``bytes`` object instead of a buffer or int array, the same as
- returned by ``bytes(void_scalar)``. This may affect code which assumed the
- return value was mutable, which will no longer be the case. A
- ``FutureWarning`` is now issued when this would occur.
-
-
-Compatibility notes
-===================
-
-The mask of a masked array view is also a view rather than a copy
------------------------------------------------------------------
-There was a FutureWarning about this change in NumPy 1.11.x. In short, it is
-now the case that, when changing a view of a masked array, changes to the mask
-are propagated to the original. That was not previously the case. This change
-affects slices in particular. Note that this does not yet work properly if the
-mask of the original array is ``nomask`` and the mask of the view is changed.
-See gh-5580 for an extended discussion. The original behavior of having a copy
-of the mask can be obtained by calling the ``unshare_mask`` method of the view.
-
-``np.ma.masked`` is no longer writeable
----------------------------------------
-Attempts to mutate the ``masked`` constant now error, as the underlying arrays
-are marked readonly. In the past, it was possible to get away with::
-
- # emulating a function that sometimes returns np.ma.masked
- val = random.choice([np.ma.masked, 10])
- var_arr = np.asarray(val)
- var_arr += 1 # now errors, previously changed np.ma.masked.data
-
-``np.ma`` functions producing ``fill_value`` s have changed
------------------------------------------------------------
-Previously, ``np.ma.default_fill_value`` would return a 0d array, but
-``np.ma.minimum_fill_value`` and ``np.ma.maximum_fill_value`` would return a
-tuple of the fields. Instead, all three methods return a structured ``np.void``
-object, which is what you would already find in the ``.fill_value`` attribute.
-
-Additionally, the dtype guessing now matches that of ``np.array`` - so when
-passing a python scalar ``x``, ``maximum_fill_value(x)`` is always the same as
-``maximum_fill_value(np.array(x))``. Previously ``x = long(1)`` on Python 2
-violated this assumption.
-
-``a.flat.__array__()`` returns non-writeable arrays when ``a`` is non-contiguous
---------------------------------------------------------------------------------
-The intent is that the UPDATEIFCOPY array previously returned when ``a`` was
-non-contiguous will be replaced by a writeable copy in the future. This
-temporary measure is intended to notify those who expect the underlying array
-to be modified in this situation that this will no longer be the case. The most
-likely places for this to be noticed is when expressions of the form
-``np.asarray(a.flat)`` are used, or when ``a.flat`` is passed as the out
-parameter to a ufunc.
-
-``np.tensordot`` now returns zero array when contracting over 0-length dimension
---------------------------------------------------------------------------------
-Previously ``np.tensordot`` raised a ValueError when contracting over 0-length
-dimension. Now it returns a zero array, which is consistent with the behaviour
-of ``np.dot`` and ``np.einsum``.
-
-``numpy.testing`` reorganized
------------------------------
-This is not expected to cause problems, but possibly something has been left
-out. If you experience an unexpected import problem using ``numpy.testing``,
-let us know.
-
-``np.asfarray`` no longer accepts non-dtypes through the ``dtype`` argument
----------------------------------------------------------------------------
-This previously would accept ``dtype=some_array``, with the implied semantics
-of ``dtype=some_array.dtype``. This was undocumented, unique across numpy
-functions, and if used was most likely a typo.
-
-1D ``np.linalg.norm`` preserves float input types, even for arbitrary orders
-----------------------------------------------------------------------------
-Previously, this would promote to ``float64`` when arbitrary orders were
-passed, despite not doing so under the simple cases::
-
- >>> f32 = np.float32([[1, 2]])
- >>> np.linalg.norm(f32, 2.0, axis=-1).dtype
- dtype('float32')
- >>> np.linalg.norm(f32, 2.0001, axis=-1).dtype
- dtype('float64') # numpy 1.13
- dtype('float32') # numpy 1.14
-
-This change affects only ``float32`` and ``float16`` arrays.
-
-``count_nonzero(arr, axis=())`` now counts over no axes, not all axes
----------------------------------------------------------------------
-Elsewhere, ``axis=()`` is always understood as "no axes", but
-`count_nonzero` had a special case to treat this as "all axes". This was
-inconsistent and surprising. The correct way to count over all axes has always
-been to pass ``axis=None``.
-
-``__init__.py`` files added to test directories
------------------------------------------------
-This is for pytest compatibility in the case of duplicate test file names in
-the different directories. As a result, ``run_module_suite`` no longer works,
-i.e., ``python <path-to-test-file>`` results in an error.
-
-``.astype(bool)`` on unstructured void arrays now calls ``bool`` on each element
---------------------------------------------------------------------------------
-On Python 2, ``void_array.astype(bool)`` would always return an array of
-``True``, unless the dtype is ``V0``. On Python 3, this operation would usually
-crash. Going forward, ``astype`` matches the behavior of ``bool(np.void)``,
-considering a buffer of all zeros as false, and anything else as true.
-Checks for ``V0`` can still be done with ``arr.dtype.itemsize == 0``.
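-
-A minimal sketch of the new behavior (an all-zero buffer is false)::
-
- >>> np.zeros(2, dtype='V4').astype(bool)
- array([False, False])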
-
-``MaskedArray.squeeze`` never returns ``np.ma.masked``
-------------------------------------------------------
-``np.squeeze`` is documented as returning a view, but the masked variant would
-sometimes return ``masked``, which is not a view. This has been fixed, so that
-the result is always a view on the original masked array.
-This breaks any code that used ``masked_arr.squeeze() is np.ma.masked``, but
-fixes code that writes to the result of `.squeeze()`.
-
-Renamed first parameter of ``can_cast`` from ``from`` to ``from_``
-------------------------------------------------------------------
-The previous parameter name ``from`` is a reserved keyword in Python, which made
-it difficult to pass the argument by name. This has been fixed by renaming
-the parameter to ``from_``.
-
-``isnat`` raises ``TypeError`` when passed wrong type
-------------------------------------------------------
-The ufunc ``isnat`` used to raise a ``ValueError`` when it was not passed
-variables of type ``datetime`` or ``timedelta``. This has been changed to
-raising a ``TypeError``.
-
-``dtype.__getitem__`` raises ``TypeError`` when passed wrong type
------------------------------------------------------------------
-When indexed with a float, the dtype object used to raise ``ValueError``.
-
-User-defined types now need to implement ``__str__`` and ``__repr__``
----------------------------------------------------------------------
-Previously, user-defined types could fall back to a default implementation of
-``__str__`` and ``__repr__`` implemented in numpy, but this has now been
-removed. Now user-defined types will fall back to the python default
-``object.__str__`` and ``object.__repr__``.
-
-Many changes to array printing, disableable with the new "legacy" printing mode
--------------------------------------------------------------------------------
-The ``str`` and ``repr`` of ndarrays and numpy scalars have been changed in
-a variety of ways. These changes are likely to break downstream users'
-doctests.
-
-These new behaviors can be disabled to mostly reproduce numpy 1.13 behavior by
-enabling the new 1.13 "legacy" printing mode. This is enabled by calling
-``np.set_printoptions(legacy="1.13")``, or using the new ``legacy`` argument to
-``np.array2string``, as ``np.array2string(arr, legacy='1.13')``.
-
-In summary, the major changes are:
-
-* For floating-point types:
-
- * The ``repr`` of float arrays often omits a space previously printed
- in the sign position. See the new ``sign`` option to ``np.set_printoptions``.
- * Floating-point arrays and scalars use a new algorithm for decimal
- representations, giving the shortest unique representation. This will
- usually shorten ``float16`` fractional output, and sometimes ``float32`` and
- ``float128`` output. ``float64`` should be unaffected. See the new
- ``floatmode`` option to ``np.set_printoptions``.
- * Float arrays printed in scientific notation no longer use fixed-precision,
- and now instead show the shortest unique representation.
- * The ``str`` of floating-point scalars is no longer truncated in python2.
-
-* For other data types:
-
- * Non-finite complex scalars print like ``nanj`` instead of ``nan*j``.
- * ``NaT`` values in datetime arrays are now properly aligned.
- * Arrays and scalars of ``np.void`` datatype are now printed using hex
- notation.
-
-* For line-wrapping:
-
- * The "dtype" part of ndarray reprs will now be printed on the next line
- if there isn't space on the last line of array output.
- * The ``linewidth`` format option is now always respected.
- The `repr` or `str` of an array will never exceed this, unless a single
- element is too wide.
- * The last line of an array string will never have more elements than earlier
- lines.
- * An extra space is no longer inserted on the first line if the elements are
- too wide.
-
-* For summarization (the use of ``...`` to shorten long arrays):
-
- * A trailing comma is no longer inserted for ``str``.
- Previously, ``str(np.arange(1001))`` gave
- ``'[ 0 1 2 ..., 998 999 1000]'``, which has an extra comma.
- * For arrays of 2-D and beyond, when ``...`` is printed on its own line in
- order to summarize any but the last axis, newlines are now appended to that
- line to match its leading newlines and a trailing space character is
- removed.
-
-* ``MaskedArray`` arrays now separate printed elements with commas, always
- print the dtype, and correctly wrap the elements of long arrays to multiple
- lines. If there is more than 1 dimension, the array attributes are now
- printed in a new "left-justified" printing style.
-* ``recarray`` arrays no longer print a trailing space before their dtype, and
- wrap to the right number of columns.
-* 0d arrays no longer have their own idiosyncratic implementations of ``str``
- and ``repr``. The ``style`` argument to ``np.array2string`` is deprecated.
-* Arrays of ``bool`` datatype will omit the datatype in the ``repr``.
-* User-defined ``dtypes`` (subclasses of ``np.generic``) now need to
- implement ``__str__`` and ``__repr__``.
-
-Some of these changes are described in more detail below. If you need to retain
-the previous behavior for doctests or other reasons, you may want to do
-something like::
-
- # FIXME: We need the str/repr formatting used in Numpy < 1.14.
- try:
- np.set_printoptions(legacy='1.13')
- except TypeError:
- pass
-
-
-C API changes
-=============
-
-PyPy compatible alternative to ``UPDATEIFCOPY`` arrays
-------------------------------------------------------
-``UPDATEIFCOPY`` arrays are contiguous copies of existing arrays, possibly with
-different dimensions, whose contents are copied back to the original array when
-their refcount goes to zero and they are deallocated. Because PyPy does not use
-refcounts, they do not function correctly with PyPy. NumPy is in the process of
-eliminating their use internally and two new C-API functions,
-
-* ``PyArray_SetWritebackIfCopyBase``
-* ``PyArray_ResolveWritebackIfCopy``,
-
-have been added together with a complementary flag,
-``NPY_ARRAY_WRITEBACKIFCOPY``. Using the new functionality also requires that
-some flags be changed when new arrays are created, to wit:
-``NPY_ARRAY_INOUT_ARRAY`` should be replaced by ``NPY_ARRAY_INOUT_ARRAY2`` and
-``NPY_ARRAY_INOUT_FARRAY`` should be replaced by ``NPY_ARRAY_INOUT_FARRAY2``.
-Arrays created with these new flags will then have the ``WRITEBACKIFCOPY``
-semantics.
-
-If PyPy compatibility is not a concern, these new functions can be ignored,
-although there will be a ``DeprecationWarning``. If you do wish to pursue PyPy
-compatibility, more information on these functions and their use may be found
-in the c-api_ documentation and the example in how-to-extend_.
-
-.. _c-api: https://github.com/numpy/numpy/blob/master/doc/source/reference/c-api.array.rst
-.. _how-to-extend: https://github.com/numpy/numpy/blob/master/doc/source/user/c-info.how-to-extend.rst
-
-
-New Features
-============
-
-Encoding argument for text IO functions
----------------------------------------
-``genfromtxt``, ``loadtxt``, ``fromregex`` and ``savetxt`` can now handle files
-with arbitrary encoding supported by Python via the encoding argument.
-For backward compatibility the argument defaults to the special ``bytes`` value
-which continues to treat text as raw byte values and continues to pass latin1
-encoded bytes to custom converters.
-Using any other value (including ``None`` for system default) will switch the
-functions to real text IO so one receives unicode strings instead of bytes in
-the resulting arrays.
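-
-A minimal sketch (``values.txt`` is a hypothetical latin1-encoded text file)::
-
- data = np.loadtxt('values.txt', encoding='latin1')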
-
-External ``nose`` plugins are usable by ``numpy.testing.Tester``
-----------------------------------------------------------------
-``numpy.testing.Tester`` is now aware of ``nose`` plugins that are outside the
-``nose`` built-in ones. This allows using, for example, ``nose-timer`` like
-so: ``np.test(extra_argv=['--with-timer', '--timer-top-n', '20'])`` to
-obtain the runtime of the 20 slowest tests. An extra keyword ``timer`` was
-also added to ``Tester.test``, so ``np.test(timer=20)`` will also report the 20
-slowest tests.
-
-``parametrize`` decorator added to ``numpy.testing``
-----------------------------------------------------
-A basic ``parametrize`` decorator is now available in ``numpy.testing``. It is
-intended to allow rewriting yield based tests that have been deprecated in
-pytest so as to facilitate the transition to pytest in the future. The nose
-testing framework has not been supported for several years and looks like
-abandonware.
-
-The new ``parametrize`` decorator does not have the full functionality of the
-one in pytest. It doesn't work for classes, doesn't support nesting, and does
-not substitute variable names. Even so, it should be adequate to rewrite the
-NumPy tests.
-
-``chebinterpolate`` function added to ``numpy.polynomial.chebyshev``
---------------------------------------------------------------------
-The new ``chebinterpolate`` function interpolates a given function at the
-Chebyshev points of the first kind. A new ``Chebyshev.interpolate`` class
-method adds support for interpolation over arbitrary intervals using the scaled
-and shifted Chebyshev points of the first kind.
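-
-A short sketch of both entry points (``deg=8`` and the domain are arbitrary
-choices for illustration)::
-
- >>> from numpy.polynomial import chebyshev, Chebyshev
- >>> coef = chebyshev.chebinterpolate(np.tanh, 8)  # 9 Chebyshev coefficients
- >>> p = Chebyshev.interpolate(np.tanh, 8, domain=[-5, 5])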
-
-Support for reading lzma compressed text files in Python 3
-----------------------------------------------------------
-With Python versions containing the ``lzma`` module the text IO functions can
-now transparently read from files with ``xz`` or ``lzma`` extension.
-
-``sign`` option added to ``np.set_printoptions`` and ``np.array2string``
--------------------------------------------------------------------------
-This option controls printing of the sign of floating-point types, and may be
-one of the characters '-', '+' or ' '. With '+' numpy always prints the sign of
-positive values, with ' ' it always prints a space (whitespace character) in
-the sign position of positive values, and with '-' it will omit the sign
-character for positive values. The new default is '-'.
-
-This new default changes the float output relative to numpy 1.13. The old
-behavior can be obtained in 1.13 "legacy" printing mode, see compatibility
-notes above.
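-
-A minimal illustration of the new default's counterpart::
-
- >>> np.set_printoptions(sign='+')
- >>> np.array([1.0, -1.0])
- array([+1., -1.])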
-
-``hermitian`` option added to ``np.linalg.matrix_rank``
----------------------------------------------------------
-The new ``hermitian`` option allows choosing between standard SVD based matrix
-rank calculation and the more efficient eigenvalue based method for
-symmetric/hermitian matrices.
-
-``threshold`` and ``edgeitems`` options added to ``np.array2string``
---------------------------------------------------------------------
-These options could previously be controlled using ``np.set_printoptions``, but
-now can be changed on a per-call basis as arguments to ``np.array2string``.
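-
-For example (a sketch; summarization keeps ``edgeitems`` entries at each end)::
-
- >>> s = np.array2string(np.arange(100), threshold=10, edgeitems=2)
- >>> # s is summarized, roughly '[ 0 1 ... 98 99]'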
-
-``concatenate`` and ``stack`` gained an ``out`` argument
---------------------------------------------------------
-A preallocated buffer of the desired dtype can now be used for the output of
-these functions.
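-
-A minimal sketch of the new ``out`` argument::
-
- >>> a, b = np.ones((2, 2)), np.zeros((2, 2))
- >>> out = np.empty((4, 2))
- >>> np.concatenate([a, b], axis=0, out=out) is out
- True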
-
-Support for PGI flang compiler on Windows
------------------------------------------
-The PGI flang compiler is a Fortran front end for LLVM released by NVIDIA under
-the Apache 2 license. It can be invoked by ::
-
- python setup.py config --compiler=clang --fcompiler=flang install
-
-There is little experience with this new compiler, so any feedback from people
-using it will be appreciated.
-
-
-Improvements
-============
-
-Numerator degrees of freedom in ``random.noncentral_f`` need only be positive.
-------------------------------------------------------------------------------
-Prior to NumPy 1.14.0, the numerator degrees of freedom needed to be > 1, but
-the distribution is valid for values > 0, which is the new requirement.
-
-The GIL is released for all ``np.einsum`` variations
-----------------------------------------------------
-Some specific loop structures which have an accelerated loop version
-did not release the GIL prior to NumPy 1.14.0. This oversight has been
-fixed.
-
-The `np.einsum` function will use BLAS when possible and optimize by default
-----------------------------------------------------------------------------
-The ``np.einsum`` function will now call ``np.tensordot`` when appropriate.
-Because ``np.tensordot`` uses BLAS when possible, that will speed up execution.
-By default, ``np.einsum`` will also attempt optimization as the overhead is
-small relative to the potential improvement in speed.
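-
-A minimal sketch (a plain matrix product, which ``np.einsum`` can forward to
-``np.tensordot`` and hence to BLAS)::
-
- >>> a = np.random.rand(64, 64)
- >>> b = np.random.rand(64, 64)
- >>> c = np.einsum('ij,jk->ik', a, b, optimize=True)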
-
-``f2py`` now handles arrays of dimension 0
-------------------------------------------
-``f2py`` now allows for the allocation of arrays of dimension 0. This allows
-for more consistent handling of corner cases downstream.
-
-``numpy.distutils`` supports using MSVC and mingw64-gfortran together
----------------------------------------------------------------------
-Numpy distutils now supports using Mingw64 gfortran and MSVC compilers
-together. This enables the production of Python extension modules on Windows
-containing Fortran code while retaining compatibility with the
-binaries distributed by Python.org. Not all use cases are supported,
-but most common ways to wrap Fortran for Python are functional.
-
-Compilation in this mode is usually enabled automatically, and can be
-selected via the ``--fcompiler`` and ``--compiler`` options to
-``setup.py``. Moreover, linking Fortran codes to static OpenBLAS is
-supported; by default a gfortran compatible static archive
-``openblas.a`` is looked for.
-
-``np.linalg.pinv`` now works on stacked matrices
-------------------------------------------------
-Previously it was limited to a single 2d array.
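-
-For example (a sketch; the pseudo-inverse is computed matrix-by-matrix over
-the leading dimension)::
-
- >>> stacked = np.random.rand(3, 4, 5)
- >>> np.linalg.pinv(stacked).shape
- (3, 5, 4)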
-
-``numpy.save`` aligns data to 64 bytes instead of 16
-----------------------------------------------------
-Saving NumPy arrays in the ``npy`` format with ``numpy.save`` inserts
-padding before the array data to align it at 64 bytes. Previously
-this was only 16 bytes (and sometimes less due to a bug in the code
-for version 2). Now the alignment is 64 bytes, which matches the
-widest SIMD instruction set commonly available, and is also the most
-common cache line size. This makes ``npy`` files easier to use in
-programs which open them with ``mmap``, especially on Linux where an
-``mmap`` offset must be a multiple of the page size.
-
-NPZ files now can be written without using temporary files
-----------------------------------------------------------
-In Python 3.6+ ``numpy.savez`` and ``numpy.savez_compressed`` now write
-directly to a ZIP file, without creating intermediate temporary files.
-
-Better support for empty structured and string types
-----------------------------------------------------
-Structured types can contain zero fields, and string dtypes can contain zero
-characters. Zero-length strings still cannot be created directly, and must be
-constructed through structured dtypes::
-
- str0 = np.empty(10, np.dtype([('v', str, 0)]))['v']
- void0 = np.empty(10, np.void)
-
-It was always possible to work with these, but the following operations are
-now supported for these arrays:
-
- * `arr.sort()`
- * `arr.view(bytes)`
- * `arr.resize(...)`
- * `pickle.dumps(arr)`
-
-Support for ``decimal.Decimal`` in ``np.lib.financial``
--------------------------------------------------------
-Unless otherwise stated, all functions within the ``financial`` package now
-support using the ``decimal.Decimal`` built-in type.
-
-Float printing now uses "dragon4" algorithm for shortest decimal representation
--------------------------------------------------------------------------------
-The ``str`` and ``repr`` of floating-point values (16, 32, 64 and 128 bit) are
-now printed to give the shortest decimal representation which uniquely
-identifies the value from others of the same type. Previously this was only
-true for ``float64`` values. The remaining float types will now often be shorter
-than in numpy 1.13. Arrays printed in scientific notation now also use the
-shortest scientific representation, instead of fixed precision as before.
-
-Additionally, the ``str`` of float scalars is no longer truncated in python2,
-unlike python2 ``float`` values. ``np.double`` scalars now have a ``str`` and
-``repr`` identical to that of a python3 float.
-
-New functions ``np.format_float_scientific`` and ``np.format_float_positional``
-are provided to generate these decimal representations.
-
-A new option ``floatmode`` has been added to ``np.set_printoptions`` and
-``np.array2string``, which gives control over uniqueness and rounding of
-printed elements in an array. The new default is ``floatmode='maxprec'`` with
-``precision=8``, which will print at most 8 fractional digits, or fewer if an
-element can be uniquely represented with fewer. A useful new mode is
-``floatmode="unique"``, which will output enough digits to specify the array
-elements uniquely.
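-
-A minimal illustration (``np.float32(0.1)`` needs only "0.1" to be uniquely
-identified among ``float32`` values)::
-
- >>> np.format_float_positional(np.float32(0.1), unique=True)
- '0.1'
- >>> np.set_printoptions(floatmode='unique')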
-
-NumPy complex floating-point scalars with values like ``inf*j`` or ``nan*j`` now
-print as ``infj`` and ``nanj``, like the pure-python ``complex`` type.
-
-The ``FloatFormat`` and ``LongFloatFormat`` classes are deprecated and should
-both be replaced by ``FloatingFormat``. Similarly ``ComplexFormat`` and
-``LongComplexFormat`` should be replaced by ``ComplexFloatingFormat``.
-
-``void`` datatype elements are now printed in hex notation
-----------------------------------------------------------
-A hex representation compatible with the python ``bytes`` type is now printed
-for unstructured ``np.void`` elements, e.g., ``V4`` datatype. Previously, in
-python2 the raw void data of the element was printed to stdout, or in python3
-the integer byte values were shown.
-
-printing style for ``void`` datatypes is now independently customizable
------------------------------------------------------------------------
-The printing style of ``np.void`` arrays is now independently customizable
-using the ``formatter`` argument to ``np.set_printoptions``, using the
-``'void'`` key, instead of the catch-all ``numpystr`` key as before.
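-
-A minimal sketch (the replacement text is an arbitrary choice)::
-
- >>> np.set_printoptions(formatter={'void': lambda x: '<void>'})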
-
-Reduced memory usage of ``np.loadtxt``
---------------------------------------
-``np.loadtxt`` now reads files in chunks instead of all at once which decreases
-its memory usage significantly for large files.
-
-
-Changes
-=======
-
-Multiple-field indexing/assignment of structured arrays
--------------------------------------------------------
-The indexing and assignment of structured arrays with multiple fields has
-changed in a number of ways, as warned about in previous releases.
-
-First, indexing a structured array with multiple fields, e.g.,
-``arr[['f1', 'f3']]``, returns a view into the original array instead of a
-copy. The returned view will have extra padding bytes corresponding to
-intervening fields in the original array, unlike the copy in 1.13, which will
-affect code such as ``arr[['f1', 'f3']].view(newdtype)``.
-
-Second, assignment between structured arrays will now occur "by position"
-instead of "by field name". The Nth field of the destination will be set to the
-Nth field of the source regardless of field name, unlike in numpy versions 1.6
-to 1.13 in which fields in the destination array were set to the
-identically-named field in the source array or to 0 if the source did not have
-a field.
-
-Correspondingly, the order of fields in a structured dtype now matters when
-computing dtype equality. For example, with the dtypes ::
-
- x = np.dtype({'names': ['A', 'B'], 'formats': ['i4', 'f4'], 'offsets': [0, 4]})
- y = np.dtype({'names': ['B', 'A'], 'formats': ['f4', 'i4'], 'offsets': [4, 0]})
-
-the expression ``x == y`` will now return ``False``, unlike before.
-This makes dictionary based dtype specifications like
-``dtype({'a': ('i4', 0), 'b': ('f4', 4)})`` dangerous in python < 3.6
-since dict key order is not preserved in those versions.
-
-Assignment from a structured array to a boolean array now raises a ValueError,
-unlike in 1.13, where it always set the destination elements to ``True``.
-
-Assignment from structured array with more than one field to a non-structured
-array now raises a ValueError. In 1.13 this copied just the first field of the
-source to the destination.
-
-Using field "titles" in multiple-field indexing is now disallowed, as is
-repeating a field name in a multiple-field index.
-
-The documentation for structured arrays in the user guide has been
-significantly updated to reflect these changes.
-
-Integer and Void scalars are now unaffected by ``np.set_string_function``
--------------------------------------------------------------------------
-Previously, unlike most other numpy scalars, the ``str`` and ``repr`` of
-integer and void scalars could be controlled by ``np.set_string_function``.
-This is no longer possible.
-
-0d array printing changed, ``style`` arg of array2string deprecated
--------------------------------------------------------------------
-Previously the ``str`` and ``repr`` of 0d arrays had idiosyncratic
-implementations which returned ``str(a.item())`` and ``'array(' +
-repr(a.item()) + ')'`` respectively for 0d array ``a``, unlike both numpy
-scalars and higher dimension ndarrays.
-
-Now, the ``str`` of a 0d array acts like a numpy scalar using ``str(a[()])``
-and the ``repr`` acts like higher dimension arrays using ``formatter(a[()])``,
-where ``formatter`` can be specified using ``np.set_printoptions``. The
-``style`` argument of ``np.array2string`` is deprecated.
-
-This new behavior is disabled in 1.13 legacy printing mode, see compatibility
-notes above.
-
-Seeding ``RandomState`` using an array requires a 1-d array
------------------------------------------------------------
-``RandomState`` previously would accept empty arrays or arrays with 2 or more
-dimensions, which resulted either in a failure to seed (empty arrays) or in
-some of the passed values being ignored when setting the seed.
-
-``MaskedArray`` objects show a more useful ``repr``
----------------------------------------------------
-The ``repr`` of a ``MaskedArray`` is now closer to the python code that would
-produce it, with arrays now being shown with commas and dtypes. Like the other
-formatting changes, this can be disabled with the 1.13 legacy printing mode in
-order to help transition doctests.
-
-The ``repr`` of ``np.polynomial`` classes is more explicit
-----------------------------------------------------------
-It now shows the domain and window parameters as keyword arguments to make
-them more clear::
-
- >>> np.polynomial.Polynomial(range(4))
- Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
+++ /dev/null
-==========================
-NumPy 1.14.1 Release Notes
-==========================
-
-This is a bugfix release for some problems reported following the 1.14.0 release. The major
-problems fixed are the following.
-
-* Problems with the new array printing, particularly the printing of complex
- values. Please report any additional problems that may turn up.
-* Problems with ``np.einsum`` due to the new ``optimize=True`` default. Some
- fixes for optimization have been applied and ``optimize=False`` is now the
- default.
-* The sort order in ``np.unique`` when ``axis=<some-number>`` will now always
- be lexicographic in the subarray elements. In previous NumPy versions there
- was an optimization that could result in sorting the subarrays as unsigned
- byte strings.
-* The change in 1.14.0 that multi-field indexing of structured arrays returns a
- view instead of a copy has been reverted but remains on track for NumPy 1.15.
- Affected users should read the 1.14.1 Numpy User Guide section
- "basics/structured arrays/accessing multiple fields" for advice on how to
- manage this transition.
-
-The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
-3.6 wheels available from PIP are built with Python 3.6.2 and should be
-compatible with all previous versions of Python 3.6. The source releases were
-cythonized with Cython 0.26.1, which is known to **not** support the upcoming
-Python 3.7 release. People who wish to run Python 3.7 should check out the
-NumPy repo and try building with the, as yet, unreleased master branch of
-Cython.
-
-Contributors
-============
-
-A total of 14 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Allan Haldane
-* Charles Harris
-* Daniel Smith
-* Dennis Weyland +
-* Eric Larson
-* Eric Wieser
-* Jarrod Millman
-* Kenichi Maehashi +
-* Marten van Kerkwijk
-* Mathieu Lamarre
-* Sebastian Berg
-* Simon Conseil
-* Simon Gibbons
-* xoviat
-
-Pull requests merged
-====================
-
-A total of 36 pull requests were merged for this release.
-
-* `#10339 <https://github.com/numpy/numpy/pull/10339>`__: BUG: restrict the __config__ modifications to win32
-* `#10368 <https://github.com/numpy/numpy/pull/10368>`__: MAINT: Adjust type promotion in linalg.norm
-* `#10375 <https://github.com/numpy/numpy/pull/10375>`__: BUG: add missing paren and remove quotes from repr of fieldless...
-* `#10395 <https://github.com/numpy/numpy/pull/10395>`__: MAINT: Update download URL in setup.py.
-* `#10396 <https://github.com/numpy/numpy/pull/10396>`__: BUG: fix einsum issue with unicode input and py2
-* `#10397 <https://github.com/numpy/numpy/pull/10397>`__: BUG: fix error message not formatted in einsum
-* `#10398 <https://github.com/numpy/numpy/pull/10398>`__: DOC: add documentation about how to handle new array printing
-* `#10403 <https://github.com/numpy/numpy/pull/10403>`__: BUG: Set einsum optimize parameter default to `False`.
-* `#10424 <https://github.com/numpy/numpy/pull/10424>`__: ENH: Fix repr of np.record objects to match np.void types #10412
-* `#10425 <https://github.com/numpy/numpy/pull/10425>`__: MAINT: Update zesty to artful for i386 testing
-* `#10431 <https://github.com/numpy/numpy/pull/10431>`__: REL: Add 1.14.1 release notes template
-* `#10435 <https://github.com/numpy/numpy/pull/10435>`__: MAINT: Use ValueError for duplicate field names in lookup (backport)
-* `#10534 <https://github.com/numpy/numpy/pull/10534>`__: BUG: Provide a better error message for out-of-order fields
-* `#10536 <https://github.com/numpy/numpy/pull/10536>`__: BUG: Resize bytes columns in genfromtxt (backport of #10401)
-* `#10537 <https://github.com/numpy/numpy/pull/10537>`__: BUG: multifield-indexing adds padding bytes: revert for 1.14.1
-* `#10539 <https://github.com/numpy/numpy/pull/10539>`__: BUG: fix np.save issue with python 2.7.5
-* `#10540 <https://github.com/numpy/numpy/pull/10540>`__: BUG: Add missing DECREF in Py2 int() cast
-* `#10541 <https://github.com/numpy/numpy/pull/10541>`__: TST: Add circleci document testing to maintenance/1.14.x
-* `#10542 <https://github.com/numpy/numpy/pull/10542>`__: BUG: complex repr has extra spaces, missing + (1.14 backport)
-* `#10550 <https://github.com/numpy/numpy/pull/10550>`__: BUG: Set missing exception after malloc
-* `#10557 <https://github.com/numpy/numpy/pull/10557>`__: BUG: In numpy.i, clear CARRAY flag if wrapped buffer is not C_CONTIGUOUS.
-* `#10558 <https://github.com/numpy/numpy/pull/10558>`__: DEP: Issue FutureWarning when malformed records detected.
-* `#10559 <https://github.com/numpy/numpy/pull/10559>`__: BUG: Fix einsum optimize logic for singleton dimensions
-* `#10560 <https://github.com/numpy/numpy/pull/10560>`__: BUG: Fix calling ufuncs with a positional output argument.
-* `#10561 <https://github.com/numpy/numpy/pull/10561>`__: BUG: Fix various Big-Endian test failures (ppc64)
-* `#10562 <https://github.com/numpy/numpy/pull/10562>`__: BUG: Make dtype.descr error for out-of-order fields.
-* `#10563 <https://github.com/numpy/numpy/pull/10563>`__: BUG: arrays not being flattened in `union1d`
-* `#10607 <https://github.com/numpy/numpy/pull/10607>`__: MAINT: Update sphinxext submodule hash.
-* `#10608 <https://github.com/numpy/numpy/pull/10608>`__: BUG: Revert sort optimization in np.unique.
-* `#10609 <https://github.com/numpy/numpy/pull/10609>`__: BUG: infinite recursion in str of 0d subclasses
-* `#10610 <https://github.com/numpy/numpy/pull/10610>`__: BUG: Align type definition with generated lapack
-* `#10612 <https://github.com/numpy/numpy/pull/10612>`__: BUG/ENH: Improve output for structured non-void types
-* `#10622 <https://github.com/numpy/numpy/pull/10622>`__: BUG: deallocate recursive closure in arrayprint.py (1.14 backport)
-* `#10624 <https://github.com/numpy/numpy/pull/10624>`__: BUG: Correctly identify comma separated dtype strings
-* `#10629 <https://github.com/numpy/numpy/pull/10629>`__: BUG: deallocate recursive closure in arrayprint.py (backport...
-* `#10630 <https://github.com/numpy/numpy/pull/10630>`__: REL: Prepare for 1.14.1 release.
+++ /dev/null
-==========================
-NumPy 1.14.2 Release Notes
-==========================
-
-This is a bugfix release for some bugs reported following the 1.14.1 release. The major
-problems dealt with are as follows.
-
-* Residual bugs in the new array printing functionality.
-* Regression resulting in a relocation problem with the shared library.
-* Improved PyPy compatibility.
-
-The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
-3.6 wheels available from PIP are built with Python 3.6.2 and should be
-compatible with all previous versions of Python 3.6. The source releases were
-cythonized with Cython 0.26.1, which is known to **not** support the upcoming
-Python 3.7 release. People who wish to run Python 3.7 should check out the
-NumPy repo and try building with the, as yet, unreleased master branch of
-Cython.
-
-Contributors
-============
-
-A total of 4 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Allan Haldane
-* Charles Harris
-* Eric Wieser
-* Pauli Virtanen
-
-Pull requests merged
-====================
-
-A total of 5 pull requests were merged for this release.
-
-* `#10674 <https://github.com/numpy/numpy/pull/10674>`__: BUG: Further back-compat fix for subclassed array repr
-* `#10725 <https://github.com/numpy/numpy/pull/10725>`__: BUG: dragon4 fractional output mode adds too many trailing zeros
-* `#10726 <https://github.com/numpy/numpy/pull/10726>`__: BUG: Fix f2py generated code to work on PyPy
-* `#10727 <https://github.com/numpy/numpy/pull/10727>`__: BUG: Fix missing NPY_VISIBILITY_HIDDEN on npy_longdouble_to_PyLong
-* `#10729 <https://github.com/numpy/numpy/pull/10729>`__: DOC: Create 1.14.2 notes and changelog.
+++ /dev/null
-==========================
-NumPy 1.14.3 Release Notes
-==========================
-
-This is a bugfix release for a few bugs reported following the 1.14.2 release:
-
-* ``np.lib.recfunctions.fromrecords`` accepts a list-of-lists, until 1.15
-* In python2, float types use the new print style when printing to a file
-* ``style`` arg in "legacy" print mode now works for 0d arrays
-
-The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
-3.6 wheels available from PIP are built with Python 3.6.2 and should be
-compatible with all previous versions of Python 3.6. The source releases were
-cythonized with Cython 0.28.2.
-
-Contributors
-============
-
-A total of 6 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Allan Haldane
-* Charles Harris
-* Jonathan March +
-* Malcolm Smith +
-* Matti Picus
-* Pauli Virtanen
-
-Pull requests merged
-====================
-
-A total of 8 pull requests were merged for this release.
-
-* `#10862 <https://github.com/numpy/numpy/pull/10862>`__: BUG: floating types should override tp_print (1.14 backport)
-* `#10905 <https://github.com/numpy/numpy/pull/10905>`__: BUG: for 1.14 back-compat, accept list-of-lists in fromrecords
-* `#10947 <https://github.com/numpy/numpy/pull/10947>`__: BUG: 'style' arg to array2string broken in legacy mode (1.14...
-* `#10959 <https://github.com/numpy/numpy/pull/10959>`__: BUG: test, fix for missing flags['WRITEBACKIFCOPY'] key
-* `#10960 <https://github.com/numpy/numpy/pull/10960>`__: BUG: Add missing underscore to prototype in check_embedded_lapack
-* `#10961 <https://github.com/numpy/numpy/pull/10961>`__: BUG: Fix encoding regression in ma/bench.py (Issue #10868)
-* `#10962 <https://github.com/numpy/numpy/pull/10962>`__: BUG: core: fix NPY_TITLE_KEY macro on pypy
-* `#10974 <https://github.com/numpy/numpy/pull/10974>`__: BUG: test, fix PyArray_DiscardWritebackIfCopy...
+++ /dev/null
-==========================
-NumPy 1.14.4 Release Notes
-==========================
-
-This is a bugfix release for bugs reported following the 1.14.3 release. The
-most significant fixes are:
-
-* fixes for compiler instruction reordering that resulted in NaN's not being
- properly propagated in `np.max` and `np.min`,
-
-* fixes for bus faults on SPARC and older ARM due to incorrect alignment
- checks.
-
-There are also improvements to printing of long doubles on PPC platforms. All
-is not yet perfect on that platform: the whitespace padding is still incorrect
-and is to be fixed in NumPy 1.15, so NumPy still fails some printing-related
-(and other) unit tests on ppc systems. However, the printed values are now
-correct.
-
-Note that NumPy will error on import if it detects incorrect float32 `dot`
-results. This problem has been seen on the Mac when working in the Anaconda
-environment and is due to a subtle interaction between MKL and PyQt5. It is not
-strictly a NumPy problem, but it is best that users be aware of it. See the
-gh-8577 NumPy issue for more information.
-
-The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
-3.6 wheels available from PIP are built with Python 3.6.2 and should be
-compatible with all previous versions of Python 3.6. The source releases were
-cythonized with Cython 0.28.2 and should work for the upcoming Python 3.7.
-
-Contributors
-============
-
-A total of 7 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Allan Haldane
-* Charles Harris
-* Marten van Kerkwijk
-* Matti Picus
-* Pauli Virtanen
-* Ryan Soklaski +
-* Sebastian Berg
-
-Pull requests merged
-====================
-
-A total of 11 pull requests were merged for this release.
-
-* `#11104 <https://github.com/numpy/numpy/pull/11104>`__: BUG: str of DOUBLE_DOUBLE format wrong on ppc64
-* `#11170 <https://github.com/numpy/numpy/pull/11170>`__: TST: linalg: add regression test for gh-8577
-* `#11174 <https://github.com/numpy/numpy/pull/11174>`__: MAINT: add sanity-checks to be run at import time
-* `#11181 <https://github.com/numpy/numpy/pull/11181>`__: BUG: void dtype setup checked offset not actual pointer for alignment
-* `#11194 <https://github.com/numpy/numpy/pull/11194>`__: BUG: Python2 doubles don't print correctly in interactive shell.
-* `#11198 <https://github.com/numpy/numpy/pull/11198>`__: BUG: optimizing compilers can reorder call to npy_get_floatstatus
-* `#11199 <https://github.com/numpy/numpy/pull/11199>`__: BUG: reduce using SSE only warns if inside SSE loop
-* `#11203 <https://github.com/numpy/numpy/pull/11203>`__: BUG: Bytes delimiter/comments in genfromtxt should be decoded
-* `#11211 <https://github.com/numpy/numpy/pull/11211>`__: BUG: Fix reference count/memory leak exposed by better testing
-* `#11219 <https://github.com/numpy/numpy/pull/11219>`__: BUG: Fixes einsum broadcasting bug when optimize=True
-* `#11251 <https://github.com/numpy/numpy/pull/11251>`__: DOC: Document 1.14.4 release.
+++ /dev/null
-==========================
-NumPy 1.14.5 Release Notes
-==========================
-
-This is a bugfix release for bugs reported following the 1.14.4 release. The
-most significant fixes are:
-
-* fixes for compilation errors on alpine and NetBSD
-
-The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
-3.6 wheels available from PIP are built with Python 3.6.2 and should be
-compatible with all previous versions of Python 3.6. The source releases were
-cythonized with Cython 0.28.2 and should work for the upcoming Python 3.7.
-
-Contributors
-============
-
-A total of 1 person contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Charles Harris
-
-Pull requests merged
-====================
-
-A total of 2 pull requests were merged for this release.
-
-* `#11274 <https://github.com/numpy/numpy/pull/11274>`__: BUG: Correct use of NPY_UNUSED.
-* `#11294 <https://github.com/numpy/numpy/pull/11294>`__: BUG: Remove extra trailing parentheses.
-
+++ /dev/null
-==========================
-NumPy 1.14.6 Release Notes
-==========================
-
-This is a bugfix release for bugs reported following the 1.14.5 release. The
-most significant fixes are:
-
-* Fix for behavior change in ``ma.masked_values(shrink=True)``
-* Fix the new cached allocations machinery to be thread safe.
-
-The Python versions supported in this release are 2.7 and 3.4 - 3.7. The Python
-3.6 wheels on PyPI should be compatible with all Python 3.6 versions.
-
-Contributors
-============
-
-A total of 4 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Charles Harris
-* Eric Wieser
-* Julian Taylor
-* Matti Picus
-
-Pull requests merged
-====================
-
-A total of 4 pull requests were merged for this release.
-
-* `#11985 <https://github.com/numpy/numpy/pull/11985>`__: BUG: fix cached allocations without the GIL
-* `#11986 <https://github.com/numpy/numpy/pull/11986>`__: BUG: Undo behavior change in ma.masked_values(shrink=True)
-* `#11987 <https://github.com/numpy/numpy/pull/11987>`__: BUG: fix refcount leak in PyArray_AdaptFlexibleDType
-* `#11995 <https://github.com/numpy/numpy/pull/11995>`__: TST: Add Python 3.7 testing to NumPy 1.14.
+++ /dev/null
-==========================
-NumPy 1.15.0 Release Notes
-==========================
-
-NumPy 1.15.0 is a release with an unusual number of cleanups, many deprecations
-of old functions, and improvements to many existing functions. Please read the
-detailed descriptions below to see if you are affected.
-
-For testing, we have switched to pytest as a replacement for the no longer
-maintained nose framework. The old nose based interface remains for downstream
-projects who may still be using it.
-
-The Python versions supported by this release are 2.7, 3.4-3.7. The wheels are
-linked with OpenBLAS v0.3.0, which should fix some of the linalg problems
-reported for NumPy 1.14.
-
-
-Highlights
-==========
-
-* NumPy has switched to pytest for testing.
-* A new `numpy.printoptions` context manager.
-* Many improvements to the histogram functions.
-* Support for unicode field names in python 2.7.
-* Improved support for PyPy.
-* Fixes and improvements to `numpy.einsum`.
-
-
-New functions
-=============
-
-* `numpy.gcd` and `numpy.lcm`, to compute the greatest common divisor and least
- common multiple.
-
-* `numpy.ma.stack`, the `numpy.stack` array-joining function generalized to
- masked arrays.
-
-* `numpy.quantile` function, an interface to ``percentile`` without factors of
-  100.
-
-* `numpy.nanquantile` function, an interface to ``nanpercentile`` without
-  factors of 100.
-
-* `numpy.printoptions`, a context manager that sets print options temporarily
- for the scope of the ``with`` block::
-
- >>> with np.printoptions(precision=2):
- ... print(np.array([2.0]) / 3)
- [0.67]
-
-* `numpy.histogram_bin_edges`, a function to get the edges of the bins used by a
- histogram without needing to calculate the histogram.
-
-* C functions `npy_get_floatstatus_barrier` and `npy_clear_floatstatus_barrier`
- have been added to deal with compiler optimization changing the order of
- operations. See below for details.
-
-
-Deprecations
-============
-
-* Aliases of builtin `pickle` functions are deprecated, in favor of their
- unaliased ``pickle.<func>`` names:
-
- * `numpy.loads`
- * `numpy.core.numeric.load`
- * `numpy.core.numeric.loads`
- * `numpy.ma.loads`, `numpy.ma.dumps`
- * `numpy.ma.load`, `numpy.ma.dump` - these functions already failed on
- python 3 when called with a string.
-
-* Multidimensional indexing with anything but a tuple is deprecated. This means
- that the index list in ``ind = [slice(None), 0]; arr[ind]`` should be changed
- to a tuple, e.g., ``ind = [slice(None), 0]; arr[tuple(ind)]`` or
- ``arr[(slice(None), 0)]``. That change is necessary to avoid ambiguity in
- expressions such as ``arr[[[0, 1], [0, 1]]]``, currently interpreted as
-  ``arr[array([0, 1]), array([0, 1])]``, which will be interpreted
- as ``arr[array([[0, 1], [0, 1]])]`` in the future.
-
-* Imports from the following sub-modules are deprecated, they will be removed
- at some future date.
-
- * `numpy.testing.utils`
- * `numpy.testing.decorators`
- * `numpy.testing.nosetester`
- * `numpy.testing.noseclasses`
- * `numpy.core.umath_tests`
-
-* Giving a generator to `numpy.sum` is now deprecated. This was undocumented
- behavior, but worked. Previously, it would calculate the sum of the generator
- expression. In the future, it might return a different result. Use
-  ``np.sum(np.fromiter(generator, dtype))`` or the built-in Python ``sum``
-  instead.
-
-* Users of the C-API should call ``PyArray_ResolveWritebackIfCopy`` or
-  ``PyArray_DiscardWritebackIfCopy`` on any array with the ``WRITEBACKIFCOPY``
- flag set, before deallocating the array. A deprecation warning will be
- emitted if those calls are not used when needed.
-
-* Users of ``nditer`` should use the nditer object as a context manager
- anytime one of the iterator operands is writeable, so that numpy can
- manage writeback semantics, or should call ``it.close()``. A
- `RuntimeWarning` may be emitted otherwise in these cases.
-
-* The ``normed`` argument of ``np.histogram``, deprecated long ago in 1.6.0,
- now emits a ``DeprecationWarning``.
-
-
-Future Changes
-==============
-
-* NumPy 1.16 will drop support for Python 3.4.
-* NumPy 1.17 will drop support for Python 2.7.
-
-
-Compatibility notes
-===================
-
-Compiled testing modules renamed and made private
--------------------------------------------------
-The following compiled modules have been renamed and made private:
-
-* ``umath_tests`` -> ``_umath_tests``
-* ``test_rational`` -> ``_rational_tests``
-* ``multiarray_tests`` -> ``_multiarray_tests``
-* ``struct_ufunc_test`` -> ``_struct_ufunc_tests``
-* ``operand_flag_tests`` -> ``_operand_flag_tests``
-
-The ``umath_tests`` module is still available for backwards compatibility, but
-will be removed in the future.
-
-The ``NpzFile`` returned by ``np.savez`` is now a ``collections.abc.Mapping``
------------------------------------------------------------------------------
-This means it behaves like a readonly dictionary, and has a new ``.values()``
-method and ``len()`` implementation.
-
-For python 3, this means that ``.iteritems()``, ``.iterkeys()`` have been
-deprecated, and ``.keys()`` and ``.items()`` now return views and not lists.
-This is consistent with how the builtin ``dict`` type changed between python 2
-and python 3.
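-
-A minimal sketch of the new mapping behavior, using an in-memory buffer for
-illustration::
-
-    >>> from io import BytesIO
-    >>> buf = BytesIO()
-    >>> np.savez(buf, a=np.zeros(2), b=np.ones(2))
-    >>> _ = buf.seek(0)
-    >>> npz = np.load(buf)
-    >>> sorted(npz.keys())
-    ['a', 'b']
-    >>> len(npz)
-    2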
-
-Under certain conditions, ``nditer`` must be used in a context manager
-----------------------------------------------------------------------
-When using a `numpy.nditer` with the ``"writeonly"`` or ``"readwrite"`` flags, there
-are some circumstances where nditer doesn't actually give you a view of the
-writable array. Instead, it gives you a copy, and if you make changes to the
-copy, nditer later writes those changes back into your actual array. Currently,
-this writeback occurs when the array objects are garbage collected, which makes
-this API error-prone on CPython and entirely broken on PyPy. Therefore,
-``nditer`` should now be used as a context manager whenever it is used
-with writeable arrays, e.g., ``with np.nditer(...) as it: ...``. You may also
-explicitly call ``it.close()`` for cases where a context manager is unusable,
-for instance in generator expressions.
-
-Numpy has switched to using pytest instead of nose for testing
---------------------------------------------------------------
-The last nose release was 1.3.7 in June 2015, and development of that tool has
-ended; consequently, NumPy has now switched to using pytest. The old decorators
-and nose tools that were previously used by some downstream projects remain
-available, but will not be maintained. The standard testing utilities,
-``assert_almost_equal`` and such, are not affected by this change except for
-the nose-specific functions ``import_nose`` and ``raises``. Those functions are
-not used in numpy, but are kept for downstream compatibility.
-
-Numpy no longer monkey-patches ``ctypes`` with ``__array_interface__``
-----------------------------------------------------------------------
-Previously numpy added ``__array_interface__`` attributes to all the integer
-types from ``ctypes``.
-
-``np.ma.notmasked_contiguous`` and ``np.ma.flatnotmasked_contiguous`` always return lists
------------------------------------------------------------------------------------------
-This is the documented behavior, but previously the result could be any of
-slice, None, or list.
-
-All downstream users seem to check for the ``None`` result from
-``flatnotmasked_contiguous`` and replace it with ``[]``. Those callers will
-continue to work as before.
-
-``np.squeeze`` restores old behavior of objects that cannot handle an ``axis`` argument
----------------------------------------------------------------------------------------
-Prior to version ``1.7.0``, `numpy.squeeze` did not have an ``axis`` argument and
-all empty axes were removed by default. The incorporation of an ``axis``
-argument made it possible to selectively squeeze single or multiple empty axes,
-but the old API expectation was not respected because axes could still be
-selectively removed (silent success) from an object expecting all empty axes to
-be removed. That silent, selective removal of empty axes for objects expecting
-the old behavior has been fixed and the old behavior restored.
-
-unstructured void array's ``.item`` method now returns a bytes object
----------------------------------------------------------------------
-``.item`` now returns a ``bytes`` object instead of a buffer or byte array.
-This may affect code which assumed the return value was mutable, which is no
-longer the case.
-
-``copy.copy`` and ``copy.deepcopy`` no longer turn ``masked`` into an array
----------------------------------------------------------------------------
-Since ``np.ma.masked`` is a readonly scalar, copying should be a no-op. These
-functions now behave consistently with ``np.copy()``.
-
-Multifield Indexing of Structured Arrays will still return a copy
------------------------------------------------------------------
-The change that multi-field indexing of structured arrays returns a view
-instead of a copy is pushed back to 1.16. A new method
-``numpy.lib.recfunctions.repack_fields`` has been introduced to help mitigate
-the effects of this change, which can be used to write code compatible with
-both numpy 1.15 and 1.16. For more information on how to update code to account
-for this future change see the "accessing multiple fields" section of the
-`user guide <https://docs.scipy.org/doc/numpy/user/basics.rec.html>`__.
-
-
-C API changes
-=============
-
-New functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier``
------------------------------------------------------------------------------------
-Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier``
-have been added and should be used in place of the ``npy_get_floatstatus`` and
-``npy_clear_floatstatus`` functions. Optimizing compilers like GCC 8.1 and
-Clang were rearranging the order of operations when the previous functions
-were used in the ufunc SIMD functions, resulting in the floatstatus flags
-being checked before the operation whose status we wanted to check was run.
-See `#10370 <https://github.com/numpy/numpy/issues/10370>`__.
-
-Changes to ``PyArray_GetDTypeTransferFunction``
------------------------------------------------
-``PyArray_GetDTypeTransferFunction`` now defaults to using user-defined
-``copyswapn`` / ``copyswap`` for user-defined dtypes. If this causes a
-significant performance hit, consider implementing ``copyswapn`` to reflect the
-implementation of ``PyArray_GetStridedCopyFn``. See `#10898
-<https://github.com/numpy/numpy/pull/10898>`__.
-
-
-New Features
-============
-
-``np.gcd`` and ``np.lcm`` ufuncs added for integer and object types
----------------------------------------------------------------------
-These compute the greatest common divisor and least common multiple,
-respectively. They work on all the numpy integer types, as well as the
-builtin arbitrary-precision ``Decimal`` and ``long`` types.
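-
-A minimal sketch::
-
-    >>> np.gcd(12, 20)
-    4
-    >>> np.lcm(np.arange(6), 20)
-    array([ 0, 20, 20, 60, 20, 20])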
-
-Support for cross-platform builds for iOS
------------------------------------------
-The build system has been modified to add support for the
-``_PYTHON_HOST_PLATFORM`` environment variable, used by ``distutils`` when
-compiling on one platform for another platform. This makes it possible to
-compile NumPy for iOS targets.
-
-This only enables you to compile NumPy for one specific platform at a time.
-Creating a full iOS-compatible NumPy package requires building for the 5
-architectures supported by iOS (i386, x86_64, armv7, armv7s and arm64), and
-combining these 5 compiled build products into a single "fat" binary.
-
-``return_indices`` keyword added for ``np.intersect1d``
--------------------------------------------------------
-New keyword ``return_indices`` returns the indices of the two input arrays
-that correspond to the common elements.
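-
-A minimal sketch (values chosen for illustration)::
-
-    >>> x = np.array([1, 1, 2, 3, 4])
-    >>> y = np.array([2, 1, 4, 6])
-    >>> common, ix, iy = np.intersect1d(x, y, return_indices=True)
-    >>> common, ix, iy
-    (array([1, 2, 4]), array([0, 2, 4]), array([1, 0, 2]))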
-
-``np.quantile`` and ``np.nanquantile``
---------------------------------------
-Like ``np.percentile`` and ``np.nanpercentile``, but takes quantiles in [0, 1]
-rather than percentiles in [0, 100]. ``np.percentile`` is now a thin wrapper
-around ``np.quantile`` with the extra step of dividing by 100.
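-
-For example::
-
-    >>> a = np.array([10, 7, 4, 3, 2, 1])
-    >>> np.quantile(a, 0.5)        # same as np.percentile(a, 50)
-    3.5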
-
-
-Build system
-------------
-Added experimental support for the 64-bit RISC-V architecture.
-
-
-Improvements
-============
-
-``np.einsum`` updates
----------------------
-Syncs the einsum path optimization between `numpy` and `opt_einsum`. In
-particular, the `greedy` path has received many enhancements by @jcmgray. The
-full list of fixes is:
-
-* Arbitrary memory can be passed into the `greedy` path. Fixes gh-11210.
-* The greedy path has been updated to contain more dynamic programming ideas,
-  preventing a large number of duplicate (and expensive) calls that figure out
-  the actual pair contraction that takes place. It now takes a few seconds on
-  several hundred input tensors, which is useful for matrix product state
-  theories.
-* Reworks the broadcasting dot error catching found in gh-11218 gh-10352 to be
- a bit earlier in the process.
-* Enhances the `can_dot` functionality that previously missed an edge case (part
- of gh-11308).
-
-``np.ufunc.reduce`` and related functions now accept an initial value
----------------------------------------------------------------------
-``np.ufunc.reduce``, ``np.sum``, ``np.prod``, ``np.min`` and ``np.max`` all
-now accept an ``initial`` keyword argument that specifies the value to start
-the reduction with.
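-
-A minimal sketch; ``initial`` also makes reductions over empty arrays well
-defined::
-
-    >>> np.sum([1, 2, 3], initial=10)
-    16
-    >>> np.min([], initial=np.inf)
-    inf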
-
-``np.flip`` can operate over multiple axes
-------------------------------------------
-``np.flip`` now accepts ``None``, or tuples of ints, in its ``axis`` argument.
-If ``axis`` is ``None``, it will flip over all the axes.
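-
-A minimal sketch::
-
-    >>> a = np.arange(4).reshape(2, 2)
-    >>> np.flip(a, axis=(0, 1))
-    array([[3, 2],
-           [1, 0]])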
-
-``histogram`` and ``histogramdd`` functions have moved to ``np.lib.histograms``
--------------------------------------------------------------------------------
-These were originally found in ``np.lib.function_base``. They are still
-available under their un-scoped ``np.histogram(dd)`` names and, to maintain
-compatibility, aliased at ``np.lib.function_base.histogram(dd)``.
-
-Code that does ``from np.lib.function_base import *`` will need to be updated
-with the new location, and should consider not using ``import *`` in the future.
-
-``histogram`` will accept NaN values when explicit bins are given
------------------------------------------------------------------
-Previously it would fail when trying to compute a finite range for the data.
-Since the range is ignored anyway when the bins are given explicitly, this error
-was needless.
-
-Note that calling ``histogram`` on NaN values continues to raise the
-``RuntimeWarning`` typical of working with NaN values, which can be silenced
-as usual with ``errstate``.
-
-``histogram`` works on datetime types, when explicit bin edges are given
-------------------------------------------------------------------------
-Dates, times, and timedeltas can now be histogrammed. The bin edges must be
-passed explicitly, and are not yet computed automatically.
-
-``histogram`` "auto" estimator handles limited variance better
---------------------------------------------------------------
-An IQR of 0 no longer results in ``n_bins=1``; rather, the number of bins
-chosen is related to the data size in this situation.
-
-The edges returned by ``histogram`` and ``histogramdd`` now match the data float type
----------------------------------------------------------------------------------------
-When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the
-returned edges are now of the same dtype. Previously, ``histogram`` would only
-return the same type if explicit bins were given, and ``histogramdd`` would
-produce ``float64`` bins no matter what the inputs.
-
-``histogramdd`` allows explicit ranges to be given in a subset of axes
-----------------------------------------------------------------------
-The ``range`` argument of `numpy.histogramdd` can now contain ``None`` values to
-indicate that the range for the corresponding axis should be computed from the
-data. Previously, this could not be specified on a per-axis basis.
-
-The ``normed`` arguments of ``histogramdd`` and ``histogram2d`` have been renamed
------------------------------------------------------------------------------------
-These arguments are now called ``density``, which is consistent with
-``histogram``. The old argument continues to work, but the new name should be
-preferred.
-
-``np.r_`` works with 0d arrays, and ``np.ma.mr_`` works with ``np.ma.masked``
------------------------------------------------------------------------------
-0d arrays passed to the `r_` and `mr_` concatenation helpers are now treated as
-though they are arrays of length 1. Previously, passing these was an error.
-As a result, `numpy.ma.mr_` now works correctly on the ``masked`` constant.
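-
-For example::
-
-    >>> np.r_[np.array(0), np.array([1, 2])]
-    array([0, 1, 2])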
-
-``np.ptp`` accepts a ``keepdims`` argument, and extended axis tuples
---------------------------------------------------------------------
-``np.ptp`` (peak-to-peak) can now work over multiple axes, just like ``np.max``
-and ``np.min``.
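-
-A minimal sketch::
-
-    >>> a = np.arange(8).reshape(2, 2, 2)
-    >>> np.ptp(a, axis=(0, 2), keepdims=True)
-    array([[[5],
-            [5]]])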
-
-``MaskedArray.astype`` now is identical to ``ndarray.astype``
--------------------------------------------------------------
-This means it takes all the same arguments, making more code written for
-ndarray work for masked array too.
-
-Enable AVX2/AVX512 at compile time
-----------------------------------
-``simd.inc.src`` was changed to allow use of AVX2 or AVX512 at compile time.
-Previously, compiling for AVX2 (or AVX512) with ``-march=native`` would still
-use the SSE code for the SIMD functions even when the rest of the code was
-built for AVX2.
-
-``nan_to_num`` always returns scalars when receiving scalar or 0d inputs
-------------------------------------------------------------------------
-Previously an array was returned for integer scalar inputs, which is
-inconsistent with the behavior for float inputs, and that of ufuncs in general.
-For all types of scalar or 0d input, the result is now a scalar.
-
-``np.flatnonzero`` works on numpy-convertible types
----------------------------------------------------
-``np.flatnonzero`` now uses ``np.ravel(a)`` instead of ``a.ravel()``, so it
-works for lists, tuples, etc.
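-
-For example::
-
-    >>> np.flatnonzero([0, 3, 0, 5])
-    array([1, 3])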
-
-``np.interp`` returns numpy scalars rather than builtin scalars
----------------------------------------------------------------
-Previously ``np.interp(0.5, [0, 1], [10, 20])`` would return a ``float``, but
-now it returns a ``np.float64`` object, which more closely matches the behavior
-of other functions.
-
-Additionally, the special case of ``np.interp(object_array_0d, ...)`` is no
-longer supported, as ``np.interp(object_array_nd)`` was never supported anyway.
-
-As a result of this change, the ``period`` argument can now be used on 0d
-arrays.
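-
-For example, the scalar case now returns a NumPy scalar (Python 3 shown)::
-
-    >>> type(np.interp(0.5, [0, 1], [10, 20]))
-    <class 'numpy.float64'>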
-
-Allow dtype field names to be unicode in Python 2
--------------------------------------------------
-Previously ``np.dtype([(u'name', float)])`` would raise a ``TypeError`` in
-Python 2, as only bytestrings were allowed in field names. Now any unicode
-string field names will be encoded with the ``ascii`` codec, raising a
-``UnicodeEncodeError`` upon failure.
-
-This change makes it easier to write Python 2/3 compatible code using
-``from __future__ import unicode_literals``, which previously would cause
-string literal field names to raise a TypeError in Python 2.
-
-Comparison ufuncs accept ``dtype=object``, overriding the default ``bool``
---------------------------------------------------------------------------
-This allows object arrays of symbolic types, which override ``==`` and other
-operators to return expressions, to be compared elementwise with
-``np.equal(a, b, dtype=object)``.
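-
-A minimal sketch using plain Python objects as stand-ins (symbolic types from
-a computer-algebra package would behave analogously)::
-
-    >>> a = np.array([1, 2], dtype=object)
-    >>> np.equal(a, a, dtype=object)
-    array([True, True], dtype=object)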
-
-``sort`` functions accept ``kind='stable'``
--------------------------------------------
-Up until now, to perform a stable sort on the data, the user had to do::
-
-    >>> np.sort([5, 2, 6, 2, 1], kind='mergesort')
-    array([1, 2, 2, 5, 6])
-
-because merge sort is the only stable sorting algorithm available in
-NumPy. However, ``kind='mergesort'`` does not make it explicit that the
-user wants a stable sort, which harms readability.
-
-This change allows the user to specify ``kind='stable'``, clarifying
-the intent.
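-
-For example::
-
-    >>> np.sort([5, 2, 6, 2, 1], kind='stable')
-    array([1, 2, 2, 5, 6])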
-
-Do not make temporary copies for in-place accumulation
-------------------------------------------------------
-When ufuncs perform accumulation they no longer make temporary copies because
-of the overlap between input and output; that is, the next element to be
-accumulated is added before the accumulated result is stored in its place,
-hence the overlap is safe. Avoiding the copy results in faster execution.
-
-``linalg.matrix_power`` can now handle stacks of matrices
----------------------------------------------------------
-Like other functions in ``linalg``, ``matrix_power`` can now deal with arrays
-of dimension larger than 2, which are treated as stacks of matrices. As part
-of the change, to further improve consistency, the name of the first argument
-has been changed to ``a`` (from ``M``), and the exceptions for non-square
-matrices have been changed to ``LinAlgError`` (from ``ValueError``).
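-
-A minimal sketch with a stack of two 2x2 matrices::
-
-    >>> i = np.array([[0, 1], [-1, 0]])
-    >>> np.linalg.matrix_power(np.stack([i, -i]), 2)
-    array([[[-1,  0],
-            [ 0, -1]],
-
-           [[-1,  0],
-            [ 0, -1]]])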
-
-Increased performance in ``random.permutation`` for multidimensional arrays
----------------------------------------------------------------------------
-``permutation`` uses the fast path in ``random.shuffle`` for all input
-array dimensions. Previously the fast path was only used for 1-d arrays.
-
-Generalized ufuncs now accept ``axes``, ``axis`` and ``keepdims`` arguments
----------------------------------------------------------------------------
-One can control over which axes a generalized ufunc operates by passing in an
-``axes`` argument, a list of tuples with indices of particular axes. For
-instance, for a signature of ``(i,j),(j,k)->(i,k)`` appropriate for matrix
-multiplication, the base elements are two-dimensional matrices and these are
-taken to be stored in the two last axes of each argument. The corresponding
-axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``. If one wanted to
-use leading dimensions instead, one would pass in ``[(0, 1), (0, 1), (0, 1)]``.
-
-For simplicity, for generalized ufuncs that operate on 1-dimensional arrays
-(vectors), a single integer is accepted instead of a single-element tuple, and
-for generalized ufuncs for which all outputs are scalars, the (empty) output
-tuples can be omitted. Hence, for a signature of ``(i),(i)->()`` appropriate
-for an inner product, one could pass in ``axes=[0, 0]`` to indicate that the
-vectors are stored in the first dimensions of the two input arguments.
-
-As a short-cut for generalized ufuncs that are similar to reductions, i.e.,
-that act on a single, shared core dimension such as the inner product example
-above, one can pass an ``axis`` argument. This is equivalent to passing in
-``axes`` with identical entries for all arguments with that core dimension
-(e.g., for the example above, ``axes=[(axis,), (axis,)]``).
-
-Furthermore, like for reductions, for generalized ufuncs that have inputs that
-all have the same number of core dimensions and outputs with no core dimension,
-one can pass in ``keepdims`` to leave a dimension with size 1 in the outputs,
-thus allowing proper broadcasting against the original inputs. The location of
-the extra dimension can be controlled with ``axes``. For instance, for the
-inner-product example, ``keepdims=True, axes=[-2, -2, -2]`` would act on the
-one-but-last dimension of the input arguments, and leave a size 1 dimension in
-that place in the output.
-
-float128 values now print correctly on ppc systems
---------------------------------------------------
-Previously printing float128 values was buggy on ppc, since the special
-double-double floating-point-format on these systems was not accounted for.
-float128s now print with correct rounding and uniqueness.
-
-Warning to ppc users: You should upgrade glibc if it is version <= 2.23,
-especially if using float128. On ppc, glibc's malloc in these versions often
-misaligns allocated memory, which can crash numpy when using float128 values.
-
-New ``np.take_along_axis`` and ``np.put_along_axis`` functions
---------------------------------------------------------------
-When used on multidimensional arrays, ``argsort``, ``argmin``, ``argmax``, and
-``argpartition`` return arrays that are difficult to use as indices.
-``take_along_axis`` provides an easy way to use these indices to lookup values
-within an array, so that::
-
- np.take_along_axis(a, np.argsort(a, axis=axis), axis=axis)
-
-is the same as::
-
- np.sort(a, axis=axis)
-
-``np.put_along_axis`` acts as the dual operation for writing to these indices
-within an array.
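-
-A minimal sketch (values chosen for illustration)::
-
-    >>> a = np.array([[10, 30, 20], [60, 40, 50]])
-    >>> np.take_along_axis(a, np.argsort(a, axis=1), axis=1)
-    array([[10, 20, 30],
-           [40, 50, 60]])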
-
+++ /dev/null
-==========================
-NumPy 1.15.1 Release Notes
-==========================
-
-This is a bugfix release for bugs and regressions reported following the 1.15.0
-release.
-
-* The annoying but harmless RuntimeWarning that "numpy.dtype size changed" has
-  been suppressed. The long-standing suppression was lost in the transition to
- pytest.
-* The update to Cython 0.28.3 exposed a problematic use of a gcc attribute used
- to prefer code size over speed in module initialization, possibly resulting in
-  incorrect compiled code. This has been fixed in the latest Cython but has been
- disabled here for safety.
-* Support for big-endian and ARMv8 architectures has been improved.
-
-The Python versions supported by this release are 2.7, 3.4-3.7. The wheels are
-linked with OpenBLAS v0.3.0, which should fix some of the linalg problems
-reported for NumPy 1.14.
-
-
-Compatibility Note
-==================
-
-The NumPy 1.15.x OS X wheels released on PyPI no longer contain 32-bit
-binaries. That will also be the case in future releases. See
-`#11625 <https://github.com/numpy/numpy/issues/11625>`__ for the related
-discussion. Those needing 32-bit support should look elsewhere or build
-from source.
-
-
-Contributors
-============
-
-A total of 7 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Charles Harris
-* Chris Billington
-* Elliott Sales de Andrade +
-* Eric Wieser
-* Jeremy Manning +
-* Matti Picus
-* Ralf Gommers
-
-Pull requests merged
-====================
-
-A total of 24 pull requests were merged for this release.
-
-* `#11647 <https://github.com/numpy/numpy/pull/11647>`__: MAINT: Filter Cython warnings in ``__init__.py``
-* `#11648 <https://github.com/numpy/numpy/pull/11648>`__: BUG: Fix doc source links to unwrap decorators
-* `#11657 <https://github.com/numpy/numpy/pull/11657>`__: BUG: Ensure singleton dimensions are not dropped when converting...
-* `#11661 <https://github.com/numpy/numpy/pull/11661>`__: BUG: Warn on Nan in minimum,maximum for scalars
-* `#11665 <https://github.com/numpy/numpy/pull/11665>`__: BUG: cython sometimes emits invalid gcc attribute
-* `#11682 <https://github.com/numpy/numpy/pull/11682>`__: BUG: Fix regression in void_getitem
-* `#11698 <https://github.com/numpy/numpy/pull/11698>`__: BUG: Make matrix_power again work for object arrays.
-* `#11700 <https://github.com/numpy/numpy/pull/11700>`__: BUG: Add missing PyErr_NoMemory after failing malloc
-* `#11719 <https://github.com/numpy/numpy/pull/11719>`__: BUG: Fix undefined functions on big-endian systems.
-* `#11720 <https://github.com/numpy/numpy/pull/11720>`__: MAINT: Make einsum optimize default to False.
-* `#11746 <https://github.com/numpy/numpy/pull/11746>`__: BUG: Fix regression in loadtxt for bz2 text files in Python 2.
-* `#11757 <https://github.com/numpy/numpy/pull/11757>`__: BUG: Revert use of `console_scripts`.
-* `#11758 <https://github.com/numpy/numpy/pull/11758>`__: BUG: Fix Fortran kind detection for aarch64 & s390x.
-* `#11759 <https://github.com/numpy/numpy/pull/11759>`__: BUG: Fix printing of longdouble on ppc64le.
-* `#11760 <https://github.com/numpy/numpy/pull/11760>`__: BUG: Fixes for unicode field names in Python 2
-* `#11761 <https://github.com/numpy/numpy/pull/11761>`__: BUG: Increase required cython version on python 3.7
-* `#11763 <https://github.com/numpy/numpy/pull/11763>`__: BUG: check return value of _buffer_format_string
-* `#11775 <https://github.com/numpy/numpy/pull/11775>`__: MAINT: Make assert_array_compare more generic.
-* `#11776 <https://github.com/numpy/numpy/pull/11776>`__: TST: Fix urlopen stubbing.
-* `#11777 <https://github.com/numpy/numpy/pull/11777>`__: BUG: Fix regression in intersect1d.
-* `#11779 <https://github.com/numpy/numpy/pull/11779>`__: BUG: Fix test sensitive to platform byte order.
-* `#11781 <https://github.com/numpy/numpy/pull/11781>`__: BUG: Avoid signed overflow in histogram
-* `#11785 <https://github.com/numpy/numpy/pull/11785>`__: BUG: Fix pickle and memoryview for datetime64, timedelta64 scalars
-* `#11786 <https://github.com/numpy/numpy/pull/11786>`__: BUG: Deprecation triggers segfault
+++ /dev/null
-==========================
-NumPy 1.15.2 Release Notes
-==========================
-
-This is a bugfix release for bugs and regressions reported following the 1.15.1
-release.
-
-* The matrix PendingDeprecationWarning is now suppressed in pytest 3.8.
-* The new cached allocations machinery has been fixed to be thread safe.
-* The boolean indexing of subclasses now works correctly.
-* A small memory leak in PyArray_AdaptFlexibleDType has been fixed.
-
-The Python versions supported by this release are 2.7, 3.4-3.7. The wheels are
-linked with OpenBLAS v0.3.0, which should fix some of the linalg problems
-reported for NumPy 1.14.
-
-Compatibility Note
-==================
-
-The NumPy 1.15.x OS X wheels released on PyPI no longer contain 32-bit
-binaries. That will also be the case in future releases. See
-`#11625 <https://github.com/numpy/numpy/issues/11625>`__ for the related
-discussion. Those needing 32-bit support should look elsewhere or build
-from source.
-
-Contributors
-============
-
-A total of 4 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Charles Harris
-* Julian Taylor
-* Marten van Kerkwijk
-* Matti Picus
-
-Pull requests merged
-====================
-
-A total of 4 pull requests were merged for this release.
-
-* `#11902 <https://github.com/numpy/numpy/pull/11902>`__: BUG: Fix matrix PendingDeprecationWarning suppression for pytest...
-* `#11981 <https://github.com/numpy/numpy/pull/11981>`__: BUG: fix cached allocations without the GIL for 1.15.x
-* `#11982 <https://github.com/numpy/numpy/pull/11982>`__: BUG: fix refcount leak in PyArray_AdaptFlexibleDType
-* `#11992 <https://github.com/numpy/numpy/pull/11992>`__: BUG: Ensure boolean indexing of subclasses sets base correctly.
+++ /dev/null
-==========================
-NumPy 1.15.3 Release Notes
-==========================
-
-This is a bugfix release for bugs and regressions reported following the 1.15.2
-release. The Python versions supported by this release are 2.7, 3.4-3.7. The
-wheels are linked with OpenBLAS v0.3.0, which should fix some of the linalg
-problems reported for NumPy 1.14.
-
-Compatibility Note
-==================
-
-The NumPy 1.15.x OS X wheels released on PyPI no longer contain 32-bit
-binaries. That will also be the case in future releases. See
-`#11625 <https://github.com/numpy/numpy/issues/11625>`__ for the related
-discussion. Those needing 32-bit support should look elsewhere or build
-from source.
-
-Contributors
-============
-
-A total of 7 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Allan Haldane
-* Charles Harris
-* Jeroen Demeyer
-* Kevin Sheppard
-* Matthew Bowden +
-* Matti Picus
-* Tyler Reddy
-
-Pull requests merged
-====================
-
-A total of 12 pull requests were merged for this release.
-
-* `#12080 <https://github.com/numpy/numpy/pull/12080>`__: MAINT: Blacklist some MSVC complex functions.
-* `#12083 <https://github.com/numpy/numpy/pull/12083>`__: TST: Add azure CI testing to 1.15.x branch.
-* `#12084 <https://github.com/numpy/numpy/pull/12084>`__: BUG: test_path() now uses Path.resolve()
-* `#12085 <https://github.com/numpy/numpy/pull/12085>`__: TST, MAINT: Fix some failing tests on azure-pipelines mac and...
-* `#12187 <https://github.com/numpy/numpy/pull/12187>`__: BUG: Fix memory leak in mapping.c
-* `#12188 <https://github.com/numpy/numpy/pull/12188>`__: BUG: Allow boolean subtract in histogram
-* `#12189 <https://github.com/numpy/numpy/pull/12189>`__: BUG: Fix in-place permutation
-* `#12190 <https://github.com/numpy/numpy/pull/12190>`__: BUG: limit default for get_num_build_jobs() to 8
-* `#12191 <https://github.com/numpy/numpy/pull/12191>`__: BUG: OBJECT_to_* should check for errors
-* `#12192 <https://github.com/numpy/numpy/pull/12192>`__: DOC: Prepare for NumPy 1.15.3 release.
-* `#12237 <https://github.com/numpy/numpy/pull/12237>`__: BUG: Fix MaskedArray fill_value type conversion.
-* `#12238 <https://github.com/numpy/numpy/pull/12238>`__: TST: Backport azure-pipeline testing fixes for Mac
+++ /dev/null
-==========================
-NumPy 1.15.4 Release Notes
-==========================
-
-This is a bugfix release for bugs and regressions reported following the 1.15.3
-release. The Python versions supported by this release are 2.7, 3.4-3.7. The
-wheels are linked with OpenBLAS v0.3.0, which should fix some of the linalg
-problems reported for NumPy 1.14.
-
-Compatibility Note
-==================
-
-The NumPy 1.15.x OS X wheels released on PyPI no longer contain 32-bit
-binaries. That will also be the case in future releases. See
-`#11625 <https://github.com/numpy/numpy/issues/11625>`__ for the related
-discussion. Those needing 32-bit support should look elsewhere or build
-from source.
-
-Contributors
-============
-
-A total of 4 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Charles Harris
-* Matti Picus
-* Sebastian Berg
-* bbbbbbbbba +
-
-Pull requests merged
-====================
-
-A total of 4 pull requests were merged for this release.
-
-* `#12296 <https://github.com/numpy/numpy/pull/12296>`__: BUG: Dealloc cached buffer info
-* `#12297 <https://github.com/numpy/numpy/pull/12297>`__: BUG: Fix fill value in masked array '==' and '!=' ops.
-* `#12307 <https://github.com/numpy/numpy/pull/12307>`__: DOC: Correct the default value of `optimize` in `numpy.einsum`
-* `#12320 <https://github.com/numpy/numpy/pull/12320>`__: REL: Prepare for the NumPy 1.15.4 release
+++ /dev/null
-==========================
-NumPy 1.16.0 Release Notes
-==========================
-
-This NumPy release is the last one to support Python 2.7 and will be maintained
-as a long term release with bug fixes until 2020. Support for Python 3.4 has
-been dropped; the supported Python versions are 2.7 and 3.5-3.7. The wheels on PyPI
-are linked with OpenBLAS v0.3.4+, which should fix the known threading issues
-found in previous OpenBLAS versions.
-
-Downstream developers building this release should use Cython >= 0.29 and, if
-using OpenBLAS, OpenBLAS > v0.3.4.
-
-This release has seen a lot of refactoring and features many bug fixes, improved
-code organization, and better cross platform compatibility. Not all of these
-improvements will be visible to users, but they should help make maintenance
-easier going forward.
-
-
-Highlights
-==========
-
-* Experimental (opt-in only) support for overriding numpy functions,
- see ``__array_function__`` below.
-
-* The ``matmul`` function is now a ufunc. This provides better
- performance and allows overriding with ``__array_ufunc__``.
-
-* Improved support for the ARM and POWER architectures.
-
-* Improved support for AIX and PyPy.
-
-* Improved interop with ctypes.
-
-* Improved support for PEP 3118.
-
-
-
-New functions
-=============
-
-* New functions added to the `numpy.lib.recfunctions` module to ease the
- structured assignment changes:
-
- * ``assign_fields_by_name``
- * ``structured_to_unstructured``
- * ``unstructured_to_structured``
- * ``apply_along_fields``
- * ``require_fields``
-
- See the user guide at <https://docs.scipy.org/doc/numpy/user/basics.rec.html>
- for more info.
-
-
-New deprecations
-================
-
-* The type dictionaries `numpy.core.typeNA` and `numpy.core.sctypeNA` are
- deprecated. They were buggy and not documented and will be removed in the
-  1.18 release. Use `numpy.sctypeDict` instead.
-
-* The `numpy.asscalar` function is deprecated. It is an alias to the more
- powerful `numpy.ndarray.item`, not tested, and fails for scalars.
-
-* The `numpy.set_array_ops` and `numpy.get_array_ops` functions are deprecated.
- As part of `NEP 15`, they have been deprecated along with the C-API functions
- :c:func:`PyArray_SetNumericOps` and :c:func:`PyArray_GetNumericOps`. Users
- who wish to override the inner loop functions in built-in ufuncs should use
- :c:func:`PyUFunc_ReplaceLoopBySignature`.
-
-* The `numpy.unravel_index` keyword argument ``dims`` is deprecated, use
- ``shape`` instead.
-
-* The `numpy.histogram` ``normed`` argument is deprecated. It was deprecated
- previously, but no warning was issued.
-
-* The ``positive`` operator (``+``) applied to non-numerical arrays is
- deprecated. See below for details.
-
-* Passing an iterator to the stack functions is deprecated.
-
-
-Expired deprecations
-====================
-
-* NaT comparisons now return ``False`` without a warning, finishing a
- deprecation cycle begun in NumPy 1.11.
-
-* ``np.lib.function_base.unique`` was removed, finishing a deprecation cycle
- begun in NumPy 1.4. Use `numpy.unique` instead.
-
-* multi-field indexing now returns views instead of copies, finishing a
- deprecation cycle begun in NumPy 1.7. The change was previously attempted in
- NumPy 1.14 but reverted until now.
-
-* ``np.PackageLoader`` and ``np.pkgload`` have been removed. These were
- deprecated in 1.10, had no tests, and seem to no longer work in 1.15.
-
-
-Future changes
-==============
-
-* NumPy 1.17 will drop support for Python 2.7.
-
-
-Compatibility notes
-===================
-
-f2py script on Windows
-----------------------
-On Windows, the installed script for running f2py is now an ``.exe`` file
-rather than a ``*.py`` file and should be run from the command line as ``f2py``
-whenever the ``Scripts`` directory is in the path. Running ``f2py`` as a module
-``python -m numpy.f2py [...]`` will work without path modification in any
-version of NumPy.
-
-NaT comparisons
----------------
-Consistent with the behavior of NaN, all comparisons other than inequality
-checks with datetime64 or timedelta64 NaT ("not-a-time") values now always
-return ``False``, and inequality checks with NaT now always return ``True``.
-This includes comparisons between NaT values. For compatibility with the
-old behavior, use ``np.isnat`` to explicitly check for NaT or convert
-datetime64/timedelta64 arrays with ``.astype(np.int64)`` before making
-comparisons.
-
-complex64/128 alignment has changed
------------------------------------
-The memory alignment of complex types is now the same as a C-struct composed of
-two floating point values, while before it was equal to the size of the type.
-For many users (for instance on x64/unix/gcc) this means that complex64 is now
-4-byte aligned instead of 8-byte aligned. An important consequence is that
-aligned structured dtypes may now have a different size. For instance,
-``np.dtype('c8,u1', align=True)`` used to have an itemsize of 16 (on x64/gcc)
-but now it is 12.
-
-More in detail, the complex64 type now has the same alignment as a C-struct
-``struct {float r, i;}``, according to the compiler used to compile numpy, and
-similarly for the complex128 and complex256 types.
-
-nd_grid __len__ removal
------------------------
-``len(np.mgrid)`` and ``len(np.ogrid)`` are now considered nonsensical
-and raise a ``TypeError``.
-
-``np.unravel_index`` now accepts ``shape`` keyword argument
------------------------------------------------------------
-Previously, only the ``dims`` keyword argument was accepted
-for specification of the shape of the array to be used
-for unraveling. ``dims`` remains supported, but is now deprecated.
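-
-For example::
-
-    >>> np.unravel_index(22, shape=(7, 6))
-    (3, 4)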
-
-multi-field views return a view instead of a copy
--------------------------------------------------
-Indexing a structured array with multiple fields, e.g., ``arr[['f1', 'f3']]``,
-returns a view into the original array instead of a copy. The returned view
-will often have extra padding bytes corresponding to intervening fields in the
-original array, unlike before, which will affect code such as
-``arr[['f1', 'f3']].view('float64')``. This change has been planned since numpy
-1.7. Operations hitting this path have emitted ``FutureWarnings`` since then.
-Additional ``FutureWarnings`` about this change were added in 1.12.
-
-To help users update their code to account for these changes, a number of
-functions have been added to the ``numpy.lib.recfunctions`` module which
-safely allow such operations. For instance, the code above can be replaced
-with ``structured_to_unstructured(arr[['f1', 'f3']], dtype='float64')``.
-See the "accessing multiple fields" section of the
-`user guide <https://docs.scipy.org/doc/numpy/user/basics.rec.html#accessing-multiple-fields>`__.
-
-
-C API changes
-=============
-
-The :c:data:`NPY_API_VERSION` was incremented to 0x0000D, due to the addition
-of:
-
-* :c:member:`PyUFuncObject.core_dim_flags`
-* :c:member:`PyUFuncObject.core_dim_sizes`
-* :c:member:`PyUFuncObject.identity_value`
-* :c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity`
-
-
-New Features
-============
-
-Integrated squared error (ISE) estimator added to ``histogram``
----------------------------------------------------------------
-This method (``bins='stone'``) for optimizing the bin number is a
-generalization of Scott's rule. Scott's rule assumes the distribution
-is approximately normal, while the ISE_ is a non-parametric method based on
-cross-validation.
-
-.. _ISE: https://en.wikipedia.org/wiki/Histogram#Minimizing_cross-validation_estimated_squared_error
-
-``max_rows`` keyword added for ``np.loadtxt``
----------------------------------------------
-The new keyword ``max_rows`` in `numpy.loadtxt` sets the maximum number of
-rows read after ``skiprows``, as in `numpy.genfromtxt`.
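-
-A minimal sketch using an in-memory file::
-
-    >>> from io import StringIO
-    >>> np.loadtxt(StringIO(u"0 1\n2 3\n4 5"), max_rows=2)
-    array([[0., 1.],
-           [2., 3.]])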
-
-modulus operator support added for ``np.timedelta64`` operands
---------------------------------------------------------------
-The modulus (remainder) operator is now supported for two operands
-of type ``np.timedelta64``. The operands may have different units
-and the return value will match the type of the operands.
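-
-For example::
-
-    >>> np.timedelta64(7, 'h') % np.timedelta64(3, 'h')
-    numpy.timedelta64(1,'h')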
-
-
-Improvements
-============
-
-no-copy pickling of numpy arrays
---------------------------------
-Up to protocol 4, numpy array pickling created 2 spurious copies of the data
-being serialized. With pickle protocol 5, and the ``PickleBuffer`` API, a
-large variety of numpy arrays can now be serialized without any copy using
-out-of-band buffers, and with one less copy using in-band buffers. This
-results, for large arrays, in an up to 66% drop in peak memory usage.
-
-build shell independence
-------------------------
-NumPy builds should no longer interact with the host machine
-shell directly. ``exec_command`` has been replaced with
-``subprocess.check_output`` where appropriate.
-
-`np.polynomial.Polynomial` classes render in LaTeX in Jupyter notebooks
------------------------------------------------------------------------
-When used in a front-end that supports it, `Polynomial` instances are now
-rendered through LaTeX. The current format is experimental, and is subject to
-change.
-
-``randint`` and ``choice`` now work on empty distributions
-----------------------------------------------------------
-Even when no elements needed to be drawn, ``np.random.randint`` and
-``np.random.choice`` raised an error when the arguments described an empty
-distribution. This has been fixed so that e.g.
-``np.random.choice([], 0) == np.array([], dtype=float64)``.
-
-``linalg.lstsq``, ``linalg.qr``, and ``linalg.svd`` now work with empty arrays
-------------------------------------------------------------------------------
-Previously, a ``LinAlgError`` would be raised when empty matrices (with zero
-rows and/or columns) were passed in. Now outputs of
-appropriate shapes are returned.
-
-Chain exceptions to give better error messages for invalid PEP3118 format strings
----------------------------------------------------------------------------------
-This should help track down problems.
-
-Einsum optimization path updates and efficiency improvements
-------------------------------------------------------------
-Einsum was synchronized with the current upstream work.
-
-`numpy.angle` and `numpy.expand_dims` now work on ``ndarray`` subclasses
-------------------------------------------------------------------------
-In particular, they now work for masked arrays.
-
-``NPY_NO_DEPRECATED_API`` compiler warning suppression
-------------------------------------------------------
-Setting ``NPY_NO_DEPRECATED_API`` to a value of 0 will suppress the current compiler
-warnings when the deprecated numpy API is used.
-
-``np.diff`` added kwargs ``prepend`` and ``append``
-----------------------------------------------------
-The new kwargs ``prepend`` and ``append`` allow values to be inserted on
-either end of the differences, similar to the options of `ediff1d`. Now the
-inverse of `cumsum` can be obtained easily via ``prepend=0``.
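-
-For example, recovering an array from its ``cumsum``::
-
-    >>> x = np.cumsum([1, 2, 3])
-    >>> np.diff(x, prepend=0)
-    array([1, 2, 3])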
-
-ARM support updated
--------------------
-Support for ARM CPUs has been updated to accommodate 32 and 64 bit targets,
-and also big and little endian byte ordering. AARCH32 memory alignment issues
-have been addressed. CI testing has been expanded to include AARCH64 targets
-via the services of shippable.com.
-
-Appending to build flags
-------------------------
-`numpy.distutils` has always overridden rather than appended to `LDFLAGS` and
-other similar such environment variables for compiling Fortran extensions.
-Now, if the `NPY_DISTUTILS_APPEND_FLAGS` environment variable is set to 1, the
-behavior will be appending. This applies to: `LDFLAGS`, `F77FLAGS`,
-`F90FLAGS`, `FREEFLAGS`, `FOPT`, `FDEBUG`, and `FFLAGS`. See gh-11525 for more
-details.
-
-Generalized ufunc signatures now allow fixed-size dimensions
-------------------------------------------------------------
-By using a numerical value in the signature of a generalized ufunc, one can
-indicate that the given function requires input or output to have dimensions
-with the given size. E.g., the signature of a function that converts a polar
-angle to a two-dimensional cartesian unit vector would be ``()->(2)``; that
-for one that converts two spherical angles to a three-dimensional unit vector
-would be ``(),()->(3)``; and that for the cross product of two
-three-dimensional vectors would be ``(3),(3)->(3)``.
-
-Note that to the elementary function these dimensions are not treated any
-differently from variable ones indicated with a name starting with a letter;
-the loop still is passed the corresponding size, but it can now count on that
-size being equal to the fixed one given in the signature.
-
-Generalized ufunc signatures now allow flexible dimensions
-----------------------------------------------------------
-Some functions, in particular numpy's implementation of ``@`` as ``matmul``,
-are very similar to generalized ufuncs in that they operate over core
-dimensions, but one could not present them as such because they were able to
-deal with inputs in which a dimension is missing. To support this, it is now
-allowed to postfix a dimension name with a question mark to indicate that the
-dimension does not necessarily have to be present.
-
-With this addition, the signature for ``matmul`` can be expressed as
-``(m?,n),(n,p?)->(m?,p?)``. This indicates that if, e.g., the second operand
-has only one dimension, for the purposes of the elementary function it will be
-treated as if that input has core shape ``(n, 1)``, and the output has the
-corresponding core shape of ``(m, 1)``. The actual output array, however, has
-the flexible dimension removed, i.e., it will have shape ``(..., m)``.
-Similarly, if both arguments have only a single dimension, the inputs will be
-presented as having shapes ``(1, n)`` and ``(n, 1)`` to the elementary
-function, and the output as ``(1, 1)``, while the actual output array returned
-will have shape ``()``. In this way, the signature allows one to use a
-single elementary function for four related but different signatures,
-``(m,n),(n,p)->(m,p)``, ``(n),(n,p)->(p)``, ``(m,n),(n)->(m)`` and
-``(n),(n)->()``.
-
-``np.clip`` and the ``clip`` method check for memory overlap
-------------------------------------------------------------
-The ``out`` argument to these functions is now always tested for memory overlap
-to avoid corrupted results when memory overlap occurs.
-
-New value ``unscaled`` for option ``cov`` in ``np.polyfit``
------------------------------------------------------------
-A further possible value has been added to the ``cov`` parameter of the
-``np.polyfit`` function. With ``cov='unscaled'`` the scaling of the covariance
-matrix is disabled completely (similar to setting ``absolute_sigma=True`` in
-``scipy.optimize.curve_fit``). This is useful on occasions where the weights
-are given by ``1/sigma``, with sigma being the (known) standard errors of
-(Gaussian distributed) data points, in which case the unscaled matrix is
-already a correct estimate for the covariance matrix.
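-
-A minimal call-pattern sketch (data and weights are placeholders)::
-
-    >>> x = np.linspace(0, 1, 5)
-    >>> y = 2 * x + 1
-    >>> sigma = np.full_like(x, 0.1)        # known standard errors
-    >>> coef, cov = np.polyfit(x, y, 1, w=1/sigma, cov='unscaled')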
-
-Detailed docstrings for scalar numeric types
---------------------------------------------
-The ``help`` function, when applied to numeric types such as `numpy.intc`,
-`numpy.int_`, and `numpy.longlong`, now lists all of the aliased names for that
-type, distinguishing between platform-dependent and platform-independent
-aliases.
-
-``__module__`` attribute now points to public modules
------------------------------------------------------
-The ``__module__`` attribute on most NumPy functions has been updated to refer
-to the preferred public module from which to access a function, rather than
-the module in which the function happens to be defined. This produces more
-informative displays for functions in tools such as IPython, e.g., instead of
-``<function 'numpy.core.fromnumeric.sum'>`` you now see
-``<function 'numpy.sum'>``.
-
-Large allocations marked as suitable for transparent hugepages
---------------------------------------------------------------
-On systems that support transparent hugepages over the madvise system call,
-numpy now marks that large memory allocations can be backed by hugepages, which
-reduces page fault overhead and can in some fault-heavy cases improve
-performance significantly. On Linux the setting for huge pages to be used,
-`/sys/kernel/mm/transparent_hugepage/enabled`, must be at least `madvise`.
-Systems which already have it set to `always` will not see much difference as
-the kernel will automatically use huge pages where appropriate.
-
-Users of very old Linux kernels (~3.x and older) should make sure that
-`/sys/kernel/mm/transparent_hugepage/defrag` is not set to `always` to avoid
-performance problems due to concurrency issues in the memory defragmentation.
-
-Alpine Linux (and other musl c library distros) support
--------------------------------------------------------
-We now default to using `fenv.h` for floating point status error reporting.
-Previously we had a broken default that sometimes failed to report underflow,
-overflow, and invalid floating point operations. Now we can support non-glibc
-distributions like Alpine Linux as long as they ship `fenv.h`.
-
-Speedup ``np.block`` for large arrays
--------------------------------------
-Large arrays (greater than ``512 * 512``) now use a blocking algorithm based on
-copying the data directly into the appropriate slice of the resulting array.
-This results in significant speedups for these large arrays, particularly for
-arrays being blocked along more than 2 dimensions.
-
-``arr.ctypes.data_as(...)`` holds a reference to arr
-------------------------------------------------------
-Previously the caller was responsible for keeping the array alive for the
-lifetime of the pointer.
-
-Speedup ``np.take`` for read-only arrays
-----------------------------------------
-The implementation of ``np.take`` no longer makes an unnecessary copy of the
-source array when its ``writeable`` flag is set to ``False``.
-
-Support path-like objects for more functions
---------------------------------------------
-The ``np.core.records.fromfile`` function now supports ``pathlib.Path``
-and other path-like objects in addition to a file object. Furthermore, the
-``np.load`` function now also supports path-like objects when using memory
-mapping (``mmap_mode`` keyword argument).
-
-Better behaviour of ufunc identities during reductions
-------------------------------------------------------
-Universal functions have an ``.identity`` which is used when ``.reduce`` is
-called on an empty axis.
-
-As of this release, the logical binary ufuncs, `logical_and`, `logical_or`,
-and `logical_xor`, now have identities of type `bool`, where previously they
-were of type `int`. This restores the 1.14 behavior of getting ``bool``
-results when reducing empty object arrays with these ufuncs, while also
-keeping the 1.15 behavior of getting ``int`` results when reducing empty
-object arrays with arithmetic ufuncs like ``add`` and ``multiply``.
-
-Additionally, `logaddexp` now has an identity of ``-inf``, allowing it to be
-called on empty sequences, where previously it could not be.
-
-This is possible thanks to the new
-:c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity`, which allows
-arbitrary values to be used as identities.
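-
-A quick illustration of the new identity behavior::
-
-    >>> np.logical_and.reduce(np.array([], dtype=object))
-    True
-    >>> np.add.reduce(np.array([], dtype=object))
-    0
-    >>> np.logaddexp.reduce([])
-    -inf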
-
-Improved conversion from ctypes objects
----------------------------------------
-Numpy has always supported taking a value or type from ``ctypes`` and
-converting it into an array or dtype, but only behaved correctly for simpler
-types. As of this release, this caveat is lifted - now:
-
-* The ``_pack_`` attribute of ``ctypes.Structure``, used to emulate C's
- ``__attribute__((packed))``, is respected.
-* Endianness of all ctypes objects is preserved
-* ``ctypes.Union`` is supported
-* Non-representable constructs raise exceptions, rather than producing
- dangerously incorrect results:
-
- * Bitfields are no longer interpreted as sub-arrays
- * Pointers are no longer replaced with the type that they point to
-
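-A short sketch of the ``_pack_`` handling (the structure here is made up for
-the example)::
-
-    import ctypes
-    import numpy as np
-
-    class Packed(ctypes.Structure):
-        _pack_ = 1                      # emulates __attribute__((packed))
-        _fields_ = [('x', ctypes.c_uint8),
-                    ('y', ctypes.c_uint32)]
-
-    # offsets are [0, 1] and itemsize is 5; no padding is inserted
-    assert np.dtype(Packed).itemsize == 5
-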
-A new ``ndpointer.contents`` member
------------------------------------
-This matches the ``.contents`` member of normal ctypes arrays, and can be used
-to construct an ``np.array`` around the pointer's contents. This replaces
-``np.array(some_nd_pointer)``, which stopped working in 1.15. As a side effect
-of this change, ``ndpointer`` now supports dtypes with overlapping fields and
-padding.
-
-``matmul`` is now a ``ufunc``
------------------------------
-`numpy.matmul` is now a ufunc which means that both the function and the
-``__matmul__`` operator can now be overridden by ``__array_ufunc__``. Its
-implementation has also changed. It uses the same BLAS routines as
-`numpy.dot`, ensuring its performance is similar for large matrices.
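-
-For example::
-
-    >>> type(np.matmul)
-    <class 'numpy.ufunc'>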
-
-Start and stop arrays for ``linspace``, ``logspace`` and ``geomspace``
-----------------------------------------------------------------------
-These functions used to be limited to scalar stop and start values, but can
-now take arrays, which will be properly broadcast and result in an output
-which has one axis prepended. This can be used, e.g., to obtain linearly
-interpolated points between sets of points.
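-
-For instance::
-
-    >>> np.linspace([0, 10], [1, 20], 3)
-    array([[ 0. , 10. ],
-           [ 0.5, 15. ],
-           [ 1. , 20. ]])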
-
-CI extended with additional services
-------------------------------------
-We now use additional free CI services, thanks to the companies that provide:
-
-* Code coverage testing via codecov.io
-* ARM testing via shippable.com
-* Additional test runs on Azure Pipelines
-
-These are in addition to our continued use of Travis, AppVeyor (for wheels),
-and LGTM.
-
-
-Changes
-=======
-
-Comparison ufuncs will now error rather than return NotImplemented
-------------------------------------------------------------------
-Previously, comparison ufuncs such as ``np.equal`` would return
-`NotImplemented` if their arguments had structured dtypes, to help comparison
-operators such as ``__eq__`` deal with those. This is no longer needed, as the
-relevant logic has moved to the comparison operators proper (which thus do
-continue to return `NotImplemented` as needed). Hence, like all other ufuncs,
-the comparison ufuncs will now error on structured dtypes.
-
-Positive will now raise a deprecation warning for non-numerical arrays
-----------------------------------------------------------------------
-Previously, ``+array`` unconditionally returned a copy. Now, it will
-raise a ``DeprecationWarning`` if the array is not numerical (i.e., if
-``np.positive(array)`` raises a ``TypeError``). For ``ndarray``
-subclasses that override the default ``__array_ufunc__`` implementation,
-the ``TypeError`` is passed on.
-
-``NDArrayOperatorsMixin`` now implements matrix multiplication
---------------------------------------------------------------
-Previously, ``np.lib.mixins.NDArrayOperatorsMixin`` did not implement the
-special methods for Python's matrix multiplication operator (``@``). This has
-changed now that ``matmul`` is a ufunc and can be overridden using
-``__array_ufunc__``.
-
-The scaling of the covariance matrix in ``np.polyfit`` is different
--------------------------------------------------------------------
-So far, ``np.polyfit`` used a non-standard factor in the scaling of the
-covariance matrix. Namely, rather than using the standard ``chisq/(M-N)``, it
-scaled it with ``chisq/(M-N-2)``, where M is the number of data points and N
-is the number of parameters. This scaling is inconsistent with other fitting
-programs such as ``scipy.optimize.curve_fit`` and was changed to
-``chisq/(M-N)``.
-
-``maximum`` and ``minimum`` no longer emit warnings
----------------------------------------------------
-As part of code introduced in 1.10, ``float32`` and ``float64`` set invalid
-float status when a NaN is encountered in `numpy.maximum` and `numpy.minimum`
-when using SSE2 semantics. This caused a `RuntimeWarning` to sometimes be
-emitted. In 1.15 we fixed the inconsistencies which caused the warnings to
-become more conspicuous. Now no warnings will be emitted.
-
-Umath and multiarray c-extension modules merged into a single module
---------------------------------------------------------------------
-The two modules were merged, according to `NEP 15`_. Previously `np.core.umath`
-and `np.core.multiarray` were separate c-extension modules. They are now python
-wrappers to the single `np.core._multiarray_umath` c-extension module.
-
-.. _`NEP 15` : http://www.numpy.org/neps/nep-0015-merge-multiarray-umath.html
-
-``getfield`` validity checks extended
--------------------------------------
-`numpy.ndarray.getfield` now checks the dtype and offset arguments to prevent
-accessing invalid memory locations.
-
-NumPy functions now support overrides with ``__array_function__``
------------------------------------------------------------------
-NumPy has a new experimental mechanism for overriding the implementation of
-almost all NumPy functions on non-NumPy arrays by defining an
-``__array_function__`` method, as described in `NEP 18`_.
-
-This feature has not yet been enabled by default, but has been released to
-facilitate experimentation by potential users. See the NEP for details on
-setting the appropriate environment variable. We expect the NumPy 1.17 release
-will enable overrides by default, which will also be more performant due to a
-new implementation written in C.
-
-.. _`NEP 18` : http://www.numpy.org/neps/nep-0018-array-function-protocol.html
-
-Arrays based off readonly buffers cannot be set ``writeable``
--------------------------------------------------------------
-We now disallow setting the ``writeable`` flag True on arrays created
-from ``fromstring(readonly-buffer)``.
+++ /dev/null
-==========================
-NumPy 1.16.1 Release Notes
-==========================
-
-The NumPy 1.16.1 release fixes bugs reported against the 1.16.0 release, and
-also backports several enhancements from master that seem appropriate for a
-release series that is the last to support Python 2.7. The wheels on PyPI are
-linked with OpenBLAS v0.3.4+, which should fix the known threading issues
-found in previous OpenBLAS versions.
-
-Downstream developers building this release should use Cython >= 0.29.2 and, if
-using OpenBLAS, OpenBLAS > v0.3.4.
-
-If you are installing using pip, you may encounter a problem with older
-installed versions of NumPy that pip did not delete becoming mixed with the
-current version, resulting in an ``ImportError``. That problem is particularly
-common on Debian derived distributions due to a modified pip. The fix is to
-make sure all previous NumPy versions installed by pip have been removed. See
-`#12736 <https://github.com/numpy/numpy/issues/12736>`__ for discussion of the
-issue. Note that previously this problem resulted in an ``AttributeError``.
-
-
-Contributors
-============
-
-A total of 16 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Antoine Pitrou
-* Arcesio Castaneda Medina +
-* Charles Harris
-* Chris Markiewicz +
-* Christoph Gohlke
-* Christopher J. Markiewicz +
-* Daniel Hrisca +
-* EelcoPeacs +
-* Eric Wieser
-* Kevin Sheppard
-* Matti Picus
-* OBATA Akio +
-* Ralf Gommers
-* Sebastian Berg
-* Stephan Hoyer
-* Tyler Reddy
-
-
-Enhancements
-============
-
-* `#12767 <https://github.com/numpy/numpy/pull/12767>`__: ENH: add mm->q floordiv
-* `#12768 <https://github.com/numpy/numpy/pull/12768>`__: ENH: port np.core.overrides to C for speed
-* `#12769 <https://github.com/numpy/numpy/pull/12769>`__: ENH: Add np.ctypeslib.as_ctypes_type(dtype), improve `np.ctypeslib.as_ctypes`
-* `#12773 <https://github.com/numpy/numpy/pull/12773>`__: ENH: add "max difference" messages to np.testing.assert_array_equal...
-* `#12820 <https://github.com/numpy/numpy/pull/12820>`__: ENH: Add mm->qm divmod
-* `#12890 <https://github.com/numpy/numpy/pull/12890>`__: ENH: add _dtype_ctype to namespace for freeze analysis
-
-
-Compatibility notes
-===================
-
-* The changed error message emitted by array comparison testing functions may
-  affect doctests. See below for details.
-
-* Casting from double and single denormals to float16 has been corrected. In
-  some rare cases, this may cause results to be rounded up instead of down,
-  changing the last bit (ULP) of the result.
-
-
-New Features
-============
-
-divmod operation is now supported for two ``timedelta64`` operands
-------------------------------------------------------------------
-The divmod operator now handles two ``np.timedelta64`` operands, with
-type signature ``mm->qm``.
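-
-For example::
-
-    >>> divmod(np.timedelta64(7, 's'), np.timedelta64(3, 's'))
-    (2, numpy.timedelta64(1,'s'))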
-
-
-Improvements
-============
-
-Further improvements to ``ctypes`` support in ``np.ctypeslib``
---------------------------------------------------------------
-A new `numpy.ctypeslib.as_ctypes_type` function has been added, which can be
-used to convert a `dtype` into a best-guess `ctypes` type. Thanks to this
-new function, `numpy.ctypeslib.as_ctypes` now supports a much wider range of
-array types, including structures, booleans, and integers of non-native
-endianness.
-
-Array comparison assertions include maximum differences
--------------------------------------------------------
-Error messages from array comparison tests such as
-`np.testing.assert_allclose` now include "max absolute difference" and
-"max relative difference," in addition to the previous "mismatch" percentage.
-This information makes it easier to update absolute and relative error
-tolerances.
-
-
-Changes
-=======
-
-``timedelta64 % 0`` behavior adjusted to return ``NaT``
--------------------------------------------------------
-The modulus operation with two ``np.timedelta64`` operands now returns
-``NaT`` in the case of division by zero, rather than returning zero.
-
-
-
+++ /dev/null
-==========================
-NumPy 1.16.2 Release Notes
-==========================
-
-NumPy 1.16.2 is a quick release fixing several problems encountered on Windows.
-The Python versions supported are 2.7 and 3.5-3.7. The Windows problems
-addressed are:
-
-- DLL load problems for NumPy wheels on Windows,
-- distutils command line parsing on Windows.
-
-There is also a regression fix correcting signed zeros produced by divmod, see
-below for details.
-
-Downstream developers building this release should use Cython >= 0.29.2 and, if
-using OpenBLAS, OpenBLAS > v0.3.4.
-
-If you are installing using pip, you may encounter a problem with older
-installed versions of NumPy that pip did not delete becoming mixed with the
-current version, resulting in an ``ImportError``. That problem is particularly
-common on Debian derived distributions due to a modified pip. The fix is to
-make sure all previous NumPy versions installed by pip have been removed. See
-`#12736 <https://github.com/numpy/numpy/issues/12736>`__ for discussion of the
-issue.
-
-
-Compatibility notes
-===================
-
-Signed zero when using divmod
------------------------------
-Starting in version 1.12.0, numpy incorrectly returned a negatively signed zero
-when using the ``divmod`` and ``floor_divide`` functions when the result was
-zero. For example::
-
- >>> np.zeros(10)//1
- array([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0.])
-
-With this release, the result is correctly returned as a positively signed
-zero::
-
- >>> np.zeros(10)//1
- array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
-
-
-Contributors
-============
-
-A total of 5 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Charles Harris
-* Eric Wieser
-* Matti Picus
-* Tyler Reddy
-* Tony LaTorre +
-
-
-Pull requests merged
-====================
-
-A total of 7 pull requests were merged for this release.
-
-* `#12909 <https://github.com/numpy/numpy/pull/12909>`__: TST: fix vmImage dispatch in Azure
-* `#12923 <https://github.com/numpy/numpy/pull/12923>`__: MAINT: remove complicated test of multiarray import failure mode
-* `#13020 <https://github.com/numpy/numpy/pull/13020>`__: BUG: fix signed zero behavior in npy_divmod
-* `#13026 <https://github.com/numpy/numpy/pull/13026>`__: MAINT: Add functions to parse shell-strings in the platform-native...
-* `#13028 <https://github.com/numpy/numpy/pull/13028>`__: BUG: Fix regression in parsing of F90 and F77 environment variables
-* `#13038 <https://github.com/numpy/numpy/pull/13038>`__: BUG: parse shell escaping in extra_compile_args and extra_link_args
-* `#13041 <https://github.com/numpy/numpy/pull/13041>`__: BLD: Windows absolute path DLL loading
+++ /dev/null
-==========================
-NumPy 1.16.3 Release Notes
-==========================
-
-The NumPy 1.16.3 release fixes bugs reported against the 1.16.2 release, and
-also backports several enhancements from master that seem appropriate for a
-release series that is the last to support Python 2.7. The wheels on PyPI are
-linked with OpenBLAS v0.3.4+, which should fix the known threading issues
-found in previous OpenBLAS versions.
-
-Downstream developers building this release should use Cython >= 0.29.2 and,
-if using OpenBLAS, OpenBLAS > v0.3.4.
-
-The most noticeable change in this release is that unpickling object arrays
-when loading ``*.npy`` or ``*.npz`` files now requires an explicit opt-in.
-This backwards incompatible change was made in response to
-`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
-
-
-Compatibility notes
-===================
-
-Unpickling while loading requires explicit opt-in
--------------------------------------------------
-The functions ``np.load`` and ``np.lib.format.read_array`` take an
-`allow_pickle` keyword which now defaults to ``False`` in response to
-`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
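-
-A short sketch of the new opt-in (the file name is made up for the example)::
-
-    import numpy as np
-
-    np.save('objs.npy', np.array([{'key': 1}], dtype=object))
-    np.load('objs.npy', allow_pickle=True)   # explicit opt-in now required
-    # np.load('objs.npy') would raise an error for this object array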
-
-
-Improvements
-============
-
-Covariance in `random.mvnormal` cast to double
-----------------------------------------------
-This should make the tolerance used when checking the singular values of the
-covariance matrix more meaningful.
-
-
-Changes
-=======
-
-``__array_interface__`` offset now works as documented
-------------------------------------------------------
-The interface may use an ``offset`` value that was previously mistakenly
-ignored.
-
+++ /dev/null
-==========================
-NumPy 1.16.4 Release Notes
-==========================
-
-The NumPy 1.16.4 release fixes bugs reported against the 1.16.3 release, and
-also backports several enhancements from master that seem appropriate for a
-release series that is the last to support Python 2.7. The wheels on PyPI are
-linked with OpenBLAS v0.3.7-dev, which should fix issues on Skylake series
-cpus.
-
-Downstream developers building this release should use Cython >= 0.29.2 and,
-if using OpenBLAS, OpenBLAS > v0.3.7. The supported Python versions are 2.7 and
-3.5-3.7.
-
-
-New deprecations
-================
-Writeable flag of C-API wrapped arrays
---------------------------------------
-When an array is created from the C-API to wrap a pointer to data, the only
-indication we have of the read-write nature of the data is the ``writeable``
-flag set during creation. It is dangerous to force the flag to writeable. In
-the future it will not be possible to switch the writeable flag to ``True``
-from python. This deprecation should not affect many users since arrays
-created in such a manner are very rare in practice and only available through
-the NumPy C-API.
-
-
-Compatibility notes
-===================
-
-Potential changes to the random stream
---------------------------------------
-Due to bugs in the application of log to random floating point numbers,
-the stream may change when sampling from ``np.random.beta``, ``np.random.binomial``,
-``np.random.laplace``, ``np.random.logistic``, ``np.random.logseries`` or
-``np.random.multinomial`` if a 0 is generated in the underlying MT19937 random stream.
-There is a 1 in :math:`10^{53}` chance of this occurring, and so the probability that
-the stream changes for any given seed is extremely small. If a 0 is encountered in the
-underlying generator, then the incorrect value produced (either ``np.inf``
-or ``np.nan``) is now dropped.
-
-
-Changes
-=======
-
-`numpy.lib.recfunctions.structured_to_unstructured` does not squeeze single-field views
----------------------------------------------------------------------------------------
-Previously ``structured_to_unstructured(arr[['a']])`` would produce a squeezed
-result inconsistent with ``structured_to_unstructured(arr[['a', 'b']])``. This
-was accidental. The old behavior can be retained with
-``structured_to_unstructured(arr[['a']]).squeeze(axis=-1)`` or far more simply,
-``arr['a']``.
-
-
-Contributors
-============
-
-A total of 10 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Charles Harris
-* Eric Wieser
-* Dennis Zollo +
-* Hunter Damron +
-* Jingbei Li +
-* Kevin Sheppard
-* Matti Picus
-* Nicola Soranzo +
-* Sebastian Berg
-* Tyler Reddy
-
-
-Pull requests merged
-====================
-
-A total of 16 pull requests were merged for this release.
-
-* `#13392 <https://github.com/numpy/numpy/pull/13392>`__: BUG: Some PyPy versions lack PyStructSequence_InitType2.
-* `#13394 <https://github.com/numpy/numpy/pull/13394>`__: MAINT, DEP: Fix deprecated ``assertEquals()``
-* `#13396 <https://github.com/numpy/numpy/pull/13396>`__: BUG: Fix structured_to_unstructured on single-field types (backport)
-* `#13549 <https://github.com/numpy/numpy/pull/13549>`__: BLD: Make CI pass again with pytest 4.5
-* `#13552 <https://github.com/numpy/numpy/pull/13552>`__: TST: Register markers in conftest.py.
-* `#13559 <https://github.com/numpy/numpy/pull/13559>`__: BUG: Removes ValueError for empty kwargs in arraymultiter_new
-* `#13560 <https://github.com/numpy/numpy/pull/13560>`__: BUG: Add TypeError to accepted exceptions in crackfortran.
-* `#13561 <https://github.com/numpy/numpy/pull/13561>`__: BUG: Handle subarrays in descr_to_dtype
-* `#13562 <https://github.com/numpy/numpy/pull/13562>`__: BUG: Protect generators from log(0.0)
-* `#13563 <https://github.com/numpy/numpy/pull/13563>`__: BUG: Always return views from structured_to_unstructured when...
-* `#13564 <https://github.com/numpy/numpy/pull/13564>`__: BUG: Catch stderr when checking compiler version
-* `#13565 <https://github.com/numpy/numpy/pull/13565>`__: BUG: longdouble(int) does not work
-* `#13587 <https://github.com/numpy/numpy/pull/13587>`__: BUG: distutils/system_info.py fix missing subprocess import (#13523)
-* `#13620 <https://github.com/numpy/numpy/pull/13620>`__: BUG,DEP: Fix writeable flag setting for arrays without base
-* `#13641 <https://github.com/numpy/numpy/pull/13641>`__: MAINT: Prepare for the 1.16.4 release.
-* `#13644 <https://github.com/numpy/numpy/pull/13644>`__: BUG: special case object arrays when printing rel-, abs-error
+++ /dev/null
-.. currentmodule:: numpy
-
-==========================
-NumPy 1.16.5 Release Notes
-==========================
-
-The NumPy 1.16.5 release fixes bugs reported against the 1.16.4 release, and
-also backports several enhancements from master that seem appropriate for a
-release series that is the last to support Python 2.7. The wheels on PyPI are
-linked with OpenBLAS v0.3.7-dev, which should fix errors on Skylake series
-cpus.
-
-Downstream developers building this release should use Cython >= 0.29.2 and, if
-using OpenBLAS, OpenBLAS >= v0.3.7. The supported Python versions are 2.7 and
-3.5-3.7.
-
-
-Contributors
-============
-
-A total of 18 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Alexander Shadchin
-* Allan Haldane
-* Bruce Merry +
-* Charles Harris
-* Colin Snyder +
-* Dan Allan +
-* Emile +
-* Eric Wieser
-* Grey Baker +
-* Maksim Shabunin +
-* Marten van Kerkwijk
-* Matti Picus
-* Peter Andreas Entschev +
-* Ralf Gommers
-* Richard Harris +
-* Sebastian Berg
-* Sergei Lebedev +
-* Stephan Hoyer
-
-Pull requests merged
-====================
-
-A total of 23 pull requests were merged for this release.
-
-* `#13742 <https://github.com/numpy/numpy/pull/13742>`__: ENH: Add project URLs to setup.py
-* `#13823 <https://github.com/numpy/numpy/pull/13823>`__: TEST, ENH: fix tests and ctypes code for PyPy
-* `#13845 <https://github.com/numpy/numpy/pull/13845>`__: BUG: use npy_intp instead of int for indexing array
-* `#13867 <https://github.com/numpy/numpy/pull/13867>`__: TST: Ignore DeprecationWarning during nose imports
-* `#13905 <https://github.com/numpy/numpy/pull/13905>`__: BUG: Fix use-after-free in boolean indexing
-* `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs
-* `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns]
-* `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation
-* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occured in PyMemoryView_FromObject
-* `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors.
-* `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked.
-* `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers
-* `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher.
-* `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7
-* `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API...
-* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict contructor
-* `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level.
-* `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__`
-* `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7
-* `#14297 <https://github.com/numpy/numpy/pull/14297>`__: TST: Pin pytest version to 5.0.1.
-* `#14322 <https://github.com/numpy/numpy/pull/14322>`__: ENH: Enable huge pages in all Linux builds
-* `#14346 <https://github.com/numpy/numpy/pull/14346>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
-* `#14382 <https://github.com/numpy/numpy/pull/14382>`__: REL: Prepare for the NumPy 1.16.5 release.
+++ /dev/null
-.. currentmodule:: numpy
-
-==========================
-NumPy 1.16.6 Release Notes
-==========================
-
-The NumPy 1.16.6 release fixes bugs reported against the 1.16.5 release, and
-also backports several enhancements from master that seem appropriate for a
-release series that is the last to support Python 2.7. The wheels on PyPI are
-linked with OpenBLAS v0.3.7, which should fix errors on Skylake series
-cpus.
-
-Downstream developers building this release should use Cython >= 0.29.2 and, if
-using OpenBLAS, OpenBLAS >= v0.3.7. The supported Python versions are 2.7 and
-3.5-3.7.
-
-Highlights
-==========
-
-- The ``np.testing.utils`` functions have been updated from 1.19.0-dev0.
-  This improves the function documentation and error messages as well as
-  extending the ``assert_array_compare`` function to additional types.
-
-
-New functions
-=============
-
-Allow matmul (`@` operator) to work with object arrays.
--------------------------------------------------------
-This is an enhancement that was added in NumPy 1.17 and seems reasonable to
-include in the LTS 1.16 release series.
-
-
-Compatibility notes
-===================
-
-Fix regression in matmul (`@` operator) for boolean types
----------------------------------------------------------
-Booleans were being treated as integers rather than booleans,
-which was a regression from previous behavior.
-
-
-Improvements
-============
-
-Array comparison assertions include maximum differences
--------------------------------------------------------
-Error messages from array comparison tests such as ``testing.assert_allclose``
-now include "max absolute difference" and "max relative difference," in
-addition to the previous "mismatch" percentage. This information makes it
-easier to update absolute and relative error tolerances.
-
-Contributors
-============
-
-A total of 10 people contributed to this release.
-
-* CakeWithSteak
-* Charles Harris
-* Chris Burr
-* Eric Wieser
-* Fernando Saravia
-* Lars Grueter
-* Matti Picus
-* Maxwell Aladago
-* Qiming Sun
-* Warren Weckesser
-
-Pull requests merged
-====================
-
-A total of 14 pull requests were merged for this release.
-
-* `#14211 <https://github.com/numpy/numpy/pull/14211>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative...
-* `#14275 <https://github.com/numpy/numpy/pull/14275>`__: BUG: fixing to allow unpickling of PY3 pickles from PY2
-* `#14340 <https://github.com/numpy/numpy/pull/14340>`__: BUG: Fix misuse of .names and .fields in various places (backport...
-* `#14423 <https://github.com/numpy/numpy/pull/14423>`__: BUG: test, fix regression in converting to ctypes.
-* `#14434 <https://github.com/numpy/numpy/pull/14434>`__: BUG: Fixed maximum relative error reporting in assert_allclose
-* `#14509 <https://github.com/numpy/numpy/pull/14509>`__: BUG: Fix regression in boolean matmul.
-* `#14686 <https://github.com/numpy/numpy/pull/14686>`__: BUG: properly define PyArray_DescrCheck
-* `#14853 <https://github.com/numpy/numpy/pull/14853>`__: BLD: add 'apt update' to shippable
-* `#14854 <https://github.com/numpy/numpy/pull/14854>`__: BUG: Fix _ctypes class circular reference. (#13808)
-* `#14856 <https://github.com/numpy/numpy/pull/14856>`__: BUG: Fix `np.einsum` errors on Power9 Linux and z/Linux
-* `#14863 <https://github.com/numpy/numpy/pull/14863>`__: BLD: Prevent -flto from optimising long double representation...
-* `#14864 <https://github.com/numpy/numpy/pull/14864>`__: BUG: lib: Fix histogram problem with signed integer arrays.
-* `#15172 <https://github.com/numpy/numpy/pull/15172>`__: ENH: Backport improvements to testing functions.
-* `#15191 <https://github.com/numpy/numpy/pull/15191>`__: REL: Prepare for 1.16.6 release.
+++ /dev/null
-.. currentmodule:: numpy
-
-==========================
-NumPy 1.17.0 Release Notes
-==========================
-
-This NumPy release contains a number of new features that should substantially
-improve its performance and usefulness; see Highlights below for a summary. The
-Python versions supported are 3.5-3.7; note that Python 2.7 has been dropped.
-Python 3.8b2 should work with the released source packages, but there are no
-future guarantees.
-
-Downstream developers should use Cython >= 0.29.11 for Python 3.8 support and
-OpenBLAS >= 3.7 (not currently out) to avoid problems on the Skylake
-architecture. The NumPy wheels on PyPI are built from the OpenBLAS development
-branch in order to avoid those problems.
-
-
-Highlights
-==========
-
-* A new extensible `random` module along with four selectable `random number
- generators <random.BitGenerators>` and improved seeding designed for use in parallel
- processes has been added. The currently available bit generators are `MT19937
- <random.mt19937.MT19937>`, `PCG64 <random.pcg64.PCG64>`, `Philox
- <random.philox.Philox>`, and `SFC64 <random.sfc64.SFC64>`. See below under
- New Features.
-
-* NumPy's `FFT <fft>` implementation was changed from fftpack to pocketfft,
- resulting in faster, more accurate transforms and better handling of datasets
- of prime length. See below under Improvements.
-
-* New radix sort and timsort sorting methods. It is currently not possible to
- choose which will be used. They are hardwired to the datatype and used
- when either ``stable`` or ``mergesort`` is passed as the method. See below
- under Improvements.
-
-* Overriding numpy functions is now possible by default,
- see ``__array_function__`` below.
-
-
-New functions
-=============
-
-* `numpy.errstate` is now also a function decorator
-
-
-Deprecations
-============
-
-`numpy.polynomial` functions warn when passed ``float`` in place of ``int``
----------------------------------------------------------------------------
-Previously functions in this module would accept ``float`` values provided they
-were integral (``1.0``, ``2.0``, etc). For consistency with the rest of numpy,
-doing so is now deprecated, and in future will raise a ``TypeError``.
-
-Similarly, passing a float like ``0.5`` in place of an integer will now raise a
-``TypeError`` instead of the previous ``ValueError``.
-
-Deprecate `numpy.distutils.exec_command` and ``temp_file_name``
----------------------------------------------------------------
-The internal use of these functions has been refactored and there are better
-alternatives. Replace ``exec_command`` with `subprocess.Popen` and
-`temp_file_name <numpy.distutils.exec_command>` with `tempfile.mkstemp`.
-
-Writeable flag of C-API wrapped arrays
---------------------------------------
-When an array is created from the C-API to wrap a pointer to data, the only
-indication we have of the read-write nature of the data is the ``writeable``
-flag set during creation. It is dangerous to force the flag to writeable.
-In the future it will not be possible to switch the writeable flag to ``True``
-from python.
-This deprecation should not affect many users since arrays created in such
-a manner are very rare in practice and only available through the NumPy C-API.
-
-`numpy.nonzero` should no longer be called on 0d arrays
--------------------------------------------------------
-The behavior of `numpy.nonzero` on 0d arrays was surprising, making uses of it
-almost always incorrect. If the old behavior was intended, it can be preserved
-without a warning by using ``nonzero(atleast_1d(arr))`` instead of
-``nonzero(arr)``. In a future release, it is most likely this will raise a
-``ValueError``.
-
-Writing to the result of `numpy.broadcast_arrays` will warn
------------------------------------------------------------
-
-Commonly `numpy.broadcast_arrays` returns a writeable array with internal
-overlap, making it unsafe to write to. A future version will set the
-``writeable`` flag to ``False``, and require users to manually set it to
-``True`` if they are sure that is what they want to do. Now writing to it will
-emit a deprecation warning with instructions to set the ``writeable`` flag
-``True``. Note that if one were to inspect the flag before setting it, one
-would find it would already be ``True``. Explicitly setting it, though, as one
-will need to do in future versions, clears an internal flag that is used to
-produce the deprecation warning. To help alleviate confusion, an additional
-`FutureWarning` will be emitted when accessing the ``writeable`` flag state to
-clarify the contradiction.
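-
-Concretely, a minimal sketch of the future-proof pattern::
-
-    a, b = np.broadcast_arrays(np.ones(3), np.ones((2, 1)))
-    a.flags.writeable = True   # explicit opt-in, silences the warning
-    a[0, 0] = 5.0              # note: broadcast rows share memory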
-
-Note that for the C-side buffer protocol such an array will return a
-readonly buffer immediately unless a writable buffer is requested. If
-a writeable buffer is requested a warning will be given. When using
-cython, the ``const`` qualifier should be used with such arrays to avoid
-the warning (e.g. ``cdef const double[::1] view``).
-
-
-Future Changes
-==============
-
-Shape-1 fields in dtypes won't be collapsed to scalars in a future version
---------------------------------------------------------------------------
-
-Currently, a field specified as ``[(name, dtype, 1)]`` or ``"1type"`` is
-interpreted as a scalar field (i.e., the same as ``[(name, dtype)]`` or
-``[(name, dtype, ())]``). This now raises a FutureWarning; in a future version,
-it will be interpreted as a shape-(1,) field, i.e. the same as ``[(name,
-dtype, (1,))]`` or ``"(1,)type"`` (consistent with ``[(name, dtype, n)]``
-/ ``"ntype"`` with ``n>1``, which is already equivalent to ``[(name, dtype,
-(n,))]`` / ``"(n,)type"``).
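-
-For example::
-
-    np.dtype([('a', 'f8', 1)])      # FutureWarning; currently a scalar field
-    np.dtype([('a', 'f8', (1,))])   # explicit shape-(1,) field, no warning
-    np.dtype([('a', 'f8')])         # explicit scalar field, no warning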
-
-
-Compatibility notes
-===================
-
-``float16`` subnormal rounding
-------------------------------
-Casting from a different floating point precision to ``float16`` used incorrect
-rounding in some edge cases. This means in rare cases, subnormal results will
-now be rounded up instead of down, changing the last bit (ULP) of the result.
-
-Signed zero when using divmod
------------------------------
-Starting in version 1.12.0, numpy incorrectly returned a negatively signed zero
-when using the ``divmod`` and ``floor_divide`` functions when the result was
-zero. For example::
-
- >>> np.zeros(10)//1
- array([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0.])
-
-With this release, the result is correctly returned as a positively signed
-zero::
-
- >>> np.zeros(10)//1
- array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
-
-``MaskedArray.mask`` now returns a view of the mask, not the mask itself
-------------------------------------------------------------------------
-Returning the mask itself was unsafe, as it could be reshaped in place which
-would violate expectations of the masked array code. The behavior of `mask
-<ma.MaskedArray.mask>` is now consistent with `data <ma.MaskedArray.data>`,
-which also returns a view.
-
-The underlying mask can still be accessed with ``._mask`` if it is needed.
-Tests that contain ``assert x.mask is not y.mask`` or similar will need to be
-updated.
-
-Do not lookup ``__buffer__`` attribute in `numpy.frombuffer`
-------------------------------------------------------------
-Looking up ``__buffer__`` attribute in `numpy.frombuffer` was undocumented and
-non-functional. This code was removed. If needed, use
-``frombuffer(memoryview(obj), ...)`` instead.
-
-``out`` is buffered for memory overlaps in `take`, `choose`, `put`
-------------------------------------------------------------------
-If the out argument to these functions is provided and has memory overlap with
-the other arguments, it is now buffered to avoid order-dependent behavior.
-
-Unpickling while loading requires explicit opt-in
--------------------------------------------------
-The functions `load` and ``lib.format.read_array`` take an
-``allow_pickle`` keyword which now defaults to ``False`` in response to
-`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
-
-
-.. currentmodule:: numpy.random.mtrand
-
-Potential changes to the random stream in old random module
------------------------------------------------------------
-Due to bugs in the application of ``log`` to random floating point numbers,
-the stream may change when sampling from `~RandomState.beta`, `~RandomState.binomial`,
-`~RandomState.laplace`, `~RandomState.logistic`, `~RandomState.logseries` or
-`~RandomState.multinomial` if a ``0`` is generated in the underlying `MT19937
-<~numpy.random.mt19937.MT19937>` random stream. There is a ``1`` in
-:math:`10^{53}` chance of this occurring, so the probability that the stream
-changes for any given seed is extremely small. If a ``0`` is encountered in the
-underlying generator, then the incorrect value produced (either `numpy.inf` or
-`numpy.nan`) is now dropped.
-
-.. currentmodule:: numpy
-
-`i0` now always returns a result with the same shape as the input
------------------------------------------------------------------
-Previously, the output was squeezed, such that, e.g., input with just a single
-element would lead to an array scalar being returned, and inputs with shapes
-such as ``(10, 1)`` would yield results that would not broadcast against the
-input.
-
-Note that we generally recommend the SciPy implementation over the numpy one:
-it is a proper ufunc written in C, and more than an order of magnitude faster.
-
-`can_cast` no longer assumes all unsafe casting is allowed
-----------------------------------------------------------
-Previously, `can_cast` returned `True` for almost all inputs for
-``casting='unsafe'``, even for cases where casting was not possible, such as
-from a structured dtype to a regular one. This has been fixed, making it
-more consistent with actual casting using, e.g., the `.astype <ndarray.astype>`
-method.
-
-``ndarray.flags.writeable`` can be switched to true slightly more often
------------------------------------------------------------------------
-
-In rare cases, it was not possible to switch an array from not writeable
-to writeable, even though a base array is writeable. This can happen if an
-intermediate `ndarray.base` object is writeable. Previously, only the deepest
-base object was considered for this decision. However, in rare cases this
-object does not have the necessary information. In that case, switching to
-writeable was never allowed. This has now been fixed.
-
-
-C API changes
-=============
-
-dimension or stride input arguments are now passed by ``npy_intp const*``
--------------------------------------------------------------------------
-Previously these function arguments were declared as the more strict
-``npy_intp*``, which prevented the caller passing constant data.
-This change is backwards compatible, but now allows code like::
-
- npy_intp const fixed_dims[] = {1, 2, 3};
- // no longer complains that the const-qualifier is discarded
- npy_intp size = PyArray_MultiplyList(fixed_dims, 3);
-
-
-New Features
-============
-
-.. currentmodule:: numpy.random
-
-New extensible `numpy.random` module with selectable random number generators
------------------------------------------------------------------------------
-A new extensible `numpy.random` module along with four selectable random number
-generators and improved seeding designed for use in parallel processes has been
-added. The currently available :ref:`Bit Generators <bit_generator>` are
-`~mt19937.MT19937`, `~pcg64.PCG64`, `~philox.Philox`, and `~sfc64.SFC64`.
-``PCG64`` is the new default while ``MT19937`` is retained for backwards
-compatibility. Note that the legacy random module is unchanged and is now
-frozen; your current results will not change. More information is available in
-the :ref:`API change description <new-or-different>` and in the `top-level view
-<numpy.random>` documentation.
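-
-A quick taste of the new interface (with an arbitrary seed)::
-
-    from numpy.random import Generator, PCG64
-
-    rng = Generator(PCG64(12345))     # selectable bit generator, explicit seed
-    sample = rng.standard_normal(5)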
-
-.. currentmodule:: numpy
-
-libFLAME
---------
-Support for building NumPy with the libFLAME linear algebra package as the
-LAPACK implementation, see
-`libFLAME <https://www.cs.utexas.edu/~flame/web/libFLAME.html>`_ for details.
-
-User-defined BLAS detection order
----------------------------------
-`numpy.distutils` now uses a comma-separated, case-insensitive environment
-variable to determine the detection order for BLAS libraries.
-By default ``NPY_BLAS_ORDER=mkl,blis,openblas,atlas,accelerate,blas``.
-To force the use of OpenBLAS, simply do::
-
-    NPY_BLAS_ORDER=openblas python setup.py build
-
-This may be helpful for users who have an MKL installation but wish to try
-out different implementations.
-
-User-defined LAPACK detection order
------------------------------------
-``numpy.distutils`` now uses a comma-separated, case-insensitive environment
-variable to determine the detection order for LAPACK libraries.
-By default ``NPY_LAPACK_ORDER=mkl,openblas,flame,atlas,accelerate,lapack``.
-To force the use of OpenBLAS, simply do::
-
-    NPY_LAPACK_ORDER=openblas python setup.py build
-
-This may be helpful for users who have an MKL installation but wish to try
-out different implementations.
-
-`ufunc.reduce` and related functions now accept a ``where`` mask
-----------------------------------------------------------------
-`ufunc.reduce`, `sum`, `prod`, `min`, `max` all
-now accept a ``where`` keyword argument, which can be used to tell which
-elements to include in the reduction. For reductions that do not have an
-identity, it is necessary to also pass in an initial value (e.g.,
-``initial=np.inf`` for `min`). For instance, the equivalent of
-`nansum` would be ``np.sum(a, where=~np.isnan(a))``.
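-
-For example::
-
-    >>> a = np.array([1.0, np.nan, 3.0])
-    >>> np.sum(a, where=~np.isnan(a))
-    4.0
-    >>> np.min(a, where=~np.isnan(a), initial=np.inf)
-    1.0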
-
-Timsort and radix sort have replaced mergesort for stable sorting
------------------------------------------------------------------
-Both radix sort and timsort have been implemented and are now used in place of
-mergesort. Due to the need to maintain backward compatibility, the sorting
-``kind`` options ``"stable"`` and ``"mergesort"`` have been made aliases of
-each other with the actual sort implementation depending on the array type.
-Radix sort is used for small integer types of 16 bits or less and timsort for
-the remaining types. Timsort features improved performance on already or
-nearly sorted data, performs like mergesort on random data, and requires
-:math:`O(n/2)` working space. Details of the timsort algorithm can be
-found at `CPython listsort.txt
-<https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
-
-`packbits` and `unpackbits` accept an ``order`` keyword
--------------------------------------------------------
-The ``order`` keyword defaults to ``big``, and will order the **bits**
-accordingly. For ``order='big'``, 3 will become ``[0, 0, 0, 0, 0, 0, 1, 1]``,
-and ``[1, 1, 0, 0, 0, 0, 0, 0]`` for ``order='little'``.
-
-`unpackbits` now accepts a ``count`` parameter
-----------------------------------------------
-``count`` allows choosing the number of bits to unpack up-front rather than
-reshaping and subsetting later, making the `packbits` operation invertible and
-the unpacking less wasteful. Counts larger than the number of available bits
-add zero padding. Negative counts trim bits off the end instead of counting
-from the beginning. A count of ``None`` implements the existing behavior of
-unpacking everything.
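-
-For example::
-
-    >>> np.unpackbits(np.array([255], dtype=np.uint8), count=4)
-    array([1, 1, 1, 1], dtype=uint8)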
-
-`linalg.svd` and `linalg.pinv` can be faster on hermitian inputs
-----------------------------------------------------------------
-These functions now accept a ``hermitian`` argument, matching the one added
-to `linalg.matrix_rank` in 1.14.0.
-
-divmod operation is now supported for two ``timedelta64`` operands
-------------------------------------------------------------------
-The divmod operator now handles two ``timedelta64`` operands, with
-type signature ``mm->qm``.
-
-`fromfile` now takes an ``offset`` argument
--------------------------------------------
-This function now takes an ``offset`` keyword argument for binary files,
-which specifies the offset (in bytes) from the file's current position.
-It defaults to ``0``.
-
-New mode "empty" for `pad`
---------------------------
-This mode pads an array to a desired shape without initializing the new
-entries.
-
-`empty_like` and related functions now accept a ``shape`` argument
-------------------------------------------------------------------
-`empty_like`, `full_like`, `ones_like` and `zeros_like` now accept a ``shape``
-keyword argument, which overrides the shape that would otherwise be taken from
-the prototype array. This is particularly useful when combined with the
-``__array_function__`` protocol, allowing the creation of new arbitrary-shape
-arrays from NumPy-like libraries when such an array
-is used as the prototype.
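-
-For example::
-
-    >>> proto = np.zeros((2, 3), dtype=np.int16)
-    >>> np.full_like(proto, 7, shape=(4,))
-    array([7, 7, 7, 7], dtype=int16)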
-
-Floating point scalars implement ``as_integer_ratio`` to match the builtin float
---------------------------------------------------------------------------------
-This returns a (numerator, denominator) pair, which can be used to construct a
-`fractions.Fraction`.
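-
-For example::
-
-    >>> np.float64(0.25).as_integer_ratio()
-    (1, 4)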
-
-Structured ``dtype`` objects can be indexed with multiple fields names
-----------------------------------------------------------------------
-``arr.dtype[['a', 'b']]`` now returns a dtype that is equivalent to
-``arr[['a', 'b']].dtype``, for consistency with
-``arr.dtype['a'] == arr['a'].dtype``.
-
-Like the dtype of structured arrays indexed with a list of fields, this dtype
-has the same ``itemsize`` as the original, but only keeps a subset of the fields.
-
-This means that ``arr[['a', 'b']]`` and ``arr.view(arr.dtype[['a', 'b']])`` are
-equivalent.
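-
-For example::
-
-    >>> dt = np.dtype([('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
-    >>> dt[['a', 'c']].itemsize == dt.itemsize
-    True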
-
-``.npy`` files support unicode field names
-------------------------------------------
-A new format version of 3.0 has been introduced, which enables structured types
-with non-latin1 field names. This is used automatically when needed.
-
-
-Improvements
-============
-
-Array comparison assertions include maximum differences
--------------------------------------------------------
-Error messages from array comparison tests such as
-`testing.assert_allclose` now include "max absolute difference" and
-"max relative difference," in addition to the previous "mismatch" percentage.
-This information makes it easier to update absolute and relative error
-tolerances.
-
-Replacement of the fftpack based `fft` module by the pocketfft library
-----------------------------------------------------------------------
-Both implementations have the same ancestor (Fortran77 FFTPACK by Paul N.
-Swarztrauber), but pocketfft contains additional modifications which improve
-both accuracy and performance in some circumstances. For FFT lengths containing
-large prime factors, pocketfft uses Bluestein's algorithm, which maintains
-:math:`O(N \log N)` run time complexity instead of deteriorating towards
-:math:`O(N^2)` for prime lengths. Also, accuracy for real-valued FFTs with
-near-prime lengths has improved and is on par with complex-valued FFTs.
-
-Further improvements to ``ctypes`` support in `numpy.ctypeslib`
----------------------------------------------------------------
-A new `numpy.ctypeslib.as_ctypes_type` function has been added, which can be
-used to convert a `dtype` into a best-guess `ctypes` type. Thanks to this
-new function, `numpy.ctypeslib.as_ctypes` now supports a much wider range of
-array types, including structures, booleans, and integers of non-native
-endianness.
-
-`numpy.errstate` is now also a function decorator
--------------------------------------------------
-Currently, if you have a function like::
-
- def foo():
- pass
-
-and you want to wrap the whole thing in `errstate`, you have to rewrite it
-like so::
-
- def foo():
- with np.errstate(...):
- pass
-
-but with this change, you can do::
-
- @np.errstate(...)
- def foo():
- pass
-
-thereby saving a level of indentation.
-
-`numpy.exp` and `numpy.log` speed up for float32 implementation
----------------------------------------------------------------
-The float32 implementations of `exp` and `log` now benefit from the AVX2 and
-AVX512 instruction sets, which are detected at runtime. `exp` has a maximum
-ulp error of 2.52 and `log` has a maximum ulp error of 3.83.
-
-Improve performance of `numpy.pad`
-----------------------------------
-The performance of the function has been improved for most cases by filling in
-a preallocated array with the desired padded shape instead of using
-concatenation.
-
-`numpy.interp` handles infinities more robustly
------------------------------------------------
-In some cases where `interp` would previously return `nan`, it now
-returns an appropriate infinity.
-
-Pathlib support for `fromfile`, `tofile` and `ndarray.dump`
------------------------------------------------------------
-`fromfile`, `ndarray.tofile` and `ndarray.dump` now support
-the `pathlib.Path` type for the ``file``/``fid`` parameter.
-
-Specialized `isnan`, `isinf`, and `isfinite` ufuncs for bool and int types
---------------------------------------------------------------------------
-The boolean and integer types are incapable of storing `nan` and `inf` values,
-which allows us to provide specialized ufuncs that are up to 250x faster than
-the previous approach.
-
-`isfinite` supports ``datetime64`` and ``timedelta64`` types
------------------------------------------------------------------
-Previously, `isfinite` raised a `TypeError` when used on these two types.
-
-New keywords added to `nan_to_num`
-----------------------------------
-`nan_to_num` now accepts keywords ``nan``, ``posinf`` and ``neginf``
-allowing the user to define the value to replace the ``nan``, positive and
-negative ``np.inf`` values respectively.
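-
-For example::
-
-    >>> np.nan_to_num([np.nan, np.inf, -np.inf], nan=9.0, posinf=1.0, neginf=-1.0)
-    array([ 9.,  1., -1.])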
-
-MemoryErrors caused by allocating overly large arrays are more descriptive
----------------------------------------------------------------------------
-Often the cause of a MemoryError is incorrect broadcasting, which results in a
-very large and incorrect shape. The message of the error now includes this
-shape to help diagnose the cause of failure.
-
-`floor`, `ceil`, and `trunc` now respect builtin magic methods
---------------------------------------------------------------
-These ufuncs now call the ``__floor__``, ``__ceil__``, and ``__trunc__``
-methods when called on object arrays, making them compatible with
-`decimal.Decimal` and `fractions.Fraction` objects.
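-
-For example (note that ``Decimal.__floor__`` returns a Python ``int``)::
-
-    >>> from decimal import Decimal
-    >>> np.floor(np.array([Decimal('1.5'), Decimal('-1.5')], dtype=object))
-    array([1, -2], dtype=object)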
-
-`quantile` now works on `fractions.Fraction` and `decimal.Decimal` objects
----------------------------------------------------------------------------
-In general, this handles object arrays more gracefully, and avoids
-floating-point operations if exact arithmetic types are used.
-
-Support of object arrays in `matmul`
-------------------------------------
-It is now possible to use `matmul` (or the ``@`` operator) with object arrays.
-For instance, it is now possible to do::
-
- from fractions import Fraction
- a = np.array([[Fraction(1, 2), Fraction(1, 3)], [Fraction(1, 3), Fraction(1, 2)]])
- b = a @ a
-
-
-Changes
-=======
-
-`median` and `percentile` family of functions no longer warn about ``nan``
---------------------------------------------------------------------------
-`numpy.median`, `numpy.percentile`, and `numpy.quantile` used to emit a
-``RuntimeWarning`` when encountering a `nan`. Since they return the
-``nan`` value, the warning is redundant and has been removed.
-
-``timedelta64 % 0`` behavior adjusted to return ``NaT``
--------------------------------------------------------
-The modulus operation with two ``np.timedelta64`` operands now returns
-``NaT`` in the case of division by zero, rather than returning zero.
-
-NumPy functions now always support overrides with ``__array_function__``
-------------------------------------------------------------------------
-NumPy now always checks the ``__array_function__`` method to implement overrides
-of NumPy functions on non-NumPy arrays, as described in `NEP 18`_. The feature
-was available for testing with NumPy 1.16 if appropriate environment variables
-were set, but is now always enabled.
-
-.. _`NEP 18` : http://www.numpy.org/neps/nep-0018-array-function-protocol.html
-
-``lib.recfunctions.structured_to_unstructured`` does not squeeze single-field views
------------------------------------------------------------------------------------
-Previously ``structured_to_unstructured(arr[['a']])`` would produce a squeezed
-result inconsistent with ``structured_to_unstructured(arr[['a', 'b']])``. This
-was accidental. The old behavior can be retained with
-``structured_to_unstructured(arr[['a']]).squeeze(axis=-1)`` or far more simply,
-``arr['a']``.
-
-`clip` now uses a ufunc under the hood
---------------------------------------
-This means that registering clip functions for custom dtypes in C via
-``descr->f->fastclip`` is deprecated - they should use the ufunc registration
-mechanism instead, attaching to the ``np.core.umath.clip`` ufunc.
-
-It also means that ``clip`` accepts ``where`` and ``casting`` arguments,
-and can be overridden with ``__array_ufunc__``.
-
-A consequence of this change is that some behaviors of the old ``clip`` have
-been deprecated:
-
-* Passing ``nan`` to mean "do not clip" as one or both bounds. This didn't work
- in all cases anyway, and can be better handled by passing infinities of the
- appropriate sign.
-* Using "unsafe" casting by default when an ``out`` argument is passed. Using
- ``casting="unsafe"`` explicitly will silence this warning.
-
-Additionally, there are some corner cases with behavior changes:
-
-* Padding ``max < min`` has changed to be more consistent across dtypes, but
- should not be relied upon.
-* Scalar ``min`` and ``max`` take part in promotion rules like they do in all
- other ufuncs.
-
-``__array_interface__`` offset now works as documented
-------------------------------------------------------
-The interface may use an ``offset`` value that was mistakenly ignored.
-
-Pickle protocol in `savez` set to 3 for ``force zip64`` flag
------------------------------------------------------------------
-`savez` was not using the ``force_zip64`` flag, which limited the size of
-the archive to 2GB. But using the flag requires us to use pickle protocol 3 to
-write ``object`` arrays. The protocol used was bumped to 3, meaning the archive
-will be unreadable by Python 2.
-
-Structured arrays indexed with non-existent fields raise ``KeyError`` not ``ValueError``
-----------------------------------------------------------------------------------------
-``arr['bad_field']`` on a structured type raises ``KeyError``, for consistency
-with ``dict['bad_field']``.
-
+++ /dev/null
-.. currentmodule:: numpy
-
-==========================
-NumPy 1.17.1 Release Notes
-==========================
-
-This release contains a number of fixes for bugs reported against NumPy 1.17.0
-along with a few documentation and build improvements. The Python versions
-supported are 3.5-3.7; note that Python 2.7 has been dropped. Python 3.8b3
-should work with the released source packages, but there are no future
-guarantees.
-
-Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
-OpenBLAS >= 3.7 to avoid problems on the Skylake architecture. The NumPy wheels
-on PyPI are built from the OpenBLAS development branch in order to avoid those
-problems.
-
-
-Contributors
-============
-
-A total of 17 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Alexander Jung +
-* Allan Haldane
-* Charles Harris
-* Eric Wieser
-* Giuseppe Cuccu +
-* Hiroyuki V. Yamazaki
-* Jérémie du Boisberranger
-* Kmol Yuan +
-* Matti Picus
-* Max Bolingbroke +
-* Maxwell Aladago +
-* Oleksandr Pavlyk
-* Peter Andreas Entschev
-* Sergei Lebedev
-* Seth Troisi +
-* Vladimir Pershin +
-* Warren Weckesser
-
-
-Pull requests merged
-====================
-
-A total of 25 pull requests were merged for this release.
-
-* `#14156 <https://github.com/numpy/numpy/pull/14156>`__: TST: Allow fuss in testing strided/non-strided exp/log loops
-* `#14157 <https://github.com/numpy/numpy/pull/14157>`__: BUG: avx2_scalef_ps must be static
-* `#14158 <https://github.com/numpy/numpy/pull/14158>`__: BUG: Remove stray print that causes a SystemError on python 3.7.
-* `#14159 <https://github.com/numpy/numpy/pull/14159>`__: BUG: Fix DeprecationWarning in python 3.8.
-* `#14160 <https://github.com/numpy/numpy/pull/14160>`__: BLD: Add missing gcd/lcm definitions to npy_math.h
-* `#14161 <https://github.com/numpy/numpy/pull/14161>`__: DOC, BUILD: cleanups and fix (again) 'build dist'
-* `#14166 <https://github.com/numpy/numpy/pull/14166>`__: TST: Add 3.8-dev to travisCI testing.
-* `#14194 <https://github.com/numpy/numpy/pull/14194>`__: BUG: Remove the broken clip wrapper (Backport)
-* `#14198 <https://github.com/numpy/numpy/pull/14198>`__: DOC: Fix hermitian argument docs in svd.
-* `#14199 <https://github.com/numpy/numpy/pull/14199>`__: MAINT: Workaround for Intel compiler bug leading to failing test
-* `#14200 <https://github.com/numpy/numpy/pull/14200>`__: TST: Clean up of test_pocketfft.py
-* `#14201 <https://github.com/numpy/numpy/pull/14201>`__: BUG: Make advanced indexing result on read-only subclass writeable...
-* `#14236 <https://github.com/numpy/numpy/pull/14236>`__: BUG: Fixed default BitGenerator name
-* `#14237 <https://github.com/numpy/numpy/pull/14237>`__: ENH: add c-imported modules for freeze analysis in np.random
-* `#14296 <https://github.com/numpy/numpy/pull/14296>`__: TST: Pin pytest version to 5.0.1
-* `#14301 <https://github.com/numpy/numpy/pull/14301>`__: BUG: Fix leak in the f2py-generated module init and `PyMem_Del`...
-* `#14302 <https://github.com/numpy/numpy/pull/14302>`__: BUG: Fix formatting error in exception message
-* `#14307 <https://github.com/numpy/numpy/pull/14307>`__: MAINT: random: Match type of SeedSequence.pool_size to DEFAULT_POOL_SIZE.
-* `#14308 <https://github.com/numpy/numpy/pull/14308>`__: BUG: Fix numpy.random bug in platform detection
-* `#14309 <https://github.com/numpy/numpy/pull/14309>`__: ENH: Enable huge pages in all Linux builds
-* `#14330 <https://github.com/numpy/numpy/pull/14330>`__: BUG: Fix segfault in `random.permutation(x)` when x is a string.
-* `#14338 <https://github.com/numpy/numpy/pull/14338>`__: BUG: don't fail when lexsorting some empty arrays (#14228)
-* `#14339 <https://github.com/numpy/numpy/pull/14339>`__: BUG: Fix misuse of .names and .fields in various places (backport...
-* `#14345 <https://github.com/numpy/numpy/pull/14345>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
-* `#14350 <https://github.com/numpy/numpy/pull/14350>`__: REL: Prepare 1.17.1 release
+++ /dev/null
-.. currentmodule:: numpy
-
-==========================
-NumPy 1.17.2 Release Notes
-==========================
-
-This release contains fixes for bugs reported against NumPy 1.17.1 along with
-some documentation improvements. The most important fix is for lexsort when the
-keys are of type (u)int8 or (u)int16. If you are currently using 1.17 you
-should upgrade.
-
-The Python versions supported in this release are 3.5-3.7; Python 2.7 has been
-dropped. Python 3.8b4 should work with the released source packages, but there
-are no future guarantees.
-
-Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
-OpenBLAS >= 3.7 to avoid errors on the Skylake architecture. The NumPy wheels
-on PyPI are built from the OpenBLAS development branch in order to avoid those
-errors.
-
-
-Contributors
-============
-
-A total of 7 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* CakeWithSteak +
-* Charles Harris
-* Dan Allan
-* Hameer Abbasi
-* Lars Grueter
-* Matti Picus
-* Sebastian Berg
-
-
-Pull requests merged
-====================
-
-A total of 8 pull requests were merged for this release.
-
-* `#14418 <https://github.com/numpy/numpy/pull/14418>`__: BUG: Fix aradixsort indirect indexing.
-* `#14420 <https://github.com/numpy/numpy/pull/14420>`__: DOC: Fix a minor typo in dispatch documentation.
-* `#14421 <https://github.com/numpy/numpy/pull/14421>`__: BUG: test, fix regression in converting to ctypes
-* `#14430 <https://github.com/numpy/numpy/pull/14430>`__: BUG: Do not show Override module in private error classes.
-* `#14432 <https://github.com/numpy/numpy/pull/14432>`__: BUG: Fixed maximum relative error reporting in assert_allclose.
-* `#14433 <https://github.com/numpy/numpy/pull/14433>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative...
-* `#14436 <https://github.com/numpy/numpy/pull/14436>`__: BUG: Update 1.17.x with 1.18.0-dev pocketfft.py.
-* `#14446 <https://github.com/numpy/numpy/pull/14446>`__: REL: Prepare for NumPy 1.17.2 release.
+++ /dev/null
-.. currentmodule:: numpy
-
-==========================
-NumPy 1.17.3 Release Notes
-==========================
-
-This release contains fixes for bugs reported against NumPy 1.17.2 along with
-some documentation improvements. The Python versions supported in this release
-are 3.5-3.8.
-
-Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
-OpenBLAS >= 3.7 to avoid errors on the Skylake architecture.
-
-
-Highlights
-==========
-
-- Wheels for Python 3.8
-- Boolean ``matmul`` fixed to use booleans instead of integers.
-
-
-Compatibility notes
-===================
-
-- The seldom used ``PyArray_DescrCheck`` macro has been changed/fixed.
-- The use of the new ``numpy.random`` features from Cython and Numba
- was not well documented and parts have been removed or refactored.
-  We plan to finish the refactor and fully document it in 1.18.0.
-
-
-Contributors
-============
-
-A total of 7 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Allan Haldane
-* Charles Harris
-* Kevin Sheppard
-* Matti Picus
-* Ralf Gommers
-* Sebastian Berg
-* Warren Weckesser
-
-
-Pull requests merged
-====================
-
-A total of 12 pull requests were merged for this release.
-
-* `#14456 <https://github.com/numpy/numpy/pull/14456>`__: MAINT: clean up pocketfft modules inside numpy.fft namespace.
-* `#14463 <https://github.com/numpy/numpy/pull/14463>`__: BUG: random.hypergeometic assumes npy_long is npy_int64, hung...
-* `#14502 <https://github.com/numpy/numpy/pull/14502>`__: BUG: random: Revert gh-14458 and refix gh-14557.
-* `#14504 <https://github.com/numpy/numpy/pull/14504>`__: BUG: add a specialized loop for boolean matmul.
-* `#14506 <https://github.com/numpy/numpy/pull/14506>`__: MAINT: Update pytest version for Python 3.8
-* `#14512 <https://github.com/numpy/numpy/pull/14512>`__: DOC: random: fix doc linking, was referencing private submodules.
-* `#14513 <https://github.com/numpy/numpy/pull/14513>`__: BUG,MAINT: Some fixes and minor cleanup based on clang analysis
-* `#14515 <https://github.com/numpy/numpy/pull/14515>`__: BUG: Fix randint when range is 2**32
-* `#14519 <https://github.com/numpy/numpy/pull/14519>`__: MAINT: remove the entropy c-extension module
-* `#14563 <https://github.com/numpy/numpy/pull/14563>`__: DOC: remove note about Pocketfft license file (non-existing here).
-* `#14578 <https://github.com/numpy/numpy/pull/14578>`__: BUG: random: Create a legacy implementation of random.binomial.
-* `#14687 <https://github.com/numpy/numpy/pull/14687>`__: BUG: properly define PyArray_DescrCheck
+++ /dev/null
-.. currentmodule:: numpy
-
-==========================
-NumPy 1.17.4 Release Notes
-==========================
-
-This release contains fixes for bugs reported against NumPy 1.17.3 along with
-some build improvements. The Python versions supported in this release
-are 3.5-3.8.
-
-Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
-OpenBLAS >= 3.7 to avoid errors on the Skylake architecture.
-
-
-Highlights
-==========
-
-- Fixed `random.random_integers` biased generation of 8 and 16 bit integers.
-- Fixed `np.einsum` regression on Power9 and z/Linux.
-- Fixed histogram problem with signed integer arrays.
-
-
-Contributors
-============
-
-A total of 5 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Charles Harris
-* Chris Burr +
-* Matti Picus
-* Qiming Sun +
-* Warren Weckesser
-
-
-Pull requests merged
-====================
-
-A total of 8 pull requests were merged for this release.
-
-* `#14758 <https://github.com/numpy/numpy/pull/14758>`__: BLD: declare support for python 3.8
-* `#14781 <https://github.com/numpy/numpy/pull/14781>`__: BUG: random: biased samples from integers() with 8 or 16 bit...
-* `#14851 <https://github.com/numpy/numpy/pull/14851>`__: BUG: Fix _ctypes class circular reference. (#13808)
-* `#14852 <https://github.com/numpy/numpy/pull/14852>`__: BLD: add 'apt update' to shippable
-* `#14855 <https://github.com/numpy/numpy/pull/14855>`__: BUG: Fix `np.einsum` errors on Power9 Linux and z/Linux
-* `#14857 <https://github.com/numpy/numpy/pull/14857>`__: BUG: lib: Fix histogram problem with signed integer arrays.
-* `#14858 <https://github.com/numpy/numpy/pull/14858>`__: BLD: Prevent -flto from optimising long double representation...
-* `#14866 <https://github.com/numpy/numpy/pull/14866>`__: MAINT: move buffer.h -> npy_buffer.h to avoid conflicts
-
+++ /dev/null
-.. currentmodule:: numpy
-
-==========================
-NumPy 1.17.5 Release Notes
-==========================
-
-This release contains fixes for bugs reported against NumPy 1.17.4 along with
-some build improvements. The Python versions supported in this release
-are 3.5-3.8.
-
-Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
-OpenBLAS >= 3.7 to avoid errors on the Skylake architecture.
-
-It is recommended that developers interested in the new random bit generators
-upgrade to the NumPy 1.18.x series, as it has updated documentation and
-many small improvements.
-
-
-Contributors
-============
-
-A total of 6 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Charles Harris
-* Eric Wieser
-* Ilhan Polat
-* Matti Picus
-* Michael Hudson-Doyle
-* Ralf Gommers
-
-
-Pull requests merged
-====================
-
-A total of 8 pull requests were merged for this release.
-
-* `#14593 <https://github.com/numpy/numpy/pull/14593>`__: MAINT: backport Cython API cleanup to 1.17.x, remove docs
-* `#14937 <https://github.com/numpy/numpy/pull/14937>`__: BUG: fix integer size confusion in handling array's ndmin argument
-* `#14939 <https://github.com/numpy/numpy/pull/14939>`__: BUILD: remove SSE2 flag from numpy.random builds
-* `#14993 <https://github.com/numpy/numpy/pull/14993>`__: MAINT: Added Python3.8 branch to dll lib discovery
-* `#15038 <https://github.com/numpy/numpy/pull/15038>`__: BUG: Fix refcounting in ufunc object loops
-* `#15067 <https://github.com/numpy/numpy/pull/15067>`__: BUG: Exceptions tracebacks are dropped
-* `#15175 <https://github.com/numpy/numpy/pull/15175>`__: ENH: Backport improvements to testing functions.
-* `#15213 <https://github.com/numpy/numpy/pull/15213>`__: REL: Prepare for the NumPy 1.17.5 release.
+++ /dev/null
-=========================
-NumPy 1.3.0 Release Notes
-=========================
-
-This minor release includes numerous bug fixes, official Python 2.6 support,
-and several new features such as generalized ufuncs.
-
-Highlights
-==========
-
-Python 2.6 support
-------------------
-
-Python 2.6 is now supported on all previously supported platforms, including
-Windows.
-
-https://www.python.org/dev/peps/pep-0361/
-
-Generalized ufuncs
-------------------
-
-There is a general need for looping over not only functions on scalars but also
-over functions on vectors (or arrays), as explained on
-http://scipy.org/scipy/numpy/wiki/GeneralLoopingFunctions. We propose to
-realize this concept by generalizing the universal functions (ufuncs), and
-provide a C implementation that adds ~500 lines to the numpy code base. In
-current (specialized) ufuncs, the elementary function is limited to
-element-by-element operations, whereas the generalized version supports
-"sub-array" by "sub-array" operations. The Perl vector library PDL provides a
-similar functionality and its terms are re-used in the following.
-
-Each generalized ufunc has information associated with it that states what the
-"core" dimensionality of the inputs is, as well as the corresponding
-dimensionality of the outputs (the element-wise ufuncs have zero core
-dimensions). The list of the core dimensions for all arguments is called the
-"signature" of a ufunc. For example, the ufunc numpy.add has signature
-"(),()->()" defining two scalar inputs and one scalar output.
-
-Another example is (see the GeneralLoopingFunctions page) the function
-inner1d(a,b) with a signature of "(i),(i)->()". This applies the inner product
-along the last axis of each input, but keeps the remaining indices intact. For
-example, where a is of shape (3,5,N) and b is of shape (5,N), this will return
-an output of shape (3,5). The underlying elementary function is called 3*5
-times. In the signature, we specify one core dimension "(i)" for each input and
-zero core dimensions "()" for the output, since it takes two 1-d arrays and
-returns a scalar. By using the same name "i", we specify that the two
-corresponding dimensions should be of the same size (or one of them is of size
-1 and will be broadcast).
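-
-The ``inner1d`` ufunc shipped for testing purposes in the internal
-``numpy.core.umath_tests`` module realizes exactly this signature and
-illustrates the shape behavior sketched above::
-
-    >>> from numpy.core.umath_tests import inner1d
-    >>> a = np.ones((3, 5, 7))
-    >>> b = np.ones((5, 7))
-    >>> inner1d(a, b).shape   # the core dimension of size 7 is contracted
-    (3, 5)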
-
-The dimensions beyond the core dimensions are called "loop" dimensions. In the
-above example, this corresponds to (3,5).
-
-The usual numpy "broadcasting" rules apply, where the signature determines how
-the dimensions of each input/output object are split into core and loop
-dimensions:
-
-If an input array has smaller dimensionality than the corresponding number
-of core dimensions, 1's are prepended to its shape. The core dimensions are
-removed from all inputs and the remaining dimensions are broadcast together,
-defining the loop dimensions. The output is given by the loop dimensions plus
-the output core dimensions.
-
-Experimental Windows 64 bits support
-------------------------------------
-
-Numpy can now be built on 64-bit Windows (amd64 only, not IA64), with both MS
-compilers and mingw-w64 compilers.
-
-This is *highly experimental*: DO NOT USE IN PRODUCTION. See the Windows
-64 bits section of INSTALL.txt for more information on limitations and how to
-build it yourself.
-
-New features
-============
-
-Formatting issues
------------------
-
-Float formatting is now handled by numpy instead of the C runtime: this enables
-locale-independent formatting and more robust fromstring and related methods.
-Special values (inf and nan) are also more consistent across platforms (nan vs
-IND/NaN, etc...), and more consistent with recent Python formatting work (in
-2.6 and later).
-
-Nan handling in max/min
------------------------
-
-The maximum/minimum ufuncs now reliably propagate nans. If one of the
-arguments is a nan, then nan is returned. This affects np.min/np.max, amin/amax
-and the array methods max/min. New ufuncs fmax and fmin have been added to deal
-with non-propagating nans.
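-
-For example, comparing the propagating and non-propagating variants::
-
-    >>> np.maximum(1.0, np.nan)
-    nan
-    >>> np.fmax(1.0, np.nan)
-    1.0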
-
-Nan handling in sign
---------------------
-
-The ufunc sign now returns nan for the sign of a nan.
-
-
-New ufuncs
-----------
-
-#. fmax - same as maximum for integer types and non-nan floats. Returns the
- non-nan argument if one argument is nan and returns nan if both arguments
- are nan.
-#. fmin - same as minimum for integer types and non-nan floats. Returns the
- non-nan argument if one argument is nan and returns nan if both arguments
- are nan.
-#. deg2rad - converts degrees to radians, same as the radians ufunc.
-#. rad2deg - converts radians to degrees, same as the degrees ufunc.
-#. log2 - base 2 logarithm.
-#. exp2 - base 2 exponential.
-#. trunc - truncate floats to nearest integer towards zero.
-#. logaddexp - add numbers stored as logarithms and return the logarithm
- of the result.
-#. logaddexp2 - add numbers stored as base 2 logarithms and return the base 2
- logarithm of the result.
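-
-A quick consistency check for ``logaddexp`` (adding two numbers stored as
-logarithms)::
-
-    >>> x, y = 1e-50, 2.5e-50
-    >>> np.allclose(np.exp(np.logaddexp(np.log(x), np.log(y))), x + y)
-    True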
-
-Masked arrays
--------------
-
-Several new features and bug fixes, including:
-
- * structured arrays should now be fully supported by MaskedArray
- (r6463, r6324, r6305, r6300, r6294...)
- * Minor bug fixes (r6356, r6352, r6335, r6299, r6298)
- * Improved support for __iter__ (r6326)
- * made baseclass, sharedmask and hardmask accessible to the user (but
- read-only)
- * doc update
-
-gfortran support on Windows
----------------------------
-
-Gfortran can now be used as a Fortran compiler for numpy on Windows, even when
-the C compiler is Visual Studio (VS 2005 and above; VS 2003 will NOT work).
-Gfortran + Visual Studio does not work on 64-bit Windows (but gcc + gfortran
-does). It is unclear whether it will be possible to use gfortran and Visual
-Studio at all on x64.
-
-Arch option for Windows binary
-------------------------------
-
-Automatic arch detection can now be bypassed from the command line for the
-superpack installer:
-
-    numpy-1.3.0-superpack-win32.exe /arch=nosse
-
-will install a numpy which works on any x86, even if the running computer
-supports the SSE instruction set.
-
-Deprecated features
-===================
-
-Histogram
----------
-
-The semantics of histogram have been modified to fix long-standing issues
-with outlier handling. The main changes concern:
-
-#. the definition of the bin edges, now including the rightmost edge, and
-#. the handling of upper outliers, now ignored rather than tallied in the
- rightmost bin.
-
-The previous behavior is still accessible using `new=False`, but this is
-deprecated, and will be removed entirely in 1.4.0.
-
-Documentation changes
-=====================
-
-A lot of documentation has been added. Both user guide and references can be
-built from sphinx.
-
-New C API
-=========
-
-Multiarray API
---------------
-
-The following functions have been added to the multiarray C API:
-
- * PyArray_GetEndianness: to get runtime endianness
-
-Ufunc API
----------
-
-The following functions have been added to the ufunc API:
-
- * PyUFunc_FromFuncAndDataAndSignature: to declare a more general ufunc
- (generalized ufunc).
-
-
-New defines
------------
-
-New public C defines are available for ARCH specific code through numpy/npy_cpu.h:
-
- * NPY_CPU_X86: x86 arch (32 bits)
- * NPY_CPU_AMD64: amd64 arch (x86_64, NOT Itanium)
- * NPY_CPU_PPC: 32 bits ppc
- * NPY_CPU_PPC64: 64 bits ppc
- * NPY_CPU_SPARC: 32 bits sparc
- * NPY_CPU_SPARC64: 64 bits sparc
- * NPY_CPU_S390: S390
- * NPY_CPU_IA64: ia64
- * NPY_CPU_PARISC: PARISC
-
-New macros for CPU endianness have been added as well (see internal changes
-below for details):
-
- * NPY_BYTE_ORDER: integer
- * NPY_LITTLE_ENDIAN/NPY_BIG_ENDIAN defines
-
-Those provide portable alternatives to the glibc endian.h macros for platforms
-without that header.
-
-Portable NAN, INFINITY, etc...
-------------------------------
-
-npy_math.h now makes available several portable macros to get NAN, INFINITY:
-
- * NPY_NAN: equivalent to NAN, which is a GNU extension
- * NPY_INFINITY: equivalent to C99 INFINITY
- * NPY_PZERO, NPY_NZERO: positive and negative zero respectively
-
-Corresponding single and extended precision macros are available as well. All
-references to NAN, or home-grown computation of NAN on the fly have been
-removed for consistency.
-
-Internal changes
-================
-
-numpy.core math configuration revamp
-------------------------------------
-
-This should make the porting to new platforms easier, and more robust. In
-particular, the configuration stage does not need to execute any code on the
-target platform, which is a first step toward cross-compilation.
-
-https://www.numpy.org/neps/nep-0003-math_config_clean.html
-
-umath refactor
---------------
-
-A lot of code cleanup for umath/ufunc code (charris).
-
-Improvements to build warnings
-------------------------------
-
-Numpy can now build with -W -Wall without warnings
-
-https://www.numpy.org/neps/nep-0002-warnfix.html
-
-Separate core math library
---------------------------
-
-The core math functions (sin, cos, etc... for basic C types) have been put into
-a separate library; it acts as a compatibility layer, to support most C99 maths
-functions (real only for now). The library includes platform-specific fixes for
-various maths functions, so using those versions should be more robust
-than using your platform's functions directly. The API for existing functions
-is
-exactly the same as the C99 math functions API; the only difference is the npy
-prefix (npy_cos vs cos).
-
-The core library will be made available to any extension in 1.4.0.
-
-CPU arch detection
-------------------
-
-npy_cpu.h defines numpy specific CPU defines, such as NPY_CPU_X86, etc...
-Those are portable across OS and toolchains, and set up when the header is
-parsed, so that they can be safely used even in the case of cross-compilation
-(the values are not set when numpy is built), or for multi-arch binaries (e.g.
-fat binaries on Mac OS X).
-
-npy_endian.h defines numpy specific endianness defines, modeled on the glibc
-endian.h. NPY_BYTE_ORDER is equivalent to BYTE_ORDER, and one of
-NPY_LITTLE_ENDIAN or NPY_BIG_ENDIAN is defined. As for CPU archs, those are set
-when the header is parsed by the compiler, and as such can be used for
-cross-compilation and multi-arch binaries.
+++ /dev/null
-=========================
-NumPy 1.4.0 Release Notes
-=========================
-
-This minor release includes numerous bug fixes, as well as a few new features.
-It is backward compatible with the 1.3.0 release.
-
-Highlights
-==========
-
-* New datetime dtype support to deal with dates in arrays
-
-* Faster import time
-
-* Extended array wrapping mechanism for ufuncs
-
-* New Neighborhood iterator (C-level only)
-
-* C99-like complex functions in npymath
-
-New features
-============
-
-Extended array wrapping mechanism for ufuncs
---------------------------------------------
-
-An __array_prepare__ method has been added to ndarray to provide subclasses
-greater flexibility to interact with ufuncs and ufunc-like functions. ndarray
-already provided __array_wrap__, which allowed subclasses to set the array type
-for the result and populate metadata on the way out of the ufunc (as seen in
-the implementation of MaskedArray). For some applications it is necessary to
-provide checks and populate metadata *on the way in*. __array_prepare__ is
-therefore called just after the ufunc has initialized the output array but
-before computing the results and populating it. This way, checks can be made
-and errors raised before operations which may modify data in place.
-
-Automatic detection of forward incompatibilities
-------------------------------------------------
-
-Previously, if an extension was built against a version N of NumPy, and used on
-a system with NumPy M < N, import_array would succeed, which could cause
-crashes because version M may lack a function present in N. Starting from
-NumPy 1.4.0, this will cause a failure in import_array, so the error will be
-caught early on.
-
-New iterators
--------------
-
-A new neighborhood iterator has been added to the C API. It can be used to
-iterate over the items in a neighborhood of an array, and can handle boundary
-conditions automatically. Zero and one padding are available, as well as
-arbitrary constant value, mirror and circular padding.
-
-New polynomial support
-----------------------
-
-New modules chebyshev and polynomial have been added. The new polynomial module
-is not compatible with the current polynomial support in numpy, but is much
-like the new chebyshev module. The most noticeable differences for most users
-will be that coefficients are specified from low to high power, that the low
-level functions do *not* work with the Chebyshev and Polynomial classes as
-arguments, and that the Chebyshev and Polynomial classes include a domain.
-Mapping between domains is a linear substitution and the two classes can be
-converted one to the other, allowing, for instance, a Chebyshev series in
-one domain to be expanded as a polynomial in another domain. The new classes
-should generally be used instead of the low level functions, the latter are
-provided for those who wish to build their own classes.
-
-The new modules are not automatically imported into the numpy namespace,
-they must be explicitly brought in with an "import numpy.polynomial"
-statement.
-
-New C API
----------
-
-The following C functions have been added to the C API:
-
- #. PyArray_GetNDArrayCFeatureVersion: return the *API* version of the
- loaded numpy.
- #. PyArray_Correlate2 - like PyArray_Correlate, but implements the usual
- definition of correlation. Inputs are not swapped, and conjugate is
- taken for complex arrays.
- #. PyArray_NeighborhoodIterNew - a new iterator to iterate over a
- neighborhood of a point, with automatic boundaries handling. It is
- documented in the iterators section of the C-API reference, and you can
- find some examples in the multiarray_test.c.src file in numpy.core.
-
-New ufuncs
-----------
-
-The following ufuncs have been added to the C API:
-
- #. copysign - return the value of the first argument with the sign copied
- from the second argument.
- #. nextafter - return the next representable floating point value of the
- first argument toward the second argument.
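-
-A quick illustration of both::
-
-    >>> np.copysign(3.0, -0.5)
-    -3.0
-    >>> np.nextafter(1.0, 2.0) == 1.0 + np.finfo(np.float64).eps
-    True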
-
-New defines
------------
-
-The alpha processor is now defined and available in numpy/npy_cpu.h. The
-failed detection of the PARISC processor has been fixed. The defines are:
-
- #. NPY_CPU_HPPA: PARISC
- #. NPY_CPU_ALPHA: Alpha
-
-Testing
--------
-
- #. deprecated decorator: this decorator may be used to avoid cluttering
-    testing output while testing that a DeprecationWarning is effectively
-    raised by the decorated test.
- #. assert_array_almost_equal_nulp: new method to compare two arrays of
- floating point values. With this function, two values are considered
- close if there are not many representable floating point values in
- between, thus being more robust than assert_array_almost_equal when the
- values fluctuate a lot.
- #. assert_array_max_ulp: raise an assertion if there are more than N
- representable numbers between two floating point values.
- #. assert_warns: raise an AssertionError if a callable does not generate a
- warning of the appropriate class, without altering the warning state.
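-
-For instance, a ulp-based comparison and a warning check might look like the
-following sketch (return values, if any, are discarded)::
-
-    >>> import warnings
-    >>> from numpy.testing import assert_array_max_ulp, assert_warns
-    >>> _ = assert_array_max_ulp(1.0, 1.0 + np.finfo(np.float64).eps, maxulp=1)
-    >>> def f():
-    ...     warnings.warn("gone soon", DeprecationWarning)
-    >>> _ = assert_warns(DeprecationWarning, f)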
-
-Reusing npymath
----------------
-
-In 1.3.0, we started putting portable C math routines in the npymath library,
-so that people can use those to write portable extensions. Unfortunately, it
-was not possible to easily link against this library: in 1.4.0, support has
-been added to numpy.distutils so that third parties can reuse it. See coremath
-documentation for more information.
-
-Improved set operations
------------------------
-
-In previous versions of NumPy some set functions (intersect1d,
-setxor1d, setdiff1d and setmember1d) could return incorrect results if
-the input arrays contained duplicate items. These now work correctly
-for input arrays with duplicates. setmember1d has been renamed to
-in1d, as with the change to accept arrays with duplicates it is
-no longer a set operation, and is conceptually similar to an
-elementwise version of the Python operator 'in'. All of these
-functions now accept the boolean keyword assume_unique. This is False
-by default, but can be set True if the input arrays are known not
-to contain duplicates, which can increase the functions' execution
-speed.
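-
-A sketch of the renamed function and the new keyword::
-
-    >>> np.in1d([1, 2, 2, 3], [2, 4]).tolist()
-    [False, True, True, False]
-    >>> np.in1d([1, 2, 3], [2, 4], assume_unique=True).tolist()
-    [False, True, False]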
-
-Improvements
-============
-
- #. numpy import is noticeably faster (by 20 to 30%, depending on the
- platform and computer)
-
- #. The sort functions now sort nans to the end.
-
- * Real sort order is [R, nan]
- * Complex sort order is [R + Rj, R + nanj, nan + Rj, nan + nanj]
-
- Complex numbers with the same nan placements are sorted according to
- the non-nan part if it exists.
- #. The type comparison functions have been made consistent with the new
- sort order of nans. Searchsorted now works with sorted arrays
- containing nan values.
- #. Complex division has been made more resistant to overflow.
- #. Complex floor division has been made more resistant to overflow.
-
-Deprecations
-============
-
-The following functions are deprecated:
-
- #. correlate: it takes a new keyword argument old_behavior. When True (the
-    default), it returns the same result as before. When False, it computes
-    the conventional correlation and takes the conjugate for complex arrays.
-    The old behavior will be removed in NumPy 1.5, and raises a
- DeprecationWarning in 1.4.
-
- #. unique1d: use unique instead. unique1d raises a deprecation
- warning in 1.4, and will be removed in 1.5.
-
- #. intersect1d_nu: use intersect1d instead. intersect1d_nu raises
- a deprecation warning in 1.4, and will be removed in 1.5.
-
- #. setmember1d: use in1d instead. setmember1d raises a deprecation
- warning in 1.4, and will be removed in 1.5.
-
-The following raise errors:
-
- #. When operating on 0-d arrays, ``numpy.max`` and other functions accept
- only ``axis=0``, ``axis=-1`` and ``axis=None``. Using an out-of-bounds
-    axis is an indication of a bug, so Numpy raises an error for these cases
- now.
-
- #. Specifying ``axis > MAX_DIMS`` is no longer allowed; Numpy now raises an
-    error instead of behaving similarly to ``axis=None``.
-
-Internal changes
-================
-
-Use C99 complex functions when available
-----------------------------------------
-
-The numpy complex types are now guaranteed to be ABI compatible with the C99
-complex type, if available on the platform. Moreover, the complex ufuncs now
-use the platform C99 functions instead of our own.
-
-split multiarray and umath source code
---------------------------------------
-
-The source code of multiarray and umath has been split into separate logical
-compilation units. This should make the source code more approachable for
-newcomers.
-
-Separate compilation
---------------------
-
-By default, every file of multiarray (and umath) is merged into one for
-compilation as was the case before, but if NPY_SEPARATE_COMPILATION env
-variable is set to a non-negative value, experimental individual compilation of
-each file is enabled. This makes the compile/debug cycle much faster when
-working on core numpy.
-
-Separate core math library
---------------------------
-
-New functions which have been added:
-
- * npy_copysign
- * npy_nextafter
- * npy_cpack
- * npy_creal
- * npy_cimag
- * npy_cabs
- * npy_cexp
- * npy_clog
- * npy_cpow
- * npy_csqr
- * npy_ccos
- * npy_csin
+++ /dev/null
-=========================
-NumPy 1.5.0 Release Notes
-=========================
-
-
-Highlights
-==========
-
-Python 3 compatibility
-----------------------
-
-This is the first NumPy release which is compatible with Python 3. Support for
-Python 3 and Python 2 is done from a single code base. Extensive notes on
-changes can be found at
-`<http://projects.scipy.org/numpy/browser/trunk/doc/Py3K.txt>`_.
-
-Note that the Numpy testing framework relies on nose, which does not have a
-Python 3 compatible release yet. A working Python 3 branch of nose can be found
-at `<http://bitbucket.org/jpellerin/nose3/>`_ however.
-
-Porting of SciPy to Python 3 is expected to be completed soon.
-
-:pep:`3118` compatibility
--------------------------
-
-The new buffer protocol described by PEP 3118 is fully supported in this
-version of Numpy. On Python versions >= 2.6 Numpy arrays expose the buffer
-interface, and array(), asarray() and other functions accept new-style buffers
-as input.
-
-
-New features
-============
-
-Warning on casting complex to real
-----------------------------------
-
-Numpy now emits a `numpy.ComplexWarning` when a complex number is cast
-into a real number. For example:
-
- >>> x = np.array([1,2,3])
- >>> x[:2] = np.array([1+2j, 1-2j])
- ComplexWarning: Casting complex values to real discards the imaginary part
-
-The cast indeed discards the imaginary part, and this may not be the
-intended behavior in all cases, hence the warning. This warning can be
-turned off in the standard way:
-
- >>> import warnings
- >>> warnings.simplefilter("ignore", np.ComplexWarning)
-
-Dot method for ndarrays
------------------------
-
-Ndarrays now also have the dot product as a method, which allows writing
-chains of matrix products as
-
- >>> a.dot(b).dot(c)
-
-instead of the longer alternative
-
- >>> np.dot(a, np.dot(b, c))
-
-linalg.slogdet function
------------------------
-
-The slogdet function returns the sign and logarithm of the determinant
-of a matrix. Because the determinant may involve the product of many
-small/large values, the result is often more accurate than that obtained
-by simple multiplication.
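-
-A small usage sketch::
-
-    >>> sign, logdet = np.linalg.slogdet(2 * np.eye(3))
-    >>> sign
-    1.0
-    >>> np.allclose(sign * np.exp(logdet), np.linalg.det(2 * np.eye(3)))
-    True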
-
-new header
-----------
-
-The new header file ndarraytypes.h contains the symbols from
-ndarrayobject.h that do not depend on the PY_ARRAY_UNIQUE_SYMBOL and
-NO_IMPORT/_ARRAY macros. Broadly, these symbols are types, typedefs,
-and enumerations; the array function calls are left in
-ndarrayobject.h. This allows users to include array-related types and
-enumerations without needing to concern themselves with the macro
-expansions and their side effects.
-
-
-Changes
-=======
-
-polynomial.polynomial
----------------------
-
-* The polyint and polyder functions now check that the specified number of
-  integrations or derivations is a non-negative integer. The number 0 is
- a valid value for both functions.
-* A degree method has been added to the Polynomial class.
-* A trimdeg method has been added to the Polynomial class. It operates like
- truncate except that the argument is the desired degree of the result,
- not the number of coefficients.
-* Polynomial.fit now uses None as the default domain for the fit. The default
- Polynomial domain can be specified by using [] as the domain value.
-* Weights can be used in both polyfit and Polynomial.fit
-* A linspace method has been added to the Polynomial class to ease plotting.
-* The polymulx function was added.
-
-polynomial.chebyshev
---------------------
-
-* The chebint and chebder functions now check that the specified number of
-  integrations or derivations is a non-negative integer. The number 0 is
- a valid value for both functions.
-* A degree method has been added to the Chebyshev class.
-* A trimdeg method has been added to the Chebyshev class. It operates like
- truncate except that the argument is the desired degree of the result,
- not the number of coefficients.
-* Chebyshev.fit now uses None as the default domain for the fit. The default
- Chebyshev domain can be specified by using [] as the domain value.
-* Weights can be used in both chebfit and Chebyshev.fit
-* A linspace method has been added to the Chebyshev class to ease plotting.
-* The chebmulx function was added.
-* Added functions for the Chebyshev points of the first and second kind.
-
-
-histogram
----------
-
-After a two-year transition period, the old behavior of the histogram function
-has been phased out, and the "new" keyword has been removed.
-
-correlate
----------
-
-The old behavior of correlate was deprecated in 1.4.0, the new behavior (the
-usual definition for cross-correlation) is now the default.
+++ /dev/null
-=========================
-NumPy 1.6.0 Release Notes
-=========================
-
-This release includes several new features as well as numerous bug fixes and
-improved documentation. It is backward compatible with the 1.5.0 release, and
-supports Python 2.4 - 2.7 and 3.1 - 3.2.
-
-
-Highlights
-==========
-
-* Re-introduction of datetime dtype support to deal with dates in arrays.
-
-* A new 16-bit floating point type.
-
-* A new iterator, which improves performance of many functions.
-
-
-New features
-============
-
-New 16-bit floating point type
-------------------------------
-
-This release adds support for the IEEE 754-2008 binary16 format, available as
-the data type ``numpy.half``. Within Python, the type behaves similarly to
-`float` or `double`, and C extensions can add support for it with the exposed
-half-float API.
-
-
-New iterator
-------------
-
-A new iterator has been added, replacing the functionality of the
-existing iterator and multi-iterator with a single object and API.
-This iterator works well with general memory layouts different from
-C or Fortran contiguous, and handles both standard NumPy and
-customized broadcasting. The buffering, automatic data type
-conversion, and optional output parameters, offered by
-ufuncs but difficult to replicate elsewhere, are now exposed by this
-iterator.
-
-
-Legendre, Laguerre, Hermite, HermiteE polynomials in ``numpy.polynomial``
--------------------------------------------------------------------------
-
-Extend the number of polynomials available in the polynomial package. In
-addition, a new ``window`` attribute has been added to the classes in
-order to specify the range the ``domain`` maps to. This is mostly useful
-for the Laguerre, Hermite, and HermiteE polynomials whose natural domains
-are infinite and provides a more intuitive way to get the correct mapping
-of values without playing unnatural tricks with the domain.
-
-
-Fortran assumed shape array and size function support in ``numpy.f2py``
------------------------------------------------------------------------
-
-F2py now supports wrapping Fortran 90 routines that use assumed shape
-arrays. Previously, such routines could be called from Python, but the
-corresponding Fortran routines received assumed shape arrays as zero
-length arrays, which caused unpredictable results. Thanks to Lorenz
-Hüdepohl for pointing out the correct way to interface routines with
-assumed shape arrays.
-
-In addition, f2py now supports automatic wrapping of Fortran routines
-that use the two-argument ``size`` function in dimension specifications.
-
-
-Other new functions
--------------------
-
-``numpy.ravel_multi_index`` : Converts a multi-index tuple into
-an array of flat indices, applying boundary modes to the indices.
-
-``numpy.einsum`` : Evaluate the Einstein summation convention. Using the
-Einstein summation convention, many common multi-dimensional array operations
-can be represented in a simple fashion. This function provides a way to
-compute such summations.
-
-``numpy.count_nonzero`` : Counts the number of non-zero elements in an array.
-
-``numpy.result_type`` and ``numpy.min_scalar_type`` : These functions expose
-the underlying type promotion used by the ufuncs and other operations to
-determine the types of outputs. These improve upon the ``numpy.common_type``
-and ``numpy.mintypecode`` which provide similar functionality but do
-not match the ufunc implementation.
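-
-A few quick illustrations of these additions::
-
-    >>> np.ravel_multi_index((1, 2), (3, 4))   # flat index of element (1, 2)
-    6
-    >>> np.einsum('i,i->', [1, 2, 3], [4, 5, 6])   # inner product
-    32
-    >>> np.count_nonzero([1, 0, 3, 0])
-    2
-    >>> np.result_type(np.int8, np.float16)
-    dtype('float16')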
-
-
-Changes
-=======
-
-``default error handling``
---------------------------
-
-The default error handling has been changed from ``print`` to ``warn`` for
-all except ``underflow``, which remains ``ignore``.
-
-
-``numpy.distutils``
--------------------
-
-Several new compilers are supported for building Numpy: the Portland Group
-Fortran compiler on OS X, the PathScale compiler suite and the 64-bit Intel C
-compiler on Linux.
-
-
-``numpy.testing``
------------------
-
-The testing framework gained ``numpy.testing.assert_allclose``, which provides
-a more convenient way to compare floating point arrays than
-`assert_almost_equal`, `assert_approx_equal` and `assert_array_almost_equal`.
-
-
-``C API``
----------
-
-In addition to the APIs for the new iterator and half data type, a number
-of other additions have been made to the C API. The type promotion
-mechanism used by ufuncs is exposed via ``PyArray_PromoteTypes``,
-``PyArray_ResultType``, and ``PyArray_MinScalarType``. A new enumeration
-``NPY_CASTING`` has been added which controls what types of casts are
-permitted. This is used by the new functions ``PyArray_CanCastArrayTo``
-and ``PyArray_CanCastTypeTo``. A more flexible way to handle
-conversion of arbitrary python objects into arrays is exposed by
-``PyArray_GetArrayParamsFromObject``.
-
-
-Deprecated features
-===================
-
-The "normed" keyword in ``numpy.histogram`` is deprecated. Its functionality
-will be replaced by the new "density" keyword.
-
-
-Removed features
-================
-
-``numpy.fft``
--------------
-
-The functions `refft`, `refft2`, `refftn`, `irefft`, `irefft2`, `irefftn`,
-which were aliases for the same functions without the 'e' in the name, were
-removed.
-
-
-``numpy.memmap``
-----------------
-
-The `sync()` and `close()` methods of memmap were removed. Use `flush()` and
-"del memmap" instead.
-
-
-``numpy.lib``
--------------
-
-The deprecated functions ``numpy.unique1d``, ``numpy.setmember1d``,
-``numpy.intersect1d_nu`` and ``numpy.lib.ufunclike.log2`` were removed.
-
-
-``numpy.ma``
-------------
-
-Several deprecated items were removed from the ``numpy.ma`` module::
-
- * ``numpy.ma.MaskedArray`` "raw_data" method
- * ``numpy.ma.MaskedArray`` constructor "flag" keyword
- * ``numpy.ma.make_mask`` "flag" keyword
- * ``numpy.ma.allclose`` "fill_value" keyword
-
-
-``numpy.distutils``
--------------------
-
-The ``numpy.get_numpy_include`` function was removed, use ``numpy.get_include``
-instead.
+++ /dev/null
-=========================
-NumPy 1.6.1 Release Notes
-=========================
-
-This is a bugfix only release in the 1.6.x series.
-
-
-Issues Fixed
-============
-
-* #1834: einsum fails for specific shapes
-* #1837: einsum throws nan or freezes python for specific array shapes
-* #1838: object <-> structured type arrays regression
-* #1851: regression for SWIG based code in 1.6.0
-* #1863: Buggy results when operating on array copied with astype()
-* #1870: Fix corner case of object array assignment
-* #1843: Py3k: fix error with recarray
-* #1885: nditer: Error in detecting double reduction loop
-* #1874: f2py: fix --include_paths bug
-* #1749: Fix ctypes.load_library()
-* #1895/1896: iter: writeonly operands weren't always being buffered correctly
+++ /dev/null
-=========================
-NumPy 1.6.2 Release Notes
-=========================
-
-This is a bugfix release in the 1.6.x series. Due to the delay of the NumPy
-1.7.0 release, this release contains far more fixes than a regular NumPy bugfix
-release. It also includes a number of documentation and build improvements.
-
-Issues fixed
-============
-
-``numpy.core``
---------------
-
-* #2063: make unique() return consistent index
-* #1138: allow creating arrays from empty buffers or empty slices
-* #1446: correct note about correspondence vstack and concatenate
-* #1149: make argmin() work for datetime
-* #1672: fix allclose() to work for scalar inf
-* #1747: make np.median() work for 0-D arrays
-* #1776: make complex division by zero to yield inf properly
-* #1675: add scalar support for the format() function
-* #1905: explicitly check for NaNs in allclose()
-* #1952: allow floating ddof in std() and var()
-* #1948: fix regression for indexing chararrays with empty list
-* #2017: fix type hashing
-* #2046: deleting array attributes causes segfault
-* #2033: a**2.0 has incorrect type
-* #2045: make attribute/iterator_element deletions not segfault
-* #2021: fix segfault in searchsorted()
-* #2073: fix float16 __array_interface__ bug
-
-
-``numpy.lib``
--------------
-
-* #2048: break reference cycle in NpzFile
-* #1573: savetxt() now handles complex arrays
-* #1387: allow bincount() to accept empty arrays
-* #1899: fixed histogramdd() bug with empty inputs
-* #1793: fix failing npyio test under py3k
-* #1936: fix extra nesting for subarray dtypes
-* #1848: make tril/triu return the same dtype as the original array
-* #1918: use Py_TYPE to access ob_type, so it works also on Py3
-
-
-``numpy.distutils``
--------------------
-
-* #1261: change compile flag on AIX from -O5 to -O3
-* #1377: update HP compiler flags
-* #1383: provide better support for C++ code on HPUX
-* #1857: fix build for py3k + pip
-* BLD: raise a clearer warning in case of building without cleaning up first
-* BLD: follow build_ext coding convention in build_clib
-* BLD: fix up detection of Intel CPU on OS X in system_info.py
-* BLD: add support for the new X11 directory structure on Ubuntu & co.
-* BLD: add ufsparse to the libraries search path.
-* BLD: add 'pgfortran' as a valid compiler in the Portland Group
-* BLD: update version match regexp for IBM AIX Fortran compilers.
-
-
-``numpy.random``
-----------------
-
-* BUG: Use npy_intp instead of long in mtrand
-
-Changes
-=======
-
-``numpy.f2py``
---------------
-
-* ENH: Introduce new options extra_f77_compiler_args and extra_f90_compiler_args
-* BLD: Improve reporting of fcompiler value
-* BUG: Fix f2py test_kind.py test
-
-
-``numpy.poly``
---------------
-
-* ENH: Add some tests for polynomial printing
-* ENH: Add companion matrix functions
-* DOC: Rearrange the polynomial documents
-* BUG: Fix up links to classes
-* DOC: Add version added to some of the polynomial package modules
-* DOC: Document xxxfit functions in the polynomial package modules
-* BUG: The polynomial convenience classes let different types interact
-* DOC: Document the use of the polynomial convenience classes
-* DOC: Improve numpy reference documentation of polynomial classes
-* ENH: Improve the computation of polynomials from roots
-* STY: Code cleanup in polynomial [*]fromroots functions
-* DOC: Remove references to cast and NA, which were added in 1.7
+++ /dev/null
-=========================
-NumPy 1.7.0 Release Notes
-=========================
-
-This release includes several new features as well as numerous bug fixes and
-refactorings. It supports Python 2.4 - 2.7 and 3.1 - 3.3 and is the last
-release that supports Python 2.4 - 2.5.
-
-Highlights
-==========
-
-* ``where=`` parameter to ufuncs (allows the use of boolean arrays to choose
- where a computation should be done)
-* ``vectorize`` improvements (added 'excluded' and 'cache' keyword, general
- cleanup and bug fixes)
-* ``numpy.random.choice`` (random sample generating function)
-
-
-Compatibility notes
-===================
-
-In a future version of numpy, the functions np.diag, np.diagonal, and the
-diagonal method of ndarrays will return a view onto the original array,
-instead of producing a copy as they do now. This makes a difference if you
-write to the array returned by any of these functions. To facilitate this
-transition, numpy 1.7 produces a FutureWarning if it detects that you may
-be attempting to write to such an array. See the documentation for
-np.diagonal for details.
-
-Similar to np.diagonal above, in a future version of numpy, indexing a
-record array by a list of field names will return a view onto the original
-array, instead of producing a copy as it does now. As with np.diagonal,
-numpy 1.7 produces a FutureWarning if it detects that you may be attempting
-to write to such an array. See the documentation for array indexing for
-details.
-
-In a future version of numpy, the default casting rule for UFunc out=
-parameters will be changed from 'unsafe' to 'same_kind'. (This also applies
-to in-place operations like a += b, which is equivalent to np.add(a, b,
-out=a).) Most usages which violate the 'same_kind' rule are likely bugs, so
-this change may expose previously undetected errors in projects that depend
-on NumPy. In this version of numpy, such usages will continue to succeed,
-but will raise a DeprecationWarning.
-
-Full-array boolean indexing has been optimized to use a different,
-faster code path. This code path should produce the same results,
-but any feedback about changes to your code would be appreciated.
-
-Attempting to write to a read-only array (one with ``arr.flags.writeable``
-set to ``False``) used to raise either a RuntimeError, ValueError, or
-TypeError inconsistently, depending on which code path was taken. It now
-consistently raises a ValueError.
-
-The <ufunc>.reduce functions evaluate some reductions in a different order
-than in previous versions of NumPy, generally providing higher performance.
-Because of the nature of floating-point arithmetic, this may subtly change
-some results, just as linking NumPy to a different BLAS implementation,
-such as MKL, can.
-
-If upgrading from 1.5, note that in 1.6 and 1.7 substantial code has been
-added and some code paths altered, particularly in the
-areas of type resolution and buffered iteration over universal functions.
-This might have an impact on your code, particularly if you relied on
-accidental behavior in the past.
-
-New features
-============
-
-Reduction UFuncs Generalize axis= Parameter
--------------------------------------------
-
-Any ufunc.reduce function call, as well as other reductions like sum, prod,
-any, all, max and min, supports the ability to choose a subset of the axes to
-reduce over. Previously, one could say axis=None to mean all the axes or
-axis=# to pick a single axis. Now, one can also say axis=(#,#) to pick a
-list of axes for reduction.
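-
-For example::
-
-    >>> a = np.arange(24).reshape(2, 3, 4)
-    >>> np.sum(a, axis=(0, 2)).shape
-    (3,)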
-
-Reduction UFuncs New keepdims= Parameter
-----------------------------------------
-
-There is a new keepdims= parameter which, if set to True, doesn't throw
-away the reduction axes but instead sets them to have size one. When this
-option is set, the reduction result will broadcast correctly to the
-original operand which was reduced.
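-
-For example, normalizing rows without reshaping the intermediate sum by hand::
-
-    >>> a = np.arange(12.).reshape(3, 4)
-    >>> s = np.sum(a, axis=1, keepdims=True)
-    >>> s.shape
-    (3, 1)
-    >>> (a / s).shape   # broadcasts against the original operand
-    (3, 4)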
-
-Datetime support
-----------------
-
-.. note:: The datetime API is *experimental* in 1.7.0, and may undergo changes
- in future versions of NumPy.
-
-There have been a lot of fixes and enhancements to datetime64 compared
-to NumPy 1.6:
-
-* the parser is quite strict about only accepting ISO 8601 dates, with a few
- convenience extensions
-* converts between units correctly
-* datetime arithmetic works correctly
-* business day functionality (allows the datetime to be used in contexts where
- only certain days of the week are valid)
-
-The notes in `doc/source/reference/arrays.datetime.rst <https://github.com/numpy/numpy/blob/maintenance/1.7.x/doc/source/reference/arrays.datetime.rst>`_
-(also available in the online docs at `arrays.datetime.html
-<https://docs.scipy.org/doc/numpy/reference/arrays.datetime.html>`_) should be
-consulted for more details.
-
-Custom formatter for printing arrays
-------------------------------------
-
-See the new ``formatter`` parameter of the ``numpy.set_printoptions``
-function.
-
-New function numpy.random.choice
---------------------------------
-
-A generic sampling function has been added which will generate samples from
-a given array-like. The samples can be with or without replacement, and
-with uniform or given non-uniform probabilities.
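-
-A sketch of the call (the sample itself is random, so only its shape is
-shown)::
-
-    >>> sample = np.random.choice([1, 2, 3, 4], size=2, replace=False,
-    ...                           p=[0.1, 0.2, 0.3, 0.4])
-    >>> sample.shape
-    (2,)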
-
-New function isclose
---------------------
-
-Returns a boolean array where two arrays are element-wise equal within a
-tolerance. Both relative and absolute tolerance can be specified.
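-
-For example, with the default relative and absolute tolerances::
-
-    >>> np.isclose([1.0, 2.0], [1.0 + 1e-9, 2.1]).tolist()
-    [True, False]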
-
-Preliminary multi-dimensional support in the polynomial package
----------------------------------------------------------------
-
-Axis keywords have been added to the integration and differentiation
-functions and a tensor keyword was added to the evaluation functions.
-These additions allow multi-dimensional coefficient arrays to be used in
-those functions. New functions for evaluating 2-D and 3-D coefficient
-arrays on grids or sets of points were added together with 2-D and 3-D
-pseudo-Vandermonde matrices that can be used for fitting.
-
-
-Ability to pad rank-n arrays
-----------------------------
-
-A pad module containing functions for padding n-dimensional arrays has been
-added. The various private padding functions are exposed as options to a
-public 'pad' function. Example::
-
- pad(a, 5, mode='mean')
-
-Current modes are ``constant``, ``edge``, ``linear_ramp``, ``maximum``,
-``mean``, ``median``, ``minimum``, ``reflect``, ``symmetric``, ``wrap``, and
-``<function>``.
-
-
-New argument to searchsorted
-----------------------------
-
-The function searchsorted now accepts a 'sorter' argument that is a
-permutation array that sorts the array to be searched.
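-
-For example, searching an unsorted array through its argsort permutation::
-
-    >>> a = np.array([30, 10, 20])
-    >>> order = np.argsort(a)
-    >>> a.searchsorted(20, sorter=order)
-    1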
-
-Build system
-------------
-
-Added experimental support for the AArch64 architecture.
-
-C API
------
-
-New function ``PyArray_RequireWriteable`` provides a consistent interface
-for checking array writeability -- any C code which works with arrays whose
-WRITEABLE flag is not known to be True a priori, should make sure to call
-this function before writing.
-
-NumPy C Style Guide added (``doc/C_STYLE_GUIDE.rst.txt``).
-
-Changes
-=======
-
-General
--------
-
-The function np.concatenate tries to match the layout of its input arrays.
-Previously, the layout did not follow any particular pattern, and depended
-in an undesirable way on the particular axis chosen for concatenation. A
-bug was also fixed which silently allowed out of bounds axis arguments.
-
-The ufuncs logical_or, logical_and, and logical_not now follow Python's
-behavior with object arrays, instead of trying to call methods on the
-objects. For example the expression (3 and 'test') produces the string
-'test', and now np.logical_and(np.array(3, 'O'), np.array('test', 'O'))
-produces 'test' as well.
-
-The ``.base`` attribute on ndarrays, which is used on views to ensure that the
-underlying array owning the memory is not deallocated prematurely, now
-collapses out references when you have a view-of-a-view. For example::
-
- a = np.arange(10)
- b = a[1:]
- c = b[1:]
-
-In numpy 1.6, ``c.base`` is ``b``, and ``c.base.base`` is ``a``. In numpy 1.7,
-``c.base`` is ``a``.
-
-To increase backwards compatibility for software which relies on the old
-behaviour of ``.base``, we only 'skip over' objects which have exactly the same
-type as the newly created view. This makes a difference if you use ``ndarray``
-subclasses. For example, if we have a mix of ``ndarray`` and ``matrix`` objects
-which are all views on the same original ``ndarray``::
-
- a = np.arange(10)
- b = np.asmatrix(a)
- c = b[0, 1:]
- d = c[0, 1:]
-
-then ``d.base`` will be ``b``. This is because ``d`` is a ``matrix`` object,
-and so the collapsing process only continues so long as it encounters other
-``matrix`` objects. It considers ``c``, ``b``, and ``a`` in that order, and
-``b`` is the last entry in that list which is a ``matrix`` object.
-
-Casting Rules
--------------
-
-Casting rules have undergone some changes in corner cases, due to the
-NA-related work. In particular for combinations of scalar+scalar:
-
-* the `longlong` type (`q`) now stays `longlong` for operations with any other
- number (`? b h i l q p B H I`), previously it was cast as `int_` (`l`). The
- `ulonglong` type (`Q`) now stays as `ulonglong` instead of `uint` (`L`).
-
-* the `timedelta64` type (`m`) can now be mixed with any integer type (`b h i l
- q p B H I L Q P`), previously it raised `TypeError`.
-
-For array + scalar, the above rules just broadcast except the case when
-the array and scalars are unsigned/signed integers, then the result gets
-converted to the array type (of possibly larger size) as illustrated by the
-following examples::
-
- >>> (np.zeros((2,), dtype=np.uint8) + np.int16(257)).dtype
- dtype('uint16')
- >>> (np.zeros((2,), dtype=np.int8) + np.uint16(257)).dtype
- dtype('int16')
- >>> (np.zeros((2,), dtype=np.int16) + np.uint32(2**17)).dtype
- dtype('int32')
-
-Whether the size gets increased depends on the size of the scalar, for
-example::
-
- >>> (np.zeros((2,), dtype=np.uint8) + np.int16(255)).dtype
- dtype('uint8')
- >>> (np.zeros((2,), dtype=np.uint8) + np.int16(256)).dtype
- dtype('uint16')
-
-Also a ``complex128`` scalar + ``float32`` array is cast to ``complex64``.
-
-In NumPy 1.7 the `datetime64` type (`M`) must be constructed by explicitly
-specifying the unit as the second argument (e.g. ``np.datetime64(2000, 'Y')``).
-
-
-Deprecations
-============
-
-General
--------
-
-Specifying a custom string formatter with a `_format` array attribute is
-deprecated. The new `formatter` keyword in ``numpy.set_printoptions`` or
-``numpy.array2string`` can be used instead.
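-
-As a rough sketch of the replacement approach (the repr shown is illustrative
-and may differ slightly between releases)::
-
- >>> np.set_printoptions(formatter={'float': lambda x: '%.2f' % x})
- >>> np.array([3.14159, 6.28318])
- array([3.14, 6.28])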
-
-The deprecated imports in the polynomial package have been removed.
-
-``concatenate`` now raises DeprecationWarning for 1D arrays if ``axis != 0``.
-Versions of numpy < 1.7.0 ignored the axis argument for 1D arrays. We
-allow this for now, but in due course we will raise an error.
-
-C-API
------
-
-Direct access to the fields of PyArrayObject* has been deprecated. Direct
-access has been discouraged for many releases. Expect similar
-deprecations for PyArray_Descr* and other core objects in the future as
-preparation for NumPy 2.0.
-
-The macros in old_defines.h are deprecated and will be removed in the next
-major release (>= 2.0). The sed script tools/replace_old_macros.sed can be
-used to replace these macros with the newer versions.
-
-You can test your code against the deprecated C API by adding a line
-composed of ``#define NPY_NO_DEPRECATED_API`` and the target version number,
-such as ``NPY_1_7_API_VERSION``, before including any NumPy headers.
-
-The ``NPY_CHAR`` member of the ``NPY_TYPES`` enum is deprecated and will be
-removed in NumPy 1.8. See the discussion at
-`gh-2801 <https://github.com/numpy/numpy/issues/2801>`_ for more details.
+++ /dev/null
-=========================
-NumPy 1.7.1 Release Notes
-=========================
-
-This is a bugfix only release in the 1.7.x series.
-It supports Python 2.4 - 2.7 and 3.1 - 3.3 and is the last series that
-supports Python 2.4 - 2.5.
-
-
-Issues fixed
-============
-
-* gh-2973: Fix `1` is printed during numpy.test()
-* gh-2983: BUG: gh-2969: Backport memory leak fix 80b3a34.
-* gh-3007: Backport gh-3006
-* gh-2984: Backport fix complex polynomial fit
-* gh-2982: BUG: Make nansum work with booleans.
-* gh-2985: Backport large sort fixes
-* gh-3039: Backport object take
-* gh-3105: Backport nditer fix op axes initialization
-* gh-3108: BUG: npy-pkg-config ini files were missing after Bento build.
-* gh-3124: BUG: PyArray_LexSort allocates too much temporary memory.
-* gh-3131: BUG: Exported f2py_size symbol prevents linking multiple f2py modules.
-* gh-3117: Backport gh-2992
-* gh-3135: DOC: Add mention of PyArray_SetBaseObject stealing a reference
-* gh-3134: DOC: Fix typo in fft docs (the indexing variable is 'm', not 'n').
-* gh-3136: Backport #3128
+++ /dev/null
-=========================
-NumPy 1.7.2 Release Notes
-=========================
-
-This is a bugfix only release in the 1.7.x series.
-It supports Python 2.4 - 2.7 and 3.1 - 3.3 and is the last series that
-supports Python 2.4 - 2.5.
-
-
-Issues fixed
-============
-
-* gh-3153: Do not reuse nditer buffers when not filled enough
-* gh-3192: f2py crashes with UnboundLocalError exception
-* gh-442: Concatenate with axis=None now requires equal number of array elements
-* gh-2485: Fix for astype('S') string truncate issue
-* gh-3312: bug in count_nonzero
-* gh-2684: numpy.ma.average casts complex to float under certain conditions
-* gh-2403: masked array with named components does not behave as expected
-* gh-2495: np.ma.compress treated inputs in wrong order
-* gh-576: add __len__ method to ma.mvoid
-* gh-3364: reduce performance regression of mmap slicing
-* gh-3421: fix non-swapping strided copies in GetStridedCopySwap
-* gh-3373: fix small leak in datetime metadata initialization
-* gh-2791: add platform specific python include directories to search paths
-* gh-3168: fix undefined function and add integer divisions
-* gh-3301: memmap does not work with TemporaryFile in python3
-* gh-3057: distutils.misc_util.get_shared_lib_extension returns wrong debug extension
-* gh-3472: add module extensions to load_library search list
-* gh-3324: Make comparison function (gt, ge, ...) respect __array_priority__
-* gh-3497: np.insert behaves incorrectly with argument 'axis=-1'
-* gh-3541: make preprocessor tests consistent in halffloat.c
-* gh-3458: array_ass_boolean_subscript() writes 'non-existent' data to array
-* gh-2892: Regression in ufunc.reduceat with zero-sized index array
-* gh-3608: Regression when filling struct from tuple
-* gh-3701: add support for Python 3.4 ast.NameConstant
-* gh-3712: do not assume that GIL is enabled in xerbla
-* gh-3712: fix LAPACK error handling in lapack_litemodule
-* gh-3728: f2py fix decref on wrong object
-* gh-3743: Hash changed signature in Python 3.3
-* gh-3793: scalar int hashing broken on 64 bit python3
-* gh-3160: SandboxViolation easyinstalling 1.7.0 on Mac OS X 10.8.3
-* gh-3871: npy_math.h has invalid isinf for Solaris with SUNWspro12.2
-* gh-2561: Disable check for oldstyle classes in python3
-* gh-3900: Ensure NotImplemented is passed on in MaskedArray ufunc's
-* gh-2052: del scalar subscript causes segfault
-* gh-3832: fix a few uninitialized uses and memleaks
-* gh-3971: f2py changed string.lowercase to string.ascii_lowercase for python3
-* gh-3480: numpy.random.binomial raised ValueError for n == 0
-* gh-3992: hypot(inf, 0) shouldn't raise a warning, hypot(inf, inf) wrong result
-* gh-4018: Segmentation fault dealing with very large arrays
-* gh-4094: fix NaT handling in _strided_to_strided_string_to_datetime
-* gh-4051: fix uninitialized use in _strided_to_strided_string_to_datetime
-* gh-4123: lexsort segfault
-* gh-4141: Fix a few issues that show up with python 3.4b1
+++ /dev/null
-=========================
-NumPy 1.8.0 Release Notes
-=========================
-
-This release supports Python 2.6 - 2.7 and 3.2 - 3.3.
-
-
-Highlights
-==========
-
-
-* New, no 2to3: Python 2 and Python 3 are supported by a common code base.
-* New, gufuncs for linear algebra, enabling operations on stacked arrays.
-* New, inplace fancy indexing for ufuncs with the ``.at`` method.
-* New, ``partition`` function, partial sorting via selection for fast median.
-* New, ``nanmean``, ``nanvar``, and ``nanstd`` functions skipping NaNs.
-* New, ``full`` and ``full_like`` functions to create value initialized arrays.
-* New, ``PyUFunc_RegisterLoopForDescr``, better ufunc support for user dtypes.
-* Numerous performance improvements in many areas.
-
-
-Dropped Support
-===============
-
-
-Support for Python versions 2.4 and 2.5 has been dropped.
-
-Support for SCons has been removed.
-
-
-Future Changes
-==============
-
-
-The Datetime64 type remains experimental in this release. In 1.9 there will
-probably be some changes to make it more usable.
-
-The diagonal method currently returns a new array and raises a
-FutureWarning. In 1.9 it will return a readonly view.
-
-Multiple field selection from an array of structured type currently
-returns a new array and raises a FutureWarning. In 1.9 it will return a
-readonly view.
-
-The numpy/oldnumeric and numpy/numarray compatibility modules will be
-removed in 1.9.
-
-
-Compatibility notes
-===================
-
-
-The doc/sphinxext content has been moved into its own github repository
-(numpydoc_), and is included in numpy as a submodule. See the instructions in
-doc/HOWTO_BUILD_DOCS.rst.txt for how to access the content.
-
-.. _numpydoc: https://github.com/numpy/numpydoc
-
-The hash function of numpy.void scalars has been changed. Previously the
-pointer to the data was hashed as an integer. Now, the hash function uses
-the tuple-hash algorithm to combine the hash functions of the elements of
-the scalar, but only if the scalar is read-only.
-
-Numpy has switched its build system to using 'separate compilation' by
-default. In previous releases this was supported, but not the default. This
-should produce the same results as the old system, but if you're trying to
-do something complicated like link numpy statically or using an unusual
-compiler, then it's possible you will encounter problems. If so, please
-file a bug and as a temporary workaround you can re-enable the old build
-system by exporting the shell variable NPY_SEPARATE_COMPILATION=0.
-
-For the AdvancedNew iterator the ``oa_ndim`` flag should now be -1 to indicate
-that no ``op_axes`` and ``itershape`` are passed in. The ``oa_ndim == 0``
-case now indicates a 0-D iteration with ``op_axes`` being NULL, and the old
-usage is deprecated. This does not affect the ``NpyIter_New`` or
-``NpyIter_MultiNew`` functions.
-
-The functions nanargmin and nanargmax now return np.iinfo(np.intp).min for
-the index in all-NaN slices. Previously the functions would raise a ValueError
-for array returns and NaN for scalar returns.
-
-NPY_RELAXED_STRIDES_CHECKING
-----------------------------
-There is a new compile time environment variable
-``NPY_RELAXED_STRIDES_CHECKING``. If this variable is set to 1, then
-numpy will consider more arrays to be C- or F-contiguous -- for
-example, it becomes possible to have a column vector which is
-considered both C- and F-contiguous simultaneously. The new definition
-is more accurate, allows for faster code that makes fewer unnecessary
-copies, and simplifies numpy's code internally. However, it may also
-break third-party libraries that make too-strong assumptions about the
-stride values of C- and F-contiguous arrays. (It is also currently
-known that this breaks Cython code using memoryviews, which will be
-fixed in Cython.) THIS WILL BECOME THE DEFAULT IN A FUTURE RELEASE, SO
-PLEASE TEST YOUR CODE NOW AGAINST NUMPY BUILT WITH::
-
- NPY_RELAXED_STRIDES_CHECKING=1 python setup.py install
-
-You can check whether NPY_RELAXED_STRIDES_CHECKING is in effect by
-running::
-
- np.ones((10, 1), order="C").flags.f_contiguous
-
-This will be ``True`` if relaxed strides checking is enabled, and
-``False`` otherwise. The typical problem we've seen so far is C code
-that works with C-contiguous arrays, and assumes that the itemsize can
-be accessed by looking at the last element in the ``PyArray_STRIDES(arr)``
-array. When relaxed strides are in effect, this is not true (and in
-fact, it never was true in some corner cases). Instead, use
-``PyArray_ITEMSIZE(arr)``.
-
-For more information check the "Internal memory layout of an ndarray"
-section in the documentation.
-
-Binary operations with non-arrays as second argument
-----------------------------------------------------
-Binary operations of the form ``<array-or-subclass> * <non-array-subclass>``
-where ``<non-array-subclass>`` declares an ``__array_priority__`` higher than
-that of ``<array-or-subclass>`` will now unconditionally return
-*NotImplemented*, giving ``<non-array-subclass>`` a chance to handle the
-operation. Previously, `NotImplemented` would only be returned if
-``<non-array-subclass>`` actually implemented the reversed operation, and after
-a (potentially expensive) array conversion of ``<non-array-subclass>`` had been
-attempted. (`bug <https://github.com/numpy/numpy/issues/3375>`_, `pull request
-<https://github.com/numpy/numpy/pull/3501>`_)
-
-Function `median` used with `overwrite_input` only partially sorts array
-------------------------------------------------------------------------
-If `median` is used with `overwrite_input` option the input array will now only
-be partially sorted instead of fully sorted.
-
-Fix to financial.npv
---------------------
-The npv function had a bug. Contrary to what the documentation stated, it
-summed from indexes ``1`` to ``M`` instead of from ``0`` to ``M - 1``. The
-fix changes the returned value. The mirr function called the npv function,
-but worked around the problem, so that was also fixed and the return value
-of the mirr function remains unchanged.
-
-Runtime warnings when comparing NaN numbers
--------------------------------------------
-Comparing ``NaN`` floating point numbers now raises the ``invalid`` runtime
-warning. If a ``NaN`` is expected the warning can be ignored using np.errstate.
-E.g.::
-
- with np.errstate(invalid='ignore'):
- operation()
-
-
-New Features
-============
-
-
-Support for linear algebra on stacked arrays
---------------------------------------------
-The gufunc machinery is now used for np.linalg, allowing operations on
-stacked arrays and vectors. For example::
-
- >>> a
- array([[[ 1., 1.],
- [ 0., 1.]],
-
- [[ 1., 1.],
- [ 0., 1.]]])
-
- >>> np.linalg.inv(a)
- array([[[ 1., -1.],
- [ 0., 1.]],
-
- [[ 1., -1.],
- [ 0., 1.]]])
-
-In place fancy indexing for ufuncs
-----------------------------------
-The function ``at`` has been added to ufunc objects to apply a ufunc in
-place, without buffering, when fancy indexing is used. For example, the
-following will increment the first and second items in the array, and will
-increment the third item twice: ``numpy.add.at(arr, [0, 1, 2, 2], 1)``
-
-This is what many have mistakenly thought ``arr[[0, 1, 2, 2]] += 1`` would do,
-but that does not work as the incremented value of ``arr[2]`` is simply copied
-into the third slot in ``arr`` twice, not incremented twice.
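-
-A minimal sketch of the behaviour described above::
-
- >>> arr = np.zeros(4, dtype=int)
- >>> np.add.at(arr, [0, 1, 2, 2], 1)
- >>> arr
- array([1, 1, 2, 0])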
-
-New functions `partition` and `argpartition`
---------------------------------------------
-New functions to partially sort arrays via a selection algorithm.
-
-A ``partition`` by index ``k`` moves the ``k`` smallest elements to the front
-of an array. All elements before position ``k`` are then smaller than or equal
-to the value in position ``k``, and all elements following ``k`` are greater
-than or equal to the value in position ``k``. The ordering of the values
-within these bounds is undefined.
-A sequence of indices can be provided to sort all of them into their sorted
-position at once via iterative partitioning.
-This can be used to efficiently obtain order statistics like median or
-percentiles of samples.
-``partition`` has a linear time complexity of ``O(n)`` while a full sort has
-``O(n log(n))``.
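-
-A short sketch; since the ordering inside each partition is unspecified, only
-the guaranteed properties are shown::
-
- >>> a = np.array([7, 1, 5, 3, 9])
- >>> p = np.partition(a, 2)
- >>> p[2]  # the value a full sort would place at index 2
- 5
- >>> np.all(p[:2] <= p[2]) and np.all(p[3:] >= p[2])
- True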
-
-New functions `nanmean`, `nanvar` and `nanstd`
-----------------------------------------------
-New nan aware statistical functions are added. In these functions the
-results are what would be obtained if nan values were omitted from all
-computations.
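-
-A minimal sketch of the intended behaviour::
-
- >>> a = np.array([1., np.nan, 3.])
- >>> np.nanmean(a)
- 2.0
- >>> np.nanstd(a)
- 1.0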
-
-New functions `full` and `full_like`
-------------------------------------
-New convenience functions to create arrays filled with a specific value;
-complementary to the existing `zeros` and `zeros_like` functions.
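-
-A quick sketch of their use (the exact repr formatting is era dependent)::
-
- >>> np.full((2, 2), 7.0)
- array([[ 7.,  7.],
-        [ 7.,  7.]])
- >>> np.full_like(np.arange(3), 5)
- array([5, 5, 5])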
-
-IO compatibility with large files
----------------------------------
-Large NPZ files >2GB can be loaded on 64-bit systems.
-
-Building against OpenBLAS
--------------------------
-It is now possible to build numpy against OpenBLAS by editing site.cfg.
-
-New constant
-------------
-Euler's constant is now exposed in numpy as euler_gamma.
-
-New modes for qr
-----------------
-New modes 'complete', 'reduced', and 'raw' have been added to the qr
-factorization and the old 'full' and 'economic' modes are deprecated.
-The 'reduced' mode replaces the old 'full' mode and is likewise the default,
-so backward compatibility can be maintained by not specifying the mode.
-
-The 'complete' mode returns a full dimensional factorization, which can be
-useful for obtaining a basis for the orthogonal complement of the range
-space. The 'raw' mode returns arrays that contain the Householder
-reflectors and scaling factors that can be used in the future to apply q
-without needing to convert to a matrix. The 'economic' mode is simply
-deprecated; there isn't much use for it and it isn't any more efficient
-than the 'raw' mode.
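-
-A sketch of the new modes, showing only the returned shapes for a 6x4 input::
-
- >>> a = np.random.randn(6, 4)
- >>> q, r = np.linalg.qr(a, mode='reduced')   # the default
- >>> q.shape, r.shape
- ((6, 4), (4, 4))
- >>> q, r = np.linalg.qr(a, mode='complete')
- >>> q.shape, r.shape
- ((6, 6), (6, 4))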
-
-New `invert` argument to `in1d`
--------------------------------
-The function `in1d` now accepts an `invert` argument which, when `True`,
-causes the returned array to be inverted.
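-
-For example (a sketch; the boolean repr shown follows the printing style of
-this era)::
-
- >>> a = np.array([1, 2, 3, 4])
- >>> np.in1d(a, [2, 4], invert=True)
- array([ True, False,  True, False], dtype=bool)
-
-This is equivalent to, but faster than, ``~np.in1d(a, [2, 4])``.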
-
-Advanced indexing using `np.newaxis`
-------------------------------------
-It is now possible to use `np.newaxis`/`None` together with index
-arrays instead of only in simple indices. This means that
-``array[np.newaxis, [0, 1]]`` will now work as expected and select the first
-two rows while prepending a new axis to the array.
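-
-A minimal sketch::
-
- >>> a = np.arange(12).reshape(3, 4)
- >>> a[np.newaxis, [0, 1]].shape
- (1, 2, 4)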
-
-
-C-API
------
-New ufuncs can now be registered with builtin input types and a custom
-output type. Before this change, NumPy wouldn't be able to find the right
-ufunc loop function when the ufunc was called from Python, because the ufunc
-loop signature matching logic wasn't looking at the output operand type.
-Now the correct ufunc loop is found, as long as the user provides an output
-argument with the correct output type.
-
-runtests.py
------------
-A simple test runner script ``runtests.py`` was added. It also builds Numpy via
-``setup.py build`` and can be used to run tests easily during development.
-
-
-Improvements
-============
-
-IO performance improvements
----------------------------
-Performance in reading large files was improved by chunking (see also IO compatibility).
-
-Performance improvements to `pad`
----------------------------------
-The `pad` function has a new implementation, greatly improving performance for
-all inputs except `mode=<function>` (retained for backwards compatibility).
-Scaling with dimensionality is dramatically improved for rank >= 4.
-
-Performance improvements to `isnan`, `isinf`, `isfinite` and `byteswap`
------------------------------------------------------------------------
-`isnan`, `isinf`, `isfinite` and `byteswap` have been improved to take
-advantage of compiler builtins to avoid expensive calls to libc.
-This improves performance of these operations by about a factor of two on gnu
-libc systems.
-
-Performance improvements via SSE2 vectorization
------------------------------------------------
-Several functions have been optimized to make use of SSE2 CPU SIMD instructions.
-
-* Float32 and float64:
- * base math (`add`, `subtract`, `divide`, `multiply`)
- * `sqrt`
- * `minimum/maximum`
- * `absolute`
-* Bool:
- * `logical_or`
- * `logical_and`
- * `logical_not`
-
-This improves performance of these operations up to 4x/2x for float32/float64
-and up to 10x for bool depending on the location of the data in the CPU caches.
-The performance gain is greatest for in-place operations.
-
-In order to use the improved functions the SSE2 instruction set must be enabled
-at compile time. It is enabled by default on x86_64 systems. On x86_32 with a
-capable CPU it must be enabled by passing the appropriate flag to the CFLAGS
-build variable (-msse2 with gcc).
-
-Performance improvements to `median`
-------------------------------------
-`median` is now implemented in terms of `partition` instead of `sort` which
-reduces its time complexity from O(n log(n)) to O(n).
-If used with the `overwrite_input` option the array will now only be partially
-sorted instead of fully sorted.
-
-
-Overrideable operand flags in ufunc C-API
------------------------------------------
-When creating a ufunc, the default ufunc operand flags can be overridden
-via the new op_flags attribute of the ufunc object. For example, to set
-the operand flag for the first input to read/write::
-
- PyObject *ufunc = PyUFunc_FromFuncAndData(...);
- ufunc->op_flags[0] = NPY_ITER_READWRITE;
-
-This allows a ufunc to perform an operation in place. Also, global nditer flags
-can be overridden via the new iter_flags attribute of the ufunc object.
-For example, to set the reduce flag for a ufunc::
-
- ufunc->iter_flags = NPY_ITER_REDUCE_OK;
-
-
-Changes
-=======
-
-
-General
--------
-The function np.take now allows 0-d arrays as indices.
-
-The separate compilation mode is now enabled by default.
-
-Several changes to np.insert and np.delete:
-
-* Previously, negative indices and indices that pointed past the end of
- the array were simply ignored. Now, this will raise a FutureWarning or
- DeprecationWarning. In the future they will be treated like normal indexing treats
- them -- negative indices will wrap around, and out-of-bound indices will
- generate an error.
-* Previously, boolean indices were treated as if they were integers (always
- referring to either the 0th or 1st item in the array). In the future, they
- will be treated as masks. In this release, they raise a FutureWarning
- warning of this coming change.
-* In NumPy 1.7, np.insert already allowed the syntax
- `np.insert(arr, 3, [1,2,3])` to insert multiple items at a single position.
- In NumPy 1.8, this is also possible for `np.insert(arr, [3], [1, 2, 3])`,
- as sketched below.
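-
-A sketch of the two forms; both insert three items at position 3::
-
- >>> arr = np.array([0, 0, 0])
- >>> np.insert(arr, 3, [1, 2, 3])
- array([0, 0, 0, 1, 2, 3])
- >>> np.insert(arr, [3], [1, 2, 3])
- array([0, 0, 0, 1, 2, 3])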
-
-Padded regions from np.pad are now correctly rounded, not truncated.
-
-C-API Array Additions
----------------------
-Four new functions have been added to the array C-API.
-
-* PyArray_Partition
-* PyArray_ArgPartition
-* PyArray_SelectkindConverter
-* PyDataMem_NEW_ZEROED
-
-C-API Ufunc Additions
----------------------
-One new function has been added to the ufunc C-API that allows registering
-an inner loop for user types using the descr.
-
-* PyUFunc_RegisterLoopForDescr
-
-C-API Developer Improvements
-----------------------------
-The ``PyArray_Type`` instance creation function ``tp_new`` now
-uses ``tp_basicsize`` to determine how much memory to allocate.
-In previous releases only ``sizeof(PyArrayObject)`` bytes of
-memory were allocated, often requiring C-API subtypes to
-reimplement ``tp_new``.
-
-Deprecations
-============
-
-The 'full' and 'economic' modes of qr factorization are deprecated.
-
-General
--------
-The use of non-integers for indices and most integer arguments has been
-deprecated. Previously float indices and function arguments such as axes or
-shapes were truncated to integers without warning. For example
-`arr.reshape(3., -1)` or `arr[0.]` will trigger a deprecation warning in
-NumPy 1.8, and in some future version of NumPy they will raise an error.
-
-
-Authors
-=======
-
-This release contains work by the following people who contributed at least
-one patch to this release. The names are in alphabetical order by first name:
-
-* 87
-* Adam Ginsburg +
-* Adam Griffiths +
-* Alexander Belopolsky +
-* Alex Barth +
-* Alex Ford +
-* Andreas Hilboll +
-* Andreas Kloeckner +
-* Andreas Schwab +
-* Andrew Horton +
-* argriffing +
-* Arink Verma +
-* Bago Amirbekian +
-* Bartosz Telenczuk +
-* bebert218 +
-* Benjamin Root +
-* Bill Spotz +
-* Bradley M. Froehle
-* Carwyn Pelley +
-* Charles Harris
-* Chris
-* Christian Brueffer +
-* Christoph Dann +
-* Christoph Gohlke
-* Dan Hipschman +
-* Daniel +
-* Dan Miller +
-* daveydave400 +
-* David Cournapeau
-* David Warde-Farley
-* Denis Laxalde
-* dmuellner +
-* Edward Catmur +
-* Egor Zindy +
-* endolith
-* Eric Firing
-* Eric Fode
-* Eric Moore +
-* Eric Price +
-* Fazlul Shahriar +
-* Félix Hartmann +
-* Fernando Perez
-* Frank B +
-* Frank Breitling +
-* Frederic
-* Gabriel
-* GaelVaroquaux
-* Guillaume Gay +
-* Han Genuit
-* HaroldMills +
-* hklemm +
-* jamestwebber +
-* Jason Madden +
-* Jay Bourque
-* jeromekelleher +
-* Jesús Gómez +
-* jmozmoz +
-* jnothman +
-* Johannes Schönberger +
-* John Benediktsson +
-* John Salvatier +
-* John Stechschulte +
-* Jonathan Waltman +
-* Joon Ro +
-* Jos de Kloe +
-* Joseph Martinot-Lagarde +
-* Josh Warner (Mac) +
-* Jostein Bø Fløystad +
-* Juan Luis Cano Rodríguez +
-* Julian Taylor +
-* Julien Phalip +
-* K.-Michael Aye +
-* Kumar Appaiah +
-* Lars Buitinck
-* Leon Weber +
-* Luis Pedro Coelho
-* Marcin Juszkiewicz
-* Mark Wiebe
-* Marten van Kerkwijk +
-* Martin Baeuml +
-* Martin Spacek
-* Martin Teichmann +
-* Matt Davis +
-* Matthew Brett
-* Maximilian Albert +
-* m-d-w +
-* Michael Droettboom
-* mwtoews +
-* Nathaniel J. Smith
-* Nicolas Scheffer +
-* Nils Werner +
-* ochoadavid +
-* Ondřej Čertík
-* ovillellas +
-* Paul Ivanov
-* Pauli Virtanen
-* peterjc
-* Ralf Gommers
-* Raul Cota +
-* Richard Hattersley +
-* Robert Costa +
-* Robert Kern
-* Rob Ruana +
-* Ronan Lamy
-* Sandro Tosi
-* Sascha Peilicke +
-* Sebastian Berg
-* Skipper Seabold
-* Stefan van der Walt
-* Steve +
-* Takafumi Arakaki +
-* Thomas Robitaille +
-* Tomas Tomecek +
-* Travis E. Oliphant
-* Valentin Haenel
-* Vladimir Rutsky +
-* Warren Weckesser
-* Yaroslav Halchenko
-* Yury V. Zaytsev +
-
-A total of 119 people contributed to this release.
-People with a "+" by their names contributed a patch for the first time.
+++ /dev/null
-=========================
-NumPy 1.8.1 Release Notes
-=========================
-
-This is a bugfix only release in the 1.8.x series.
-
-
-Issues fixed
-============
-
-* gh-4276: Fix mean, var, std methods for object arrays
-* gh-4262: remove insecure mktemp usage
-* gh-2385: absolute(complex(inf)) raises invalid warning in python3
-* gh-4024: Sequence assignment doesn't raise exception on shape mismatch
-* gh-4027: Fix chunked reading of strings longer than BUFFERSIZE
-* gh-4109: Fix object scalar return type of 0-d array indices
-* gh-4018: fix missing check for memory allocation failure in ufuncs
-* gh-4156: high order linalg.norm discards imaginary elements of complex arrays
-* gh-4144: linalg: norm fails on longdouble, signed int
-* gh-4094: fix NaT handling in _strided_to_strided_string_to_datetime
-* gh-4051: fix uninitialized use in _strided_to_strided_string_to_datetime
-* gh-4093: Loading compressed .npz file fails under Python 2.6.6
-* gh-4138: segfault with non-native endian memoryview in python 3.4
-* gh-4123: Fix missing NULL check in lexsort
-* gh-4170: fix native-only long long check in memoryviews
-* gh-4187: Fix large file support on 32 bit
-* gh-4152: fromfile: ensure file handle positions are in sync in python3
-* gh-4176: clang compatibility: Typos in conversion_utils
-* gh-4223: Fetching a non-integer item caused array return
-* gh-4197: fix minor memory leak in memoryview failure case
-* gh-4206: fix build with single-threaded python
-* gh-4220: add versionadded:: 1.8.0 to ufunc.at docstring
-* gh-4267: improve handling of memory allocation failure
-* gh-4267: fix use of capi without gil in ufunc.at
-* gh-4261: Detect vendor versions of GNU Compilers
-* gh-4253: IRR was returning nan instead of valid negative answer
-* gh-4254: fix unnecessary byte order flag change for byte arrays
-* gh-3263: numpy.random.shuffle clobbers mask of a MaskedArray
-* gh-4270: np.random.shuffle not work with flexible dtypes
-* gh-3173: Segmentation fault when 'size' argument to random.multinomial
-* gh-2799: allow using unique with lists of complex
-* gh-3504: fix linspace truncation for integer array scalar
-* gh-4191: get_info('openblas') does not read libraries key
-* gh-3348: Access violation in _descriptor_from_pep3118_format
-* gh-3175: segmentation fault with numpy.array() from bytearray
-* gh-4266: histogramdd - wrong result for entries very close to last boundary
-* gh-4408: Fix stride_stricks.as_strided function for object arrays
-* gh-4225: fix log1p and expm1 return for np.inf on windows compiler builds
-* gh-4359: Fix infinite recursion in str.format of flex arrays
-* gh-4145: Incorrect shape of broadcast result with the exponent operator
-* gh-4483: Fix commutativity of {dot,multiply,inner}(scalar, matrix_of_objs)
-* gh-4466: Delay npyiter size check when size may change
-* gh-4485: Buffered stride was erroneously marked fixed
-* gh-4354: byte_bounds fails with datetime dtypes
-* gh-4486: segfault/error converting from/to high-precision datetime64 objects
-* gh-4428: einsum(None, None, None, None) causes segfault
-* gh-4134: uninitialized use for size 1 object reductions
-
-Changes
-=======
-
-NDIter
-------
-When ``NpyIter_RemoveAxis`` is called, the iterator range will now be reset.
-
-When a multi index is being tracked and an iterator is not buffered, it is
-possible to use ``NpyIter_RemoveAxis``. In this case an iterator can shrink
-in size. Because the total size of an iterator is limited, the iterator
-may be too large before these calls. In this case its size will be set to ``-1``
-and an error issued not at construction time but when removing the multi
-index, setting the iterator range, or getting the next function.
-
-This has no effect on currently working code, but highlights the necessity
-of checking for an error return if these conditions can occur. In most
-cases the arrays being iterated are as large as the iterator so that such
-a problem cannot occur.
-
-Optional reduced verbosity for np.distutils
--------------------------------------------
-Set ``numpy.distutils.system_info.system_info.verbosity = 0`` and then
-calls to ``numpy.distutils.system_info.get_info('blas_opt')`` will not
-print anything on the output. This is mostly for other packages using
-numpy.distutils.
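-
-A minimal sketch of the intended use::
-
- import numpy.distutils.system_info as system_info
-
- system_info.system_info.verbosity = 0
- info = system_info.get_info('blas_opt')  # no longer prints anything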
-
-Deprecations
-============
-
-C-API
------
-
-The utility functions npy_PyFile_Dup and npy_PyFile_DupClose are broken by the
-internal buffering that python 3 applies to its file objects.
-To fix this two new functions npy_PyFile_Dup2 and npy_PyFile_DupClose2 are
-declared in npy_3kcompat.h and the old functions are deprecated.
-Due to the fragile nature of these functions it is recommended to instead use
-the python API when possible.
+++ /dev/null
-=========================
-NumPy 1.8.2 Release Notes
-=========================
-
-This is a bugfix only release in the 1.8.x series.
-
-Issues fixed
-============
-
-* gh-4836: partition produces wrong results for multiple selections in equal ranges
-* gh-4656: Make fftpack._raw_fft threadsafe
-* gh-4628: incorrect argument order to _copyto in np.nanmax, np.nanmin
-* gh-4642: Hold GIL for converting dtypes types with fields
-* gh-4733: fix np.linalg.svd(b, compute_uv=False)
-* gh-4853: avoid unaligned simd load on reductions on i386
-* gh-4722: Fix seg fault converting empty string to object
-* gh-4613: Fix lack of NULL check in array_richcompare
-* gh-4774: avoid unaligned access for strided byteswap
-* gh-650: Prevent division by zero when creating arrays from some buffers
-* gh-4602: ifort has issues with optimization flag O2, use O1
+++ /dev/null
-=========================
-NumPy 1.9.0 Release Notes
-=========================
-
-This release supports Python 2.6 - 2.7 and 3.2 - 3.4.
-
-
-Highlights
-==========
-* Numerous performance improvements in various areas, most notably indexing and
- operations on small arrays are significantly faster.
- Indexing operations now also release the GIL.
-* Addition of `nanmedian` and `nanpercentile` rounds out the nanfunction set.
-
-
-Dropped Support
-===============
-
-* The oldnumeric and numarray modules have been removed.
-* The doc/pyrex and doc/cython directories have been removed.
-* The doc/numpybook directory has been removed.
-* The numpy/testing/numpytest.py file has been removed together with
- the importall function it contained.
-
-
-Future Changes
-==============
-
-* The numpy/polynomial/polytemplate.py file will be removed in NumPy 1.10.0.
-* Default casting for inplace operations will change to 'same_kind' in
- Numpy 1.10.0. This will certainly break some code that is currently
- ignoring the warning.
-* Relaxed stride checking will be the default in 1.10.0
-* String version checks will break because, e.g., '1.9' > '1.10' is True. A
- NumpyVersion class has been added that can be used for such comparisons.
-* The diagonal and diag functions will return writeable views in 1.10.0
-* The `S` and/or `a` dtypes may be changed to represent Python strings
- instead of bytes, in Python 3 these two types are very different.
-
-
-Compatibility notes
-===================
-
-The diagonal and diag functions return readonly views.
-------------------------------------------------------
-In NumPy 1.8, the diagonal and diag functions returned readonly copies, in
-NumPy 1.9 they return readonly views, and in 1.10 they will return writeable
-views.
-
-Special scalar float values don't cause upcast to double anymore
-----------------------------------------------------------------
-In previous numpy versions operations involving floating point scalars
-containing special values ``NaN``, ``Inf`` and ``-Inf`` caused the result
-type to be at least ``float64``. As the special values can be represented
-in the smallest available floating point type, the upcast is not performed
-anymore.
-
-For example the dtype of::
-
- np.array([1.], dtype=np.float32) * float('nan')
-
-now remains ``float32`` instead of being cast to ``float64``.
-Operations involving non-special values have not been changed.
-
-Percentile output changes
--------------------------
-If given more than one percentile to compute, numpy.percentile returns an
-array instead of a list. A single percentile still returns a scalar. The
-array is equivalent to converting the list returned in older versions
-to an array via ``np.array``.
-
-If the ``overwrite_input`` option is used the input is only partially
-instead of fully sorted.
-
-ndarray.tofile exception type
------------------------------
-All ``tofile`` exceptions are now ``IOError``, some were previously
-``ValueError``.
-
-Invalid fill value exceptions
------------------------------
-Two changes to numpy.ma.core._check_fill_value:
-
-* When the fill value is a string and the array type is not one of
- 'OSUV', TypeError is raised instead of the default fill value being used.
-
-* When the fill value overflows the array type, TypeError is raised instead
- of OverflowError.
-
-Polynomial Classes no longer derived from PolyBase
---------------------------------------------------
-This may cause problems with folks who depended on the polynomial classes
-being derived from PolyBase. They are now all derived from the abstract
-base class ABCPolyBase. Strictly speaking, there should be a deprecation
-involved, but no external code making use of the old baseclass could be
-found.
-
-Using numpy.random.binomial may change the RNG state vs. numpy < 1.9
---------------------------------------------------------------------
-A bug in one of the algorithms to generate a binomial random variate has
-been fixed. This change will likely alter the number of random draws
-performed, and hence the sequence location will be different after a
-call to distribution.c::rk_binomial_btpe. Any tests which rely on the RNG
-being in a known state should be checked and/or updated as a result.
-
-Random seed enforced to be a 32 bit unsigned integer
-----------------------------------------------------
-``np.random.seed`` and ``np.random.RandomState`` now throw a ``ValueError``
-if the seed cannot safely be converted to 32 bit unsigned integers.
-Applications that now fail can be fixed by masking the higher 32 bit values to
-zero: ``seed = seed & 0xFFFFFFFF``. This is what is done silently in older
-versions so the random stream remains the same.
-
-Argmin and argmax out argument
-------------------------------
-The ``out`` argument to ``np.argmin`` and ``np.argmax`` and their
-equivalent C-API functions is now checked to match the desired output shape
-exactly. If the check fails a ``ValueError`` instead of ``TypeError`` is
-raised.
-
-Einsum
-------
-Remove unnecessary broadcasting notation restrictions.
-``np.einsum('ijk,j->ijk', A, B)`` can also be written as
-``np.einsum('ij...,j->ij...', A, B)`` (ellipsis is no longer required on 'j')
-
-Indexing
---------
-
-The NumPy indexing has seen a complete rewrite in this version. This makes
-most advanced integer indexing operations much faster and should have no
-other implications. However some subtle changes and deprecations were
-introduced in advanced indexing operations:
-
-* Boolean indexing into scalar arrays will always return a new 1-d array.
- This means that ``array(1)[array(True)]`` gives ``array([1])`` and
- not the original array.
-
-* Advanced indexing into one dimensional arrays used to have
- (undocumented) special handling regarding repeating the value array in
- assignments when the shape of the value array was too small or did not
- match. Code using this will raise an error. For compatibility you can
- use ``arr.flat[index] = values``, which uses the old code branch. (for
- example ``a = np.ones(10); a[np.arange(10)] = [1, 2, 3]``)
-
-* The iteration order over advanced indexes used to be always C-order.
- In NumPy 1.9, the iteration order adapts to the inputs and is not
- guaranteed (with the exception of a *single* advanced index which is
- never reversed for compatibility reasons). This means that the result
- is undefined if multiple values are assigned to the same element. An
- example for this is ``arr[[0, 0], [1, 1]] = [1, 2]``, which may set
- ``arr[0, 1]`` to either 1 or 2.
-
-* Equivalent to the iteration order, the memory layout of the advanced
- indexing result is adapted for faster indexing and cannot be predicted.
-
-* All indexing operations return a view or a copy. No indexing operation
- will return the original array object. (For example ``arr[...]``)
-
-* In the future Boolean array-likes (such as lists of python bools) will
- always be treated as Boolean indexes and Boolean scalars (including
- python ``True``) will be a legal *boolean* index. At this time, this is
- already the case for scalar arrays to allow the general
- ``positive = a[a > 0]`` to work when ``a`` is zero dimensional.
-
-* In NumPy 1.8 it was possible to use ``array(True)`` and
- ``array(False)`` as equivalents to 1 and 0 if the result of the operation
- was a scalar. This will raise an error in NumPy 1.9 and, as noted
- above, they will be treated as boolean indexes in the future.
-
-* All non-integer array-likes are deprecated; object arrays of custom
- integer-like objects may have to be cast explicitly.
-
-* The error reporting for advanced indexing is more informative, however
- the error type has changed in some cases. (Broadcasting errors of
- indexing arrays are reported as ``IndexError``)
-
-* Indexing with more than one ellipsis (``...``) is deprecated.
-
-Non-integer reduction axis indexes are deprecated
--------------------------------------------------
-Non-integer axis indexes to reduction ufuncs like `add.reduce` or `sum` are
-deprecated.
-
-``promote_types`` and string dtype
-----------------------------------
-``promote_types`` function now returns a valid string length when given an
-integer or float dtype as one argument and a string dtype as another
-argument. Previously it always returned the input string dtype, even if it
-wasn't long enough to store the max integer/float value converted to a
-string.
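-
-A sketch of the new behaviour; the exact length (here assumed to be ``S21``
-for int64) is whatever is needed to hold the largest value of the type::
-
- >>> np.promote_types(np.int64, np.dtype('S5'))
- dtype('S21')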
-
-``can_cast`` and string dtype
------------------------------
-``can_cast`` function now returns False in "safe" casting mode for
-integer/float dtype and string dtype if the string dtype length is not long
-enough to store the max integer/float value converted to a string.
-Previously ``can_cast`` in "safe" mode returned True for integer/float
-dtype and a string dtype of any length.
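-
-Continuing the sketch above (the ``S21`` length is an assumption, as before)::
-
- >>> np.can_cast(np.int64, np.dtype('S5'), casting='safe')
- False
- >>> np.can_cast(np.int64, np.dtype('S21'), casting='safe')
- True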
-
-astype and string dtype
------------------------
-The ``astype`` method now returns an error if the string dtype to cast to
-is not long enough in "safe" casting mode to hold the max value of
-integer/float array that is being cast. Previously the casting was
-allowed even if the result was truncated.
-
-`npyio.recfromcsv` keyword arguments change
--------------------------------------------
-`npyio.recfromcsv` no longer accepts the undocumented `update` keyword,
-which used to override the `dtype` keyword.
-
-The ``doc/swig`` directory moved
---------------------------------
-The ``doc/swig`` directory has been moved to ``tools/swig``.
-
-The ``npy_3kcompat.h`` header changed
--------------------------------------
-The unused ``simple_capsule_dtor`` function has been removed from
-``npy_3kcompat.h``. Note that this header is not meant to be used outside
-of numpy; other projects should be using their own copy of this file when
-needed.
-
-Negative indices in C-Api ``sq_item`` and ``sq_ass_item`` sequence methods
---------------------------------------------------------------------------
-When directly accessing the ``sq_item`` or ``sq_ass_item`` PyObject slots
-for item getting, negative indices will not be supported anymore.
-``PySequence_GetItem`` and ``PySequence_SetItem`` however fix negative
-indices so that they can be used there.
-
-NDIter
-------
-When ``NpyIter_RemoveAxis`` is called, the iterator range will now be reset.
-
-When a multi index is being tracked and an iterator is not buffered, it is
-possible to use ``NpyIter_RemoveAxis``. In this case an iterator can shrink
-in size. Because the total size of an iterator is limited, the iterator
-may be too large before these calls. In this case its size will be set to ``-1``
-and an error issued not at construction time but when removing the multi
-index, setting the iterator range, or getting the next function.
-
-This has no effect on currently working code, but highlights the necessity
-of checking for an error return if these conditions can occur. In most
-cases the arrays being iterated are as large as the iterator so that such
-a problem cannot occur.
-
-This change was already applied to the 1.8.1 release.
-
-``zeros_like`` for string dtypes now returns empty strings
-----------------------------------------------------------
-To match the `zeros` function `zeros_like` now returns an array initialized
-with empty strings instead of an array filled with `'0'`.
-
-
-New Features
-============
-
-Percentile supports more interpolation options
-----------------------------------------------
-``np.percentile`` now has the interpolation keyword argument to specify in
-which way points should be interpolated if the percentiles fall between two
-values. See the documentation for the available options.
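-
-A sketch of some of the options on a small sample::
-
- >>> a = np.array([1, 2, 3, 4])
- >>> np.percentile(a, 50, interpolation='linear')
- 2.5
- >>> np.percentile(a, 50, interpolation='lower')
- 2
- >>> np.percentile(a, 50, interpolation='higher')
- 3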
-
-Generalized axis support for median and percentile
---------------------------------------------------
-``np.median`` and ``np.percentile`` now support generalized axis arguments like
-ufunc reductions do since 1.7. One can now say axis=(index, index) to pick a
-list of axes for the reduction. The ``keepdims`` keyword argument was also
-added to allow convenient broadcasting to arrays of the original shape.
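-
-For example, a sketch showing shapes only::
-
- >>> a = np.arange(24).reshape(2, 3, 4)
- >>> np.median(a, axis=(0, 2)).shape
- (3,)
- >>> np.median(a, axis=(0, 2), keepdims=True).shape
- (1, 3, 1)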
-
-Dtype parameter added to ``np.linspace`` and ``np.logspace``
-------------------------------------------------------------
-The returned data type from the ``linspace`` and ``logspace`` functions can
-now be specified using the dtype parameter.
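-
-A minimal sketch::
-
- >>> np.linspace(0, 10, 5, dtype=int)
- array([ 0,  2,  5,  7, 10])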
-
-More general ``np.triu`` and ``np.tril`` broadcasting
------------------------------------------------------
-For arrays with ``ndim`` exceeding 2, these functions will now apply to the
-final two axes instead of raising an exception.
-
-``tobytes`` alias for ``tostring`` method
------------------------------------------
-``ndarray.tobytes`` and ``MaskedArray.tobytes`` have been added as aliases
-for ``tostring`` which exports arrays as ``bytes``. This is more consistent
-in Python 3 where ``str`` and ``bytes`` are not the same.
-
-Build system
-------------
-Added experimental support for the ppc64le and OpenRISC architectures.
-
-Compatibility to python ``numbers`` module
-------------------------------------------
-All numerical numpy types are now registered with the type hierarchy in
-the python ``numbers`` module.
-
-``increasing`` parameter added to ``np.vander``
------------------------------------------------
-The ordering of the columns of the Vandermonde matrix can be specified with
-this new boolean argument.
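-
-For example (a sketch)::
-
- >>> np.vander([1, 2, 3], 3, increasing=True)
- array([[1, 1, 1],
-        [1, 2, 4],
-        [1, 3, 9]])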
-
-``return_counts`` parameter added to ``np.unique``
---------------------------------------------------
-The number of times each unique item comes up in the input can now be
-obtained as an optional return value.
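-
-For example (a sketch)::
-
- >>> np.unique([1, 1, 2, 2, 2, 3], return_counts=True)
- (array([1, 2, 3]), array([2, 3, 1]))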
-
-Support for median and percentile in nanfunctions
--------------------------------------------------
-The ``np.nanmedian`` and ``np.nanpercentile`` functions behave like
-the median and percentile functions except that NaNs are ignored.
-
-NumpyVersion class added
-------------------------
-The class may be imported from numpy.lib and can be used for version
-comparison when the numpy version goes to 1.10.devel. For example::
-
- >>> from numpy.lib import NumpyVersion
- >>> if NumpyVersion(np.__version__) < '1.10.0':
- ... print('Wow, that is an old NumPy version!')
-
-Allow saving arrays with large number of named columns
-------------------------------------------------------
-The numpy storage format 1.0 only allowed the array header to have a total size
-of 65535 bytes. This can be exceeded by structured arrays with a large number
-of columns. A new format 2.0 has been added which extends the header size to 4
-GiB. `np.save` will automatically save in 2.0 format if the data requires it,
-else it will always use the more compatible 1.0 format.
-
-Full broadcasting support for ``np.cross``
-------------------------------------------
-``np.cross`` now properly broadcasts its two input arrays, even if they
-have different numbers of dimensions. In earlier versions this would result
-in either an error being raised, or wrong results computed.
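-
-A sketch, broadcasting a single vector against a stack of vectors::
-
- >>> u = np.array([1., 0., 0.])
- >>> v = np.ones((2, 3))
- >>> np.cross(u, v)
- array([[ 0., -1.,  1.],
-        [ 0., -1.,  1.]])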
-
-
-Improvements
-============
-
-Better numerical stability for sum in some cases
-------------------------------------------------
-Pairwise summation is now used in the sum method, but only along the fast
-axis and for groups of values <= 8192 in length. This should also
-improve the accuracy of var and std in some common cases.
-
-Percentile implemented in terms of ``np.partition``
----------------------------------------------------
-``np.percentile`` has been implemented in terms of ``np.partition`` which
-only partially sorts the data via a selection algorithm. This improves the
-time complexity from ``O(nlog(n))`` to ``O(n)``.
-
-Performance improvement for ``np.array``
-----------------------------------------
-The performance of converting lists containing arrays to arrays using
-``np.array`` has been improved. It is now equivalent in speed to
-``np.vstack(list)``.
-
-Performance improvement for ``np.searchsorted``
------------------------------------------------
-For the built-in numeric types, ``np.searchsorted`` no longer relies on the
-data type's ``compare`` function to perform the search, but is now
-implemented by type specific functions. Depending on the size of the
-inputs, this can result in performance improvements over 2x.
-
-Optional reduced verbosity for np.distutils
--------------------------------------------
-Set ``numpy.distutils.system_info.system_info.verbosity = 0`` and then
-calls to ``numpy.distutils.system_info.get_info('blas_opt')`` will not
-print anything on the output. This is mostly for other packages using
-numpy.distutils.
-
-Covariance check in ``np.random.multivariate_normal``
------------------------------------------------------
-A ``RuntimeWarning`` is raised when the covariance matrix is not
-positive-semidefinite.
-
-Polynomial Classes no longer template based
--------------------------------------------
-The polynomial classes have been refactored to use an abstract base class
-rather than a template in order to implement a common interface. This makes
-importing the polynomial package faster as the classes do not need to be
-compiled on import.
-
-More GIL releases
------------------
-Several more functions now release the Global Interpreter Lock allowing more
-efficient parallelization using the ``threading`` module. Most notably the GIL
-is now released for fancy indexing and ``np.where``, and the ``random`` module
-now uses a per-state lock instead of the GIL.
-
-MaskedArray support for more complicated base classes
------------------------------------------------------
-Built-in assumptions that the baseclass behaved like a plain array are being
-removed. In particular, ``repr`` and ``str`` should now work more reliably.
-
-
-C-API
------
-
-
-Deprecations
-============
-
-Non-integer scalars for sequence repetition
--------------------------------------------
-Using non-integer numpy scalars to repeat python sequences is deprecated.
-For example ``np.float_(2) * [1]`` will be an error in the future.
-
-``select`` input deprecations
------------------------------
-The integer and empty input to ``select`` is deprecated. In the future only
-boolean arrays will be valid conditions and an empty ``condlist`` will be
-considered an input error instead of returning the default.
-
-``rank`` function
------------------
-The ``rank`` function has been deprecated to avoid confusion with
-``numpy.linalg.matrix_rank``.
-
-Object array equality comparisons
----------------------------------
-In the future object array comparisons both `==` and `np.equal` will not
-make use of identity checks anymore. For example:
-
->>> a = np.array([np.array([1, 2, 3]), 1])
->>> b = np.array([np.array([1, 2, 3]), 1])
->>> a == b
-
-will consistently return False (and in the future an error) even if the array
-in `a` and `b` was the same object.
-
-The equality operator `==` will in the future raise errors like `np.equal`
-if broadcasting or element comparisons, etc. fail.
-
-Comparison with `arr == None` will in the future do an elementwise comparison
-instead of just returning False. Code should be using `arr is None`.
-
-All of these changes will give Deprecation- or FutureWarnings at this time.
-
-C-API
------
-
-The utility functions npy_PyFile_Dup and npy_PyFile_DupClose are broken by the
-internal buffering that python 3 applies to its file objects.
-To fix this two new functions npy_PyFile_Dup2 and npy_PyFile_DupClose2 are
-declared in npy_3kcompat.h and the old functions are deprecated.
-Due to the fragile nature of these functions it is recommended to instead use
-the python API when possible.
-
-This change was already applied to the 1.8.1 release.
+++ /dev/null
-=========================
-NumPy 1.9.1 Release Notes
-=========================
-
-This is a bugfix only release in the 1.9.x series.
-
-Issues fixed
-============
-
-* gh-5184: restore linear edge behaviour of gradient to as it was in < 1.9.
- The second order behaviour is available via the `edge_order` keyword.
-* gh-4007: workaround Accelerate sgemv crash on OSX 10.9
-* gh-5100: restore object dtype inference from iterable objects without `len()`
-* gh-5163: avoid gcc-4.1.2 (red hat 5) miscompilation causing a crash
-* gh-5138: fix nanmedian on arrays containing inf
-* gh-5240: fix not returning out array from ufuncs with subok=False set
-* gh-5203: copy inherited masks in MaskedArray.__array_finalize__
-* gh-2317: genfromtxt did not handle filling_values=0 correctly
-* gh-5067: restore api of npy_PyFile_DupClose in python2
-* gh-5063: cannot convert invalid sequence index to tuple
-* gh-5082: Segmentation fault with argmin() on unicode arrays
-* gh-5095: don't propagate subtypes from np.where
-* gh-5104: np.inner segfaults with SciPy's sparse matrices
-* gh-5251: Issue with fromarrays not using correct format for unicode arrays
-* gh-5136: Import dummy_threading if importing threading fails
-* gh-5148: Make numpy import when run with Python flag '-OO'
-* gh-5147: Einsum double contraction in particular order causes ValueError
-* gh-479: Make f2py work with intent(in out)
-* gh-5170: Make python2 .npy files readable in python3
-* gh-5027: Use 'll' as the default length specifier for long long
-* gh-4896: fix build error with MSVC 2013 caused by C99 complex support
-* gh-4465: Make PyArray_PutTo respect writeable flag
-* gh-5225: fix crash when using arange on datetime without dtype set
-* gh-5231: fix build in c99 mode
+++ /dev/null
-=========================
-NumPy 1.9.2 Release Notes
-=========================
-
-This is a bugfix only release in the 1.9.x series.
-
-Issues fixed
-============
-
-* `#5316 <https://github.com/numpy/numpy/issues/5316>`__: fix too large dtype alignment of strings and complex types
-* `#5424 <https://github.com/numpy/numpy/issues/5424>`__: fix ma.median when used on ndarrays
-* `#5481 <https://github.com/numpy/numpy/issues/5481>`__: Fix astype for structured array fields of different byte order
-* `#5354 <https://github.com/numpy/numpy/issues/5354>`__: fix segfault when clipping complex arrays
-* `#5524 <https://github.com/numpy/numpy/issues/5524>`__: allow np.argpartition on non ndarrays
-* `#5612 <https://github.com/numpy/numpy/issues/5612>`__: Fixes ndarray.fill to accept full range of uint64
-* `#5155 <https://github.com/numpy/numpy/issues/5155>`__: Fix loadtxt with comments=None and a string None data
-* `#4476 <https://github.com/numpy/numpy/issues/4476>`__: Masked array view fails if structured dtype has datetime component
-* `#5388 <https://github.com/numpy/numpy/issues/5388>`__: Make RandomState.set_state and RandomState.get_state threadsafe
-* `#5390 <https://github.com/numpy/numpy/issues/5390>`__: make seed, randint and shuffle threadsafe
-* `#5374 <https://github.com/numpy/numpy/issues/5374>`__: Fixed incorrect assert_array_almost_equal_nulp documentation
-* `#5393 <https://github.com/numpy/numpy/issues/5393>`__: Add support for ATLAS > 3.9.33.
-* `#5313 <https://github.com/numpy/numpy/issues/5313>`__: PyArray_AsCArray caused segfault for 3d arrays
-* `#5492 <https://github.com/numpy/numpy/issues/5492>`__: handle out of memory in rfftf
-* `#4181 <https://github.com/numpy/numpy/issues/4181>`__: fix a few bugs in the random.pareto docstring
-* `#5359 <https://github.com/numpy/numpy/issues/5359>`__: minor changes to linspace docstring
-* `#4723 <https://github.com/numpy/numpy/issues/4723>`__: fix a compile issues on AIX
+++ /dev/null
-==========================
-NumPy 1.xx.x Release Notes
-==========================
-
-
-Highlights
-==========
-
-
-New functions
-=============
-
-
-Deprecations
-============
-
-
-Future Changes
-==============
-
-
-Expired deprecations
-====================
-
-
-Compatibility notes
-===================
-
-
-C API changes
-=============
-
-
-New Features
-============
-
-
-Improvements
-============
-
-
-Changes
-=======
+++ /dev/null
-.. vim:syntax=rst
-
-Introduction
-============
-
-This document proposes some enhancements for numpy and scipy releases.
-Successive numpy and scipy releases are too far apart from a time point of
-view - some people on the numpy release team feel that the process cannot
-improve without a somewhat more formal release process. The main proposal is to
-follow a time-based release, with expected dates for code freeze, beta and rc.
-The goal is twofold: make releases more predictable, and move the code forward.
-
-Rationale
-=========
-
-Right now, the release process of numpy is relatively organic. When some
-features are there, we may decide to make a new release. Because there is no
-fixed schedule, people don't really know when new features and bug fixes will
-go into a release. More significantly, having an expected release schedule
-helps to *coordinate* efforts: at the beginning of a cycle, everybody can jump
-in and add new code, even break things if needed. But after some point, only
-bug fixes are accepted: this makes beta and RC releases much easier; calming
-things down toward the release date helps focusing on bugs and regressions.
-
-Proposal
-========
-
-Time schedule
--------------
-
-The proposed schedule is to release numpy every 9 weeks - the exact period can
-be tweaked if it ends up not working as expected. There will be several stages
-for the cycle:
-
- * Development: anything can happen (by anything, we mean as currently
- done). The focus is on new features, refactoring, etc...
-
- * Beta: no new features. No bug fixing which requires heavy changes;
- only regression fixes which appear on supported platforms and were not
- caught earlier are accepted.
-
- * Polish/RC: only docstring changes and blocker regressions are allowed.
-
-The schedule would be as follows:
-
- +------+-----------------+-----------------+------------------+
- | Week | 1.3.0 | 1.4.0 | Release time |
- +======+=================+=================+==================+
- | 1 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 2 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 3 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 4 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 5 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 6 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 7 | Beta | | |
- +------+-----------------+-----------------+------------------+
- | 8 | Beta | | |
- +------+-----------------+-----------------+------------------+
- | 9 | Beta | | 1.3.0 released |
- +------+-----------------+-----------------+------------------+
- | 10 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 11 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 12 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 13 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 14 | | Development | |
- +------+-----------------+-----------------+------------------+
- | 15 | | Development | |
- +------+-----------------+-----------------+------------------+
- | 16 | | Beta | |
- +------+-----------------+-----------------+------------------+
- | 17 | | Beta | |
- +------+-----------------+-----------------+------------------+
- | 18 | | Beta | 1.4.0 released |
- +------+-----------------+-----------------+------------------+
-
-Each stage can be defined as follows:
-
- +------------------+-------------+----------------+----------------+
- | | Development | Beta | Polish |
- +==================+=============+================+================+
- | Python Frozen | | slushy | Y |
- +------------------+-------------+----------------+----------------+
- | Docstring Frozen | | slushy | thicker slush |
- +------------------+-------------+----------------+----------------+
- | C code Frozen | | thicker slush | thicker slush |
- +------------------+-------------+----------------+----------------+
-
-Terminology:
-
- * slushy: you can change it if you beg the release team and it's really
- important and you coordinate with docs/translations; no "big"
- changes.
-
- * thicker slush: you can change it if it's an open bug marked
- showstopper for the Polish release, you beg the release team, the
- change is very very small yet very very important, and you feel
- extremely guilty about your transgressions.
-
-The different frozen states are intended to be gradients. The exact meaning is
-decided by the release manager, who has the last word on what goes in and what
-doesn't. The proposed schedule means that there would be at most 12 weeks
-between putting code into the source code repository and that code being
-released.
-
-Release team
-------------
-
-For every release, there would be at least one release manager. We propose to
-rotate the release manager: rotation means it is not always the same person
-doing the dirty job, and it should also keep the release manager honest.
-
-References
-==========
-
- * Proposed schedule for Gnome from Havoc Pennington (one of the core
-   GTK and Gnome managers):
-   https://mail.gnome.org/archives/gnome-hackers/2002-June/msg00041.html
-   The schedule proposed above is heavily based on this email.
-
- * https://wiki.gnome.org/ReleasePlanning/Freezes
+++ /dev/null
-Remove ``numpy.random.entropy`` without a deprecation
------------------------------------------------------
-
-``numpy.random.entropy`` was added to the `numpy.random` namespace in 1.17.0.
-It was meant to be a private C-extension module, but was exposed as public.
-It has been replaced by `numpy.random.SeedSequence`, so the module was
-completely removed.
+++ /dev/null
-`numpy.random.randint` produced incorrect value when the range was ``2**32``
-----------------------------------------------------------------------------
-The implementation introduced in 1.17.0 had an incorrect check when
-determining whether to use the 32-bit path or the full 64-bit path, which
-redirected random integer generation with a ``high - low`` range of
-``2**32`` to the 64-bit generator.
--- /dev/null
+:orphan:
+
+Changelog
+=========
+
+This directory contains "news fragments": short files containing
+**ReST**-formatted text that will be added to the next "what's new" page.
+
+Make sure to use full sentences with correct case and punctuation, and please
+try to use Sphinx intersphinx cross-references using backticks. The fragment
+should have a header line and an underline using ``------``.
+
+Each file should be named like ``<PULL REQUEST>.<TYPE>.rst``, where
+``<PULL REQUEST>`` is a pull request number, and ``<TYPE>`` is one of:
+
+* ``new_function``: New user-facing functions.
+* ``deprecation``: Changes existing code to emit a DeprecationWarning.
+* ``future``: Changes existing code to emit a FutureWarning.
+* ``expired``: Removal of a deprecated part of the API.
+* ``compatibility``: A change which requires users to change code and is not
+  backwards compatible. (Not to be used for removal of deprecated features.)
+* ``c_api``: Changes in the NumPy C-API exported functions.
+* ``new_feature``: New user-facing features like ``kwargs``.
+* ``improvement``: Performance and edge-case changes.
+* ``change``: Other changes.
+* ``highlight``: Adds a bullet point to use as a possible highlight of the
+  release.
+
+Most categories should be formatted as paragraphs with a heading.
+So for example: ``123.new_feature.rst`` would have the content::
+
+ ``my_new_feature`` option for `my_favorite_function`
+ ----------------------------------------------------
+ The ``my_new_feature`` option is now available for `my_favorite_function`.
+ To use it, write ``np.my_favorite_function(..., my_new_feature=True)``.
+
+``highlight`` fragments are usually formatted as bullet points, making the
+fragment ``* This is a highlight``.
+
+Note the use of single-backticks to get an internal link (assuming
+``my_favorite_function`` is exported from the ``numpy`` namespace),
+and double-backticks for code.
+
+If you are unsure what pull request type to use, don't hesitate to ask in your
+PR.
+
+You can install ``towncrier`` and run ``towncrier --draft --version 1.18``
+if you want to get a preview of how your change will look in the final release
+notes.
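+
+For instance, previewing from the repository root (the version number here is
+just an example) might look like::
+
+    $ pip install towncrier
+    $ towncrier --draft --version 1.18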
+
+.. note::
+
+ This README was adapted from the pytest changelog readme under the terms of
+ the MIT licence.
+
--- /dev/null
+{% set title = "NumPy {} Release Notes".format(versiondata.version) %}
+{{ "=" * title|length }}
+{{ title }}
+{{ "=" * title|length }}
+
+{% for section, _ in sections.items() %}
+{% set underline = underlines[0] %}{% if section %}{{ section }}
+{{ underline * section|length }}{% set underline = underlines[1] %}
+
+{% endif %}
+{% if sections[section] %}
+{% for category, val in definitions.items() if category in sections[section] %}
+
+{{ definitions[category]['name'] }}
+{{ underline * definitions[category]['name']|length }}
+
+{% if definitions[category]['showcontent'] %}
+{% for text, values in sections[section][category].items() %}
+{{ text }}
+{{ get_indent(text) }}({{values|join(', ') }})
+
+{% endfor %}
+{% else %}
+- {{ sections[section][category]['']|join(', ') }}
+
+{% endif %}
+{% if sections[section][category]|length == 0 %}
+No significant changes.
+
+{% else %}
+{% endif %}
+{% endfor %}
+{% else %}
+No significant changes.
+
+
+{% endif %}
+{% endfor %}
--- /dev/null
+{% if objtype == 'property' %}
+:orphan:
+{% endif %}
+
+{{ fullname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+{% if objtype == 'property' %}
+property
+{% endif %}
+
+.. auto{{ objtype }}:: {{ objname }}
+
<h3>Resources</h3>
<ul>
+ <li><a href="https://numpy.org/">NumPy.org website</a></li>
<li><a href="https://scipy.org/">Scipy.org website</a></li>
</ul>
{% extends "!layout.html" %}
+{%- block header %}
+<div class="container">
+ <div class="top-scipy-org-logo-header" style="background-color: #a2bae8;">
+ <a href="{{ pathto('index') }}">
+ <img border=0 alt="NumPy" src="{{ pathto('_static/numpy_logo.png', 1) }}"></a>
+  </div>
+</div>
+
+{% endblock %}
{% block rootrellink %}
{% if pagename != 'index' %}
<li class="active"><a href="{{ pathto('index') }}">{{ shorttitle|e }}</a></li>
import sys, os, re
-# Check Sphinx version
-import sphinx
-if sphinx.__version__ < "1.2.1":
- raise RuntimeError("Sphinx 1.2.1 or newer required")
-
-needs_sphinx = '1.0'
+# Minimum version, enforced by sphinx
+needs_sphinx = '2.2.0'
# -----------------------------------------------------------------------------
# General configuration
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
+ 'sphinx.ext.imgmath',
]
-if sphinx.__version__ >= "1.4":
- extensions.append('sphinx.ext.imgmath')
- imgmath_image_format = 'svg'
-else:
- extensions.append('sphinx.ext.pngmath')
+imgmath_image_format = 'svg'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
+master_doc = 'contents'
+
# General substitutions.
project = 'NumPy'
copyright = '2008-2019, The SciPy community'
def setup(app):
# add a config value for `ifconfig` directives
app.add_config_value('python_version_major', str(sys.version_info.major), 'env')
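+    # Register the NumPyC lexer (defined later in this file); it extends the
+    # C lexer so that identifiers containing '@' highlight correctly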
+ app.add_lexer('NumPyC', NumPyLexer(stripnl=False))
# -----------------------------------------------------------------------------
# HTML output
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
- "rootlinks": []
+ "rootlinks": [("https://numpy.org/", "NumPy.org"),
+ ("https://numpy.org/doc", "Docs"),
+ ]
}
html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
# not chapters.
#latex_use_parts = False
+latex_elements = {
+ 'fontenc': r'\usepackage[LGR,T1]{fontenc}'
+}
+
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
from pygments.lexers import CLexer
from pygments import token
-from sphinx.highlighting import lexers
import copy
class NumPyLexer(CLexer):
name = 'NUMPYLEXER'
- tokens = copy.deepcopy(lexers['c'].tokens)
+ tokens = copy.deepcopy(CLexer.tokens)
# Extend the regex for valid identifiers with @
for k, val in tokens.items():
for i, v in enumerate(val):
if isinstance(v, tuple):
if isinstance(v[0], str):
val[i] = (v[0].replace('a-zA-Z', 'a-zA-Z@'),) + v[1:]
-
-lexers['NumPyC'] = NumPyLexer(stripnl=False)
Setting up and using your development environment
=================================================
+.. _recommended-development-setup:
+
Recommended development setup
-----------------------------
Since NumPy contains parts written in C and Cython that need to be
compiled before use, make sure you have the necessary compilers and Python
development headers installed - see :ref:`building-from-source`. Building
-NumPy as of version ``1.17`` requires a C99 compliant compiler. For
-some older compilers this may require ``export CFLAGS='-std=c99'``.
+NumPy as of version ``1.17`` requires a C99 compliant compiler.
Having compiled code also means that importing NumPy from the development
sources needs some additional steps, which are explained below. For the rest
of this chapter we assume that you have set up your git repo as described in
:ref:`using-git`.
+.. _testing-builds:
+
+Testing builds
+--------------
+
To build the development version of NumPy and run tests, spawn
interactive shells with the Python import paths properly set up etc.,
do one of::
$ python runtests.py -v -t numpy/core/tests/test_multiarray.py -- -k "MatMul and not vector"
+.. note::
+
+ Remember that all tests of NumPy should pass before committing your changes.
+
Using ``runtests.py`` is the recommended approach to running tests.
There are also a number of alternatives to it, for example in-place
build or installing to a virtualenv. See the FAQ below for details.
+.. note::
+
+ Some of the tests in the test suite require a large amount of
+ memory, and are skipped if your system does not have enough.
+
+ To override the automatic detection of available memory, set the
+ environment variable ``NPY_AVAILABLE_MEM``, for example
+ ``NPY_AVAILABLE_MEM=32GB``, or pass ``--available-memory=32GB`` to pytest.
+
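+ For example, an illustrative full-suite run with the override set::
+
+     $ NPY_AVAILABLE_MEM=32GB python runtests.py -v
+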
Building in-place
-----------------
Other build options
-------------------
+Build options can be discovered by running any of::
+
+ $ python setup.py --help
+ $ python setup.py --help-commands
+
It's possible to do a parallel build with ``numpy.distutils`` with the ``-j`` option;
see :ref:`parallel-builds` for more details.
-In order to install the development version of NumPy in ``site-packages``, use
-``python setup.py install --user``.
-
A similar approach to in-place builds and use of ``PYTHONPATH`` but outside the
source tree is to use::
- $ python setup.py install --prefix /some/owned/folder
+ $ pip install . --prefix /some/owned/folder
$ export PYTHONPATH=/some/owned/folder/lib/python3.4/site-packages
+NumPy uses a series of tests to probe the compiler and libc libraries for
+functions. The results are stored in ``_numpyconfig.h`` and ``config.h`` files
+using ``HAVE_XXX`` definitions. These tests are run during the ``build_src``
+phase of the ``_multiarray_umath`` module in the ``generate_config_h`` and
+``generate_numpyconfig_h`` functions. Since the output of these calls includes
+many compiler warnings and errors, by default it is run quietly. If you wish
+to see this output, you can run the ``build_src`` stage verbosely::
+
+ $ python setup.py build_src -v
+
Using virtualenvs
-----------------
Besides using ``runtests.py``, there are various ways to run the tests. Inside
the interpreter, tests can be run like this::
- >>> np.test()
+ >>> np.test() # doctest: +SKIPBLOCK
>>> np.test('full') # Also run tests marked as slow
>>> np.test('full', verbose=2) # Additionally print test name/file
- *Core developers* If you want to push changes without
further review, see the notes :ref:`below <pushing-to-main>`.
-
+
This way of working helps to keep work well organized and the history
as clear as possible.
git status # Optional
git diff # Optional
git add modified_file
- git commit
+ git commit
# push the branch to your own Github repo
git push origin my-new-feature
properly formatted and sufficiently detailed commit message. After saving
your message and closing the editor, your commit will be saved. For trivial
commits, a short commit message can be passed in through the command line
- using the ``-m`` flag. For example, ``git commit -am "ENH: Some message"``.
-
+ using the ``-m`` flag. For example, ``git commit -am "ENH: Some message"``.
+
In some cases, you will see this form of the commit command: ``git commit
-a``. The extra ``-a`` flag automatically commits all modified files and
removes all deleted files. This can save you some typing of numerous ``git
add`` commands; however, it can add unwanted changes to a commit if you're
not careful. For more information, see `why the -a flag?`_ - and the
- helpful use-case description in the `tangled working copy problem`_.
+ helpful use-case description in the `tangled working copy problem`_.
#. Push the changes to your forked repo on github_::
git push origin my-new-feature
For more information, see `git push`_.
-
+
.. note::
-
+
Assuming you have followed the instructions in these pages, git will create
a default link to your github_ repo called ``origin``. In git >= 1.7 you
can ensure that the link to origin is permanently set by using the
``--set-upstream`` option::
-
+
git push --set-upstream origin my-new-feature
-
+
From now on git_ will know that ``my-new-feature`` is related to the
``my-new-feature`` branch in your own github_ repo. Subsequent push calls
are then simplified to the following::
git push
-
+
You have to use ``--set-upstream`` for each new branch that you create.
-
+
It may be the case that while you were working on your edits, new commits have
been added to ``upstream`` that affect your work. In this case, follow the
=======================================================
When you feel your work is finished, you can create a pull request (PR). Github
-has a nice help page that outlines the process for `filing pull requests`_.
+has a nice help page that outlines the process for `filing pull requests`_.
If your changes involve modifications to the API or addition/modification of a
-function, you should initiate a code review. This involves sending an email to
-the `NumPy mailing list`_ with a link to your PR along with a description of
-and a motivation for your changes.
+function, you should
+
+- send an email to the `NumPy mailing list`_ with a link to your PR along with
+ a description of and a motivation for your changes. This may generate
+ changes and feedback. It might be prudent to start with this step if your
+ change may be controversial.
+- add a release note to the ``doc/release/upcoming_changes/`` directory,
+ following the instructions and format in the
+ ``doc/release/upcoming_changes/README.rst`` file.
.. _rebasing-on-master:
git push upstream my-feature-branch:master
-.. note::
+.. note::
It's usually a good idea to use the ``-n`` flag to ``git push`` to check
first that you're about to push the changes you want to the place you
page for more detail. We're repeating some of it here just to give the
specifics for the NumPy_ project, and to suggest some default names.
+.. _set-up-and-configure-a-github-account:
+
Set up and configure a github_ account
======================================
* Jaime Fernández del Río
-* Nathaniel Smith
+* Sebastian Berg
* External member: Thomas Caswell
Contributing to NumPy
#####################
+Not a coder? Not a problem! NumPy is multi-faceted, and we can use a lot of help.
+These are all activities we'd like to get help with (they're all important, so
+we list them in alphabetical order):
+
+- Code maintenance and development
+- Community coordination
+- Developing educational content & narrative documentation
+- DevOps
+- Fundraising
+- Marketing
+- Project management
+- Translating content
+- Website design and development
+- Writing technical documentation
+
+The rest of this document discusses working on the NumPy code base and documentation.
+We're in the process of updating our descriptions of other activities and roles.
+If you are interested in these other activities, please contact us!
+You can do this via
+the `numpy-discussion mailing list <https://scipy.org/scipylib/mailing-lists.html>`__,
+or on GitHub (open an issue or comment on a relevant issue). These are our
+preferred communication channels (open source is open by nature!); however,
+if you prefer to discuss in private first, please reach out to our community
+coordinators at `numpy-team@googlegroups.com` or `numpy-team.slack.com`
+(send an email to `numpy-team@googlegroups.com` to get an invite the first
+time).
+
+
Development process - summary
=============================
git checkout -b linspace-speedups
* Commit locally as you progress (``git add`` and ``git commit``)
- Use a `properly formatted <writing-the-commit-message>` commit message,
+ Use a :ref:`properly formatted<writing-the-commit-message>` commit message,
write tests that fail before your change and pass afterward, run all the
- `tests locally <development-environment>`. Be sure to document any
+ :ref:`tests locally<development-environment>`. Be sure to document any
changed behavior in docstrings, keeping to the NumPy docstring
- `standard <howto-document>`.
+ :ref:`standard<howto-document>`.
3. To submit your contribution:
git push origin linspace-speedups
* Enter your GitHub username and password (repeat contributors or advanced
- users can remove this step by connecting to GitHub with `SSH <set-up-and-
- configure-a-github-account>`.
+ users can remove this step by connecting to GitHub with
+ :ref:`SSH<set-up-and-configure-a-github-account>` .
* Go to GitHub. The new branch will show up with a green Pull Request
button. Make sure the title and message are clear, concise, and self-
coding style of your branch. The CI tests must pass before your PR can be
merged. If CI fails, you can find out why by clicking on the "failed"
icon (red cross) and inspecting the build and test log. To avoid overuse
- and waste of this resource, `test your work <recommended-development-
- setup>` locally before committing.
+ and waste of this resource,
+ :ref:`test your work<recommended-development-setup>` locally before
+ committing.
* A PR must be **approved** by at least one core team member before merging.
Approval means the core team member has carefully reviewed the changes,
Beyond changes to a function's docstring and possible description in the
general documentation, if your change introduces any user-facing
- modifications, update the current release notes under
- ``doc/release/X.XX-notes.rst``
+  modifications, they may need to be mentioned in the release notes.
+ To add your change to the release notes, you need to create a short file
+ with a summary and place it in ``doc/release/upcoming_changes``.
+ The file ``doc/release/upcoming_changes/README.rst`` details the format and
+ filename conventions.
If your change introduces a deprecation, make sure to discuss this first on
GitHub or the mailing list. If agreement on the deprecation is
If GitHub indicates that the branch of your Pull Request can no longer
be merged automatically, you have to incorporate changes that have been made
since you started into your branch. Our recommended way to do this is to
-`rebase on master <rebasing-on-master>`.
+:ref:`rebase on master<rebasing-on-master>`.
Guidelines
----------
* All code should have tests (see `test coverage`_ below for more details).
-* All code should be `documented <docstring-standard>`.
+* All code should be `documented <https://numpydoc.readthedocs.io/
+ en/latest/format.html#docstring-standard>`_.
* No changes are ever committed without review and approval by a core
  team member. Please ask politely on the PR or on the `mailing list`_ if you
get no response to your pull request within a week.
import numpy as np
-* For C code, see the `numpy-c-style-guide`
+* For C code, see the :ref:`numpy-c-style-guide<style_guide>`
Test coverage
-------------
Pull requests (PRs) that modify code should either have new tests, or modify existing
-tests to fail before the PR and pass afterwards. You should `run the tests
+tests to fail before the PR and pass afterwards. You should :ref:`run the tests
<development-environment>` before pushing a PR.
Tests for a module should ideally cover all code in that module,
$ python runtests.py --coverage
-This will create a report in `build/coverage`, which can be viewed with::
+This will create a report in ``build/coverage``, which can be viewed with::
$ firefox build/coverage/index.html
~~~~~~~~~~~~
`Sphinx <http://www.sphinx-doc.org/en/stable/>`__ is needed to build
-the documentation. Matplotlib and SciPy are also required.
+the documentation. Matplotlib, SciPy, and IPython are also required.
Fixing Warnings
~~~~~~~~~~~~~~~
releasing
governance/index
-NumPy-specific workflow is in `numpy-development-workflow`.
+NumPy-specific workflow is in :ref:`numpy-development-workflow
+<development-workflow>`.
.. _`mailing list`: https://mail.python.org/mailman/listinfo/numpy-devel
=========================================
We currently use Sphinx_ for generating the API and reference
-documentation for NumPy. You will need Sphinx 1.8.3 or newer.
+documentation for NumPy. You will need Sphinx version 1.8.3 to 1.8.5.
If you only want to get the documentation, note that pre-built
versions can be found at
`plot_directive`, which is shipped with Matplotlib_. This Sphinx extension can
be installed by installing Matplotlib. You will also need Python 3.6.
-Since large parts of the main documentation are stored in
-docstrings, you will need to first build NumPy, and install it so
-that the correct version is imported by
-
- >>> import numpy
+Since large parts of the main documentation are obtained from numpy via
+``import numpy`` and examining the docstrings, you will need to first build
+NumPy, and install it so that the correct version is imported.
Note that you can, e.g., install NumPy to a temporary location and set
the PYTHONPATH environment variable appropriately.
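
For instance (the target directory is illustrative, and the Python version in
the path should match your interpreter)::

    $ pip install . --prefix /tmp/numpy-docs
    $ export PYTHONPATH=/tmp/numpy-docs/lib/python3.6/site-packages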
make html
in the ``doc/`` directory. If all goes well, this will generate a
-``build/html`` subdirectory containing the built documentation. Note
-that building the documentation on Windows is currently not actively
+``build/html`` subdirectory containing the built documentation. If you get
+a message about ``installed numpy != current repo git version``, you must
+either override the check by setting ``GITVER`` or re-install NumPy.
+
+Note that building the documentation on Windows is currently not actively
supported, though it should be possible. (See Sphinx_ documentation
for more information.)
.. include:: var_session.dat
:literal:
+
+
+Dealing with KIND specifiers
+============================
+
+Currently, F2PY can handle only ``<type spec>(kind=<kindselector>)``
+declarations where ``<kindselector>`` is an integer literal (e.g. 1, 2,
+4, ...), but not a function call ``KIND(..)`` or any other
+expression. F2PY needs to know what would be the corresponding C type
+and a general solution for that would be too complicated to implement.
+
+However, F2PY provides a hook to overcome this difficulty, namely,
+users can define their own <Fortran type> to <C type> maps. For
+example, if Fortran 90 code contains::
+
+ REAL(kind=KIND(0.0D0)) ...
+
+then create a mapping file containing, for instance, the Python
+dictionary::
+
+    {'real': {'KIND(0.0D0)': 'double'}}
+
+Use the ``--f2cmap`` command-line option to pass the file name to F2PY.
+By default, F2PY assumes the file is named ``.f2py_f2cmap`` and located in
+the current working directory.
+
+Or more generally, the f2cmap file must contain a dictionary
+with items::
+
+ <Fortran typespec> : {<selector_expr>:<C type>}
+
+that defines the mapping between the Fortran type::
+
+ <Fortran typespec>([kind=]<selector_expr>)
+
+and the corresponding <C type>. <C type> can be one of the following::
+
+ char
+ signed_char
+ short
+ int
+ long_long
+ float
+ double
+ long_double
+ complex_float
+ complex_double
+ complex_long_double
+ string
+
+For more information, see the F2PY source code in ``numpy/f2py/capi_maps.py``.
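+
+As a short usage sketch (the file and module names here are hypothetical),
+the map file can also be passed explicitly on the command line::
+
+    $ cat my_f2cmap
+    {'real': {'KIND(0.0D0)': 'double'}}
+    $ f2py -c -m mymodule mymodule.f90 --f2cmap my_f2cmap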
:mod:`numpy.distutils` extends ``distutils`` with the following features:
-* ``Extension`` class argument ``sources`` may contain Fortran source
+* :class:`Extension` class argument ``sources`` may contain Fortran source
files. In addition, the list ``sources`` may contain at most one
F2PY signature file, and then the name of an Extension module must
match with the ``<modulename>`` used in signature file. It is
to scan Fortran source files for routine signatures to construct the
wrappers to Fortran codes.
- Additional options to F2PY process can be given using ``Extension``
+ Additional options to F2PY process can be given using :class:`Extension`
class argument ``f2py_options``.
* The following new ``distutils`` commands are defined:
field alignment. In either case ``dtype.isalignedstruct`` is also set to
True.
* ``IsUintAligned`` is used to determine if an ndarray is "uint aligned" in
- an analagous way to how ``IsAligned`` checks for true-alignment.
+ an analogous way to how ``IsAligned`` checks for true-alignment.
Consequences of alignment
-------------------------
.. versionadded:: 1.13
Any class, ndarray subclass or not, can define this method or set it to
- :obj:`None` in order to override the behavior of NumPy's ufuncs. This works
+ None in order to override the behavior of NumPy's ufuncs. This works
quite similarly to Python's ``__mul__`` and other binary operation routines.
- *ufunc* is the ufunc object that was called.
:func:`~numpy.matmul`, which currently is not a Ufunc, but could be
relatively easily be rewritten as a (set of) generalized Ufuncs. The
same may happen with functions such as :func:`~numpy.median`,
- :func:`~numpy.min`, and :func:`~numpy.argsort`.
+ :func:`~numpy.amin`, and :func:`~numpy.argsort`.
Like with some other special methods in python, such as ``__hash__`` and
``__iter__``, it is possible to indicate that your class does *not*
:class:`ndarray` handles binary operations like ``arr + obj`` and ``arr
< obj`` when ``arr`` is an :class:`ndarray` and ``obj`` is an instance
of a custom class. There are two possibilities. If
- ``obj.__array_ufunc__`` is present and not :obj:`None`, then
+ ``obj.__array_ufunc__`` is present and not None, then
``ndarray.__add__`` and friends will delegate to the ufunc machinery,
meaning that ``arr + obj`` becomes ``np.add(arr, obj)``, and then
:func:`~numpy.add` invokes ``obj.__array_ufunc__``. This is useful if you
want to define an object that acts like an array.
- Alternatively, if ``obj.__array_ufunc__`` is set to :obj:`None`, then as a
+ Alternatively, if ``obj.__array_ufunc__`` is set to None, then as a
special case, special methods like ``ndarray.__add__`` will notice this
and *unconditionally* raise :exc:`TypeError`. This is useful if you want to
create objects that interact with arrays via binary operations, but
place rather than separately by the ufunc machinery and by the binary
operation rules (which gives preference to special methods of
subclasses; the alternative way to enforce a one-place only hierarchy,
- of setting :func:`__array_ufunc__` to :obj:`None`, would seem very
+ of setting :func:`__array_ufunc__` to None, would seem very
unexpected and thus confusing, as then the subclass would not work at
all with ufuncs).
- :class:`ndarray` defines its own :func:`__array_ufunc__`, which,
.. py:method:: class.__array_prepare__(array, context=None)
- At the beginning of every :ref:`ufunc <ufuncs.output-type>`, this
+ At the beginning of every :ref:`ufunc <ufuncs-output-type>`, this
method is called on the input object with the highest array
priority, or the output object if one was specified. The output
array is passed in and whatever is returned is passed to the ufunc.
.. py:method:: class.__array_wrap__(array, context=None)
- At the end of every :ref:`ufunc <ufuncs.output-type>`, this method
+ At the end of every :ref:`ufunc <ufuncs-output-type>`, this method
is called on the input object with the highest array priority, or
the output object if one was specified. The ufunc-computed array
is passed in and whatever is returned is passed to the user.
If a class (ndarray subclass or not) having the :func:`__array__`
method is used as the output object of an :ref:`ufunc
- <ufuncs.output-type>`, results will be written to the object
+ <ufuncs-output-type>`, results will be written to the object
returned by :func:`__array__`. Similar conversion is done on
input arrays.
some code involving val
...
-calls ``val = myiter.next()`` repeatedly until :exc:`StopIteration` is
+calls ``val = next(myiter)`` repeatedly until :exc:`StopIteration` is
raised by the iterator. There are several ways to iterate over an
array that may be useful: default iteration, flat iteration, and
:math:`N`-dimensional enumeration.
:ref:`time unit <arrays.dtypes.timeunits>`. The date units are years ('Y'),
months ('M'), weeks ('W'), and days ('D'), while the time units are
hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and
-some additional SI-prefix seconds-based units.
+some additional SI-prefix seconds-based units. The datetime64 data type
+also accepts the string "NAT", in any combination of lowercase/uppercase
+letters, for a "Not A Time" value.
.. admonition:: Example
>>> np.datetime64('2005-02-25T03:30')
numpy.datetime64('2005-02-25T03:30')
+ NAT (not a time):
+
+ >>> np.datetime64('nat')
+ numpy.datetime64('NaT')
+
When creating an array of datetimes from a string, it is still possible
to automatically select the unit from the inputs, by using the
datetime type with generic units.
NumPy allows the subtraction of two Datetime values, an operation which
produces a number with a time unit. Because NumPy doesn't have a physical
quantities system in its core, the timedelta64 data type was created
-to complement datetime64.
+to complement datetime64. The arguments for timedelta64 are a number,
+to represent the number of units, and a date/time unit, such as
+(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64
+data type also accepts the string "NAT" in place of the number for a "Not A Time" value.
+
+.. admonition:: Example
+
+ >>> np.timedelta64(1, 'D')
+ numpy.timedelta64(1,'D')
+
+ >>> np.timedelta64(4, 'h')
+ numpy.timedelta64(4,'h')
+
+ >>> np.timedelta64('nAt')
+ numpy.timedelta64('NaT')
Datetimes and Timedeltas work together to provide ways for
simple datetime calculations.
>>> np.timedelta64(1,'W') % np.timedelta64(10,'D')
numpy.timedelta64(7,'D')
+ >>> np.datetime64('nat') - np.datetime64('2009-01-01')
+ numpy.timedelta64('NaT','D')
+
+ >>> np.datetime64('2009-01-01') + np.timedelta64('nat')
+ numpy.datetime64('NaT')
+
There are two Timedelta units ('Y', years and 'M', months) which are treated
specially, because how much time they represent changes depending
on when they are used. While a timedelta day unit is equivalent to
printing it would convert from or to local time::
# old behavior
- >>>> np.datetime64('2000-01-01T00:00:00')
+ >>> np.datetime64('2000-01-01T00:00:00')
numpy.datetime64('2000-01-01T00:00:00-0800') # note the timezone offset -08:00
A consensus of datetime64 users agreed that this behavior is undesirable
datetime64 no longer assumes that input is in local time, nor does it print
local times::
- >>>> np.datetime64('2000-01-01T00:00:00')
+ >>> np.datetime64('2000-01-01T00:00:00')
numpy.datetime64('2000-01-01T00:00:00')
For backwards compatibility, datetime64 still parses timezone offsets, which
the rule for casting from dates to times is no longer ambiguous.
.. _pandas: http://pandas.pydata.org
-
-
-Differences Between 1.6 and 1.7 Datetimes
-=========================================
-
-The NumPy 1.6 release includes a more primitive datetime data type
-than 1.7. This section documents many of the changes that have taken
-place.
-
-String Parsing
-``````````````
-
-The datetime string parser in NumPy 1.6 is very liberal in what it accepts,
-and silently allows invalid input without raising errors. The parser in
-NumPy 1.7 is quite strict about only accepting ISO 8601 dates, with a few
-convenience extensions. 1.6 always creates microsecond (us) units by
-default, whereas 1.7 detects a unit based on the format of the string.
-Here is a comparison.::
-
- # NumPy 1.6.1
- >>> np.datetime64('1979-03-22')
- 1979-03-22 00:00:00
- # NumPy 1.7.0
- >>> np.datetime64('1979-03-22')
- numpy.datetime64('1979-03-22')
-
- # NumPy 1.6.1, unit default microseconds
- >>> np.datetime64('1979-03-22').dtype
- dtype('datetime64[us]')
- # NumPy 1.7.0, unit of days detected from string
- >>> np.datetime64('1979-03-22').dtype
- dtype('<M8[D]')
-
- # NumPy 1.6.1, ignores invalid part of string
- >>> np.datetime64('1979-03-2corruptedstring')
- 1979-03-02 00:00:00
- # NumPy 1.7.0, raises error for invalid input
- >>> np.datetime64('1979-03-2corruptedstring')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ValueError: Error parsing datetime string "1979-03-2corruptedstring" at position 8
-
- # NumPy 1.6.1, 'nat' produces today's date
- >>> np.datetime64('nat')
- 2012-04-30 00:00:00
- # NumPy 1.7.0, 'nat' produces not-a-time
- >>> np.datetime64('nat')
- numpy.datetime64('NaT')
-
- # NumPy 1.6.1, 'garbage' produces today's date
- >>> np.datetime64('garbage')
- 2012-04-30 00:00:00
- # NumPy 1.7.0, 'garbage' raises an exception
- >>> np.datetime64('garbage')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ValueError: Error parsing datetime string "garbage" at position 0
-
- # NumPy 1.6.1, can't specify unit in scalar constructor
- >>> np.datetime64('1979-03-22T19:00', 'h')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: function takes at most 1 argument (2 given)
- # NumPy 1.7.0, unit in scalar constructor
- >>> np.datetime64('1979-03-22T19:00', 'h')
- numpy.datetime64('1979-03-22T19:00-0500','h')
-
- # NumPy 1.6.1, reads ISO 8601 strings w/o TZ as UTC
- >>> np.array(['1979-03-22T19:00'], dtype='M8[h]')
- array([1979-03-22 19:00:00], dtype=datetime64[h])
- # NumPy 1.7.0, reads ISO 8601 strings w/o TZ as local (ISO specifies this)
- >>> np.array(['1979-03-22T19:00'], dtype='M8[h]')
- array(['1979-03-22T19-0500'], dtype='datetime64[h]')
-
- # NumPy 1.6.1, doesn't parse all ISO 8601 strings correctly
- >>> np.array(['1979-03-22T12'], dtype='M8[h]')
- array([1979-03-22 00:00:00], dtype=datetime64[h])
- >>> np.array(['1979-03-22T12:00'], dtype='M8[h]')
- array([1979-03-22 12:00:00], dtype=datetime64[h])
- # NumPy 1.7.0, handles this case correctly
- >>> np.array(['1979-03-22T12'], dtype='M8[h]')
- array(['1979-03-22T12-0500'], dtype='datetime64[h]')
- >>> np.array(['1979-03-22T12:00'], dtype='M8[h]')
- array(['1979-03-22T12-0500'], dtype='datetime64[h]')
-
-Unit Conversion
-```````````````
-
-The 1.6 implementation of datetime does not convert between units correctly.::
-
- # NumPy 1.6.1, the representation value is untouched
- >>> np.array(['1979-03-22'], dtype='M8[D]')
- array([1979-03-22 00:00:00], dtype=datetime64[D])
- >>> np.array(['1979-03-22'], dtype='M8[D]').astype('M8[M]')
- array([2250-08-01 00:00:00], dtype=datetime64[M])
- # NumPy 1.7.0, the representation is scaled accordingly
- >>> np.array(['1979-03-22'], dtype='M8[D]')
- array(['1979-03-22'], dtype='datetime64[D]')
- >>> np.array(['1979-03-22'], dtype='M8[D]').astype('M8[M]')
- array(['1979-03'], dtype='datetime64[M]')
-
-Datetime Arithmetic
-```````````````````
-
-The 1.6 implementation of datetime only works correctly for a small subset of
-arithmetic operations. Here we show some simple cases.::
-
- # NumPy 1.6.1, produces invalid results if units are incompatible
- >>> a = np.array(['1979-03-22T12'], dtype='M8[h]')
- >>> b = np.array([3*60], dtype='m8[m]')
- >>> a + b
- array([1970-01-01 00:00:00.080988], dtype=datetime64[us])
- # NumPy 1.7.0, promotes to higher-resolution unit
- >>> a = np.array(['1979-03-22T12'], dtype='M8[h]')
- >>> b = np.array([3*60], dtype='m8[m]')
- >>> a + b
- array(['1979-03-22T15:00-0500'], dtype='datetime64[m]')
-
- # NumPy 1.6.1, arithmetic works if everything is microseconds
- >>> a = np.array(['1979-03-22T12:00'], dtype='M8[us]')
- >>> b = np.array([3*60*60*1000000], dtype='m8[us]')
- >>> a + b
- array([1979-03-22 15:00:00], dtype=datetime64[us])
- # NumPy 1.7.0
- >>> a = np.array(['1979-03-22T12:00'], dtype='M8[us]')
- >>> b = np.array([3*60*60*1000000], dtype='m8[us]')
- >>> a + b
- array(['1979-03-22T15:00:00.000000-0500'], dtype='datetime64[us]')
Used as-is.
-:const:`None`
+None
.. index::
triple: dtype; construction; from None
their values must each be lists of the same length as the *names*
and *formats* lists. The *offsets* value is a list of byte offsets
(limited to `ctypes.c_int`) for each field, while the *titles* value is a
- list of titles for each field (:const:`None` can be used if no title is
+ list of titles for each field (None can be used if no title is
desired for that field). The *titles* can be any :class:`string`
or :class:`unicode` object and will add another entry to the
fields dictionary keyed by the title and referencing the same
Indexing
========
+.. seealso::
+
+ :ref:`Indexing basics <basics.indexing>`
+
.. sectionauthor:: adapted from "Guide to NumPy" by Travis E. Oliphant
.. currentmodule:: numpy
This attribute can also be an object exposing the
:c:func:`buffer interface <PyObject_AsCharBuffer>` which
will be used to share the data. If this key is not present (or
- returns :class:`None`), then memory sharing will be done
+ returns None), then memory sharing will be done
through the buffer interface of the object itself. In this
case, the offset key can be used to indicate the start of the
buffer. A reference to the object exposing the array interface
must be stored by the new object if the memory area is to be
secured.
- **Default**: :const:`None`
+ **Default**: None
**strides** (optional)
- Either :const:`None` to indicate a C-style contiguous array or
+ Either None to indicate a C-style contiguous array or
a Tuple of strides which provides the number of bytes needed
to jump to the next array element in the corresponding
dimension. Each entry must be an integer (a Python
be larger than can be represented by a C "int" or "long"; the
calling code should handle this appropriately, either by
raising an error, or by using :c:type:`Py_LONG_LONG` in C. The
- default is :const:`None` which implies a C-style contiguous
+ default is None which implies a C-style contiguous
memory buffer. In this model, the last dimension of the array
varies the fastest. For example, the default strides tuple
for an object whose array entries are 8 bytes long and whose
shape is (10,20,30) would be (4800, 240, 8)
- **Default**: :const:`None` (C-style contiguous)
+ **Default**: None (C-style contiguous)
**mask** (optional)
- :const:`None` or an object exposing the array interface. All
+ None or an object exposing the array interface. All
elements of the mask array should be interpreted only as true
or not true indicating which elements of this array are valid.
The shape of this object should be `"broadcastable"
<arrays.broadcasting.broadcastable>` to the shape of the
original array.
- **Default**: :const:`None` (All array values are valid)
+ **Default**: None (All array values are valid)
**offset** (optional)
An integer offset into the array data region. This can only be
- used when data is :const:`None` or returns a :class:`buffer`
+ used when data is None or returns a :class:`buffer`
object.
**Default**: 0.
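
As a minimal sketch (the class name is hypothetical), an object can expose
its memory through these keys and then be consumed with `numpy.asarray`::

    import numpy as np

    class ArrayLike:
        def __init__(self):
            self._buf = np.arange(6, dtype='<i8')    # keeps the memory alive
            self.__array_interface__ = {
                'version': 3,
                'shape': (6,),
                'typestr': '<i8',
                # (pointer, read-only flag): share the ndarray's buffer
                'data': (self._buf.__array_interface__['data'][0], False),
                'strides': None,                      # C-style contiguous
            }

    a = np.asarray(ArrayLike())   # shares the wrapped buffer, no copy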
-------------------------------
For array methods that take an *axis* keyword, it defaults to
-:const:`None`. If axis is *None*, then the array is treated as a 1-D
+*None*. If axis is *None*, then the array is treated as a 1-D
array. Any other value for *axis* represents the dimension along which
the operation should proceed.
array([[ 0, 2, 4],
[ 6, 8, 10]])
+If you are writing code that needs to support older versions of numpy,
+note that prior to 1.15, :class:`nditer` was not a context manager and
+did not have a `close` method. Instead it relied on the destructor to
+initiate the writeback of the buffer.
+
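+For example, code that must run on both older and newer versions can guard
+the cleanup explicitly (an illustrative pattern, not an official recipe)::
+
+    it = np.nditer(a, op_flags=['readwrite'])
+    try:
+        for x in it:
+            x[...] = 2 * x
+    finally:
+        if hasattr(it, 'close'):   # close() exists from 1.15 onwards
+            it.close()
+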
Using an External Loop
----------------------
In all the examples so far, the elements of `a` are provided by the
iterator one at a time, because all the looping logic is internal to the
-iterator. While this is simple and convenient, it is not very efficient. A
-better approach is to move the one-dimensional innermost loop into your
+iterator. While this is simple and convenient, it is not very efficient.
+A better approach is to move the one-dimensional innermost loop into your
code, external to the iterator. This way, NumPy's vectorized operations
can be used on larger chunks of the elements being visited.
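
For instance, with ``flags=['external_loop']`` the iterator hands back whole
one-dimensional chunks instead of single elements (a minimal sketch in the
style of the examples here)::

    >>> a = np.arange(6).reshape(2,3)
    >>> for x in np.nditer(a, flags=['external_loop']):
    ...     print(x, end=' ')
    ...
    [0 1 2 3 4 5]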
elements of an array in memory order, but use a C-order, Fortran-order,
or multidimensional index to look up values in a different array.
-The Python iterator protocol doesn't have a natural way to query these
-additional values from the iterator, so we introduce an alternate syntax
-for iterating with an :class:`nditer`. This syntax explicitly works
-with the iterator object itself, so its properties are readily accessible
-during iteration. With this looping construct, the current value is
-accessible by indexing into the iterator, and the index being tracked
-is the property `index` or `multi_index` depending on what was requested.
-
-The Python interactive interpreter unfortunately prints out the
-values of expressions inside the while loop during each iteration of the
-loop. We have modified the output in the examples using this looping
-construct in order to be more readable.
+The index is tracked by the iterator object itself, and accessible
+through the `index` or `multi_index` properties, depending on what was
+requested. The examples below show printouts demonstrating the
+progression of the index:
.. admonition:: Example
>>> a = np.arange(6).reshape(2,3)
>>> it = np.nditer(a, flags=['f_index'])
- >>> while not it.finished:
- ... print("%d <%d>" % (it[0], it.index), end=' ')
- ... it.iternext()
+ >>> for x in it:
+ ... print("%d <%d>" % (x, it.index), end=' ')
...
0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5>
>>> it = np.nditer(a, flags=['multi_index'])
- >>> while not it.finished:
- ... print("%d <%s>" % (it[0], it.multi_index), end=' ')
- ... it.iternext()
+ >>> for x in it:
+ ... print("%d <%s>" % (x, it.multi_index), end=' ')
...
0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)>
- >>> it = np.nditer(a, flags=['multi_index'], op_flags=['writeonly'])
- >>> with it:
- .... while not it.finished:
- ... it[0] = it.multi_index[1] - it.multi_index[0]
- ... it.iternext()
+ >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it:
+ ... for x in it:
+ ... x[...] = it.multi_index[1] - it.multi_index[0]
...
>>> a
array([[ 0, 1, 2],
Tracking an index or multi-index is incompatible with using an external
loop, because it requires a different index value per element. If
you try to combine these flags, the :class:`nditer` object will
-raise an exception
+raise an exception.
.. admonition:: Example
File "<stdin>", line 1, in <module>
ValueError: Iterator flag EXTERNAL_LOOP cannot be used if an index or multi-index is being tracked
+Alternative Looping and Element Access
+--------------------------------------
+
+To make its properties more readily accessible during iteration,
+:class:`nditer` has an alternative syntax for iterating, which works
+explicitly with the iterator object itself. With this looping construct,
+the current value is accessible by indexing into the iterator. Other
+properties, such as tracked indices, remain as before. The examples below
+produce identical results to the ones in the previous section.
+
+.. admonition:: Example
+
+ >>> a = np.arange(6).reshape(2,3)
+ >>> it = np.nditer(a, flags=['f_index'])
+ >>> while not it.finished:
+ ... print("%d <%d>" % (it[0], it.index), end=' ')
+ ... it.iternext()
+ ...
+ 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5>
+
+ >>> it = np.nditer(a, flags=['multi_index'])
+ >>> while not it.finished:
+ ... print("%d <%s>" % (it[0], it.multi_index), end=' ')
+ ... it.iternext()
+ ...
+ 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)>
+
+ >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it:
+ ... while not it.finished:
+ ... it[0] = it.multi_index[1] - it.multi_index[0]
+ ... it.iternext()
+ ...
+ >>> a
+ array([[ 0, 1, 2],
+ [-1, 0, 1]])
+
Buffering the Array Elements
----------------------------
+++ /dev/null
-Array API
-=========
-
-.. sectionauthor:: Travis E. Oliphant
-
-| The test of a first-rate intelligence is the ability to hold two
-| opposed ideas in the mind at the same time, and still retain the
-| ability to function.
-| --- *F. Scott Fitzgerald*
-
-| For a successful technology, reality must take precedence over public
-| relations, for Nature cannot be fooled.
-| --- *Richard P. Feynman*
-
-.. index::
- pair: ndarray; C-API
- pair: C-API; array
-
-
-Array structure and data access
--------------------------------
-
-These macros all access the :c:type:`PyArrayObject` structure members. The input
-argument, arr, can be any :c:type:`PyObject *<PyObject>` that is directly interpretable
-as a :c:type:`PyArrayObject *` (any instance of the :c:data:`PyArray_Type` and its
-sub-types).
-
-.. c:function:: int PyArray_NDIM(PyArrayObject *arr)
-
- The number of dimensions in the array.
-
-.. c:function:: npy_intp *PyArray_DIMS(PyArrayObject *arr)
-
- Returns a pointer to the dimensions/shape of the array. The
- number of elements matches the number of dimensions
- of the array. Can return ``NULL`` for 0-dimensional arrays.
-
-.. c:function:: npy_intp *PyArray_SHAPE(PyArrayObject *arr)
-
- .. versionadded:: 1.7
-
- A synonym for PyArray_DIMS, named to be consistent with the
- 'shape' usage within Python.
-
-.. c:function:: void *PyArray_DATA(PyArrayObject *arr)
-
-.. c:function:: char *PyArray_BYTES(PyArrayObject *arr)
-
-    These two macros are similar and obtain the pointer to the
-    data-buffer for the array. The first macro can (and should) be
-    assigned to a pointer of a particular type, while the second is for
-    generic processing. If you have not guaranteed a contiguous and/or aligned
- array then be sure you understand how to access the data in the
- array to avoid memory and/or alignment problems.
-
-.. c:function:: npy_intp *PyArray_STRIDES(PyArrayObject* arr)
-
- Returns a pointer to the strides of the array. The
- number of elements matches the number of dimensions
- of the array.
-
-.. c:function:: npy_intp PyArray_DIM(PyArrayObject* arr, int n)
-
- Return the shape in the *n* :math:`^{\textrm{th}}` dimension.
-
-.. c:function:: npy_intp PyArray_STRIDE(PyArrayObject* arr, int n)
-
- Return the stride in the *n* :math:`^{\textrm{th}}` dimension.
-
-.. c:function:: PyObject *PyArray_BASE(PyArrayObject* arr)
-
- This returns the base object of the array. In most cases, this
- means the object which owns the memory the array is pointing at.
-
- If you are constructing an array using the C API, and specifying
- your own memory, you should use the function :c:func:`PyArray_SetBaseObject`
- to set the base to an object which owns the memory.
-
- If the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or the
- :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flags are set, it has a different
- meaning, namely base is the array into which the current array will
- be copied upon copy resolution. This overloading of the base property
- for two functions is likely to change in a future version of NumPy.
-
-.. c:function:: PyArray_Descr *PyArray_DESCR(PyArrayObject* arr)
-
- Returns a borrowed reference to the dtype property of the array.
-
-.. c:function:: PyArray_Descr *PyArray_DTYPE(PyArrayObject* arr)
-
- .. versionadded:: 1.7
-
- A synonym for PyArray_DESCR, named to be consistent with the
- 'dtype' usage within Python.
-
-.. c:function:: void PyArray_ENABLEFLAGS(PyArrayObject* arr, int flags)
-
- .. versionadded:: 1.7
-
- Enables the specified array flags. This function does no validation,
- and assumes that you know what you're doing.
-
-.. c:function:: void PyArray_CLEARFLAGS(PyArrayObject* arr, int flags)
-
- .. versionadded:: 1.7
-
- Clears the specified array flags. This function does no validation,
- and assumes that you know what you're doing.
-
-.. c:function:: int PyArray_FLAGS(PyArrayObject* arr)
-
-.. c:function:: npy_intp PyArray_ITEMSIZE(PyArrayObject* arr)
-
- Return the itemsize for the elements of this array.
-
- Note that, in the old API that was deprecated in version 1.7, this function
- had the return type ``int``.
-
-.. c:function:: int PyArray_TYPE(PyArrayObject* arr)
-
- Return the (builtin) typenumber for the elements of this array.
-
-.. c:function:: PyObject *PyArray_GETITEM(PyArrayObject* arr, void* itemptr)
-
- Get a Python object of a builtin type from the ndarray, *arr*,
- at the location pointed to by itemptr. Return ``NULL`` on failure.
-
- `numpy.ndarray.item` is identical to PyArray_GETITEM.
-
-.. c:function:: int PyArray_SETITEM( \
- PyArrayObject* arr, void* itemptr, PyObject* obj)
-
- Convert obj and place it in the ndarray, *arr*, at the place
- pointed to by itemptr. Return -1 if an error occurs or 0 on
- success.
-
-.. c:function:: npy_intp PyArray_SIZE(PyArrayObject* arr)
-
- Returns the total size (in number of elements) of the array.
-
-.. c:function:: npy_intp PyArray_Size(PyArrayObject* obj)
-
- Returns 0 if *obj* is not a sub-class of ndarray. Otherwise,
- returns the total number of elements in the array. Safer version
- of :c:func:`PyArray_SIZE` (*obj*).
-
-.. c:function:: npy_intp PyArray_NBYTES(PyArrayObject* arr)
-
- Returns the total number of bytes consumed by the array.
-
-
-Data access
-^^^^^^^^^^^
-
-These functions and macros provide easy access to elements of the
-ndarray from C. These work for all arrays. You may need to take care
-when accessing the data in the array, however, if it is not in machine
-byte-order, misaligned, or not writeable. In other words, be sure to
-respect the state of the flags unless you know what you are doing, or
-have previously guaranteed an array that is writeable, aligned, and in
-machine byte-order using :c:func:`PyArray_FromAny`. If you wish to handle all
-types of arrays, the copyswap function for each type is useful for
-handling misbehaved arrays. Some platforms (e.g. Solaris) do not like
-misaligned data and will crash if you de-reference a misaligned
-pointer. Other platforms (e.g. x86 Linux) will just work more slowly
-with misaligned data.
-
-.. c:function:: void* PyArray_GetPtr(PyArrayObject* aobj, npy_intp* ind)
-
- Return a pointer to the data of the ndarray, *aobj*, at the
- N-dimensional index given by the c-array, *ind*, (which must be
- at least *aobj* ->nd in size). You may want to typecast the
- returned pointer to the data type of the ndarray.
-
-.. c:function:: void* PyArray_GETPTR1(PyArrayObject* obj, npy_intp i)
-
-.. c:function:: void* PyArray_GETPTR2( \
- PyArrayObject* obj, npy_intp i, npy_intp j)
-
-.. c:function:: void* PyArray_GETPTR3( \
- PyArrayObject* obj, npy_intp i, npy_intp j, npy_intp k)
-
-.. c:function:: void* PyArray_GETPTR4( \
- PyArrayObject* obj, npy_intp i, npy_intp j, npy_intp k, npy_intp l)
-
- Quick, inline access to the element at the given coordinates in
- the ndarray, *obj*, which must have respectively 1, 2, 3, or 4
- dimensions (this is not checked). The corresponding *i*, *j*,
- *k*, and *l* coordinates can be any integer but will be
- interpreted as ``npy_intp``. You may want to typecast the
- returned pointer to the data type of the ndarray.
-
-
-Creating arrays
----------------
-
-
-From scratch
-^^^^^^^^^^^^
-
-.. c:function:: PyObject* PyArray_NewFromDescr( \
- PyTypeObject* subtype, PyArray_Descr* descr, int nd, npy_intp const* dims, \
- npy_intp const* strides, void* data, int flags, PyObject* obj)
-
- This function steals a reference to *descr*. The easiest way to get one
- is using :c:func:`PyArray_DescrFromType`.
-
- This is the main array creation function. Most new arrays are
- created with this flexible function.
-
- The returned object is an object of Python-type *subtype*, which
- must be a subtype of :c:data:`PyArray_Type`. The array has *nd*
- dimensions, described by *dims*. The data-type descriptor of the
- new array is *descr*.
-
- If *subtype* is of an array subclass instead of the base
- :c:data:`&PyArray_Type<PyArray_Type>`, then *obj* is the object to pass to
- the :obj:`~numpy.class.__array_finalize__` method of the subclass.
-
-    If *data* is ``NULL``, then new uninitialized memory will be allocated and
- *flags* can be non-zero to indicate a Fortran-style contiguous array. Use
- :c:func:`PyArray_FILLWBYTE` to initialize the memory.
-
- If *data* is not ``NULL``, then it is assumed to point to the memory
- to be used for the array and the *flags* argument is used as the
- new flags for the array (except the state of :c:data:`NPY_OWNDATA`,
- :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` and :c:data:`NPY_ARRAY_UPDATEIFCOPY`
- flags of the new array will be reset).
-
- In addition, if *data* is non-NULL, then *strides* can
- also be provided. If *strides* is ``NULL``, then the array strides
- are computed as C-style contiguous (default) or Fortran-style
-    contiguous (*flags* is nonzero for *data* = ``NULL``, or *flags* &
-    :c:data:`NPY_ARRAY_F_CONTIGUOUS` is nonzero for non-NULL *data*). Any
- provided *dims* and *strides* are copied into newly allocated
- dimension and strides arrays for the new array object.
-
- :c:func:`PyArray_CheckStrides` can help verify non- ``NULL`` stride
- information.
-
-    If ``data`` is provided, it must stay alive for the life of the array. One
-    way to manage this is through :c:func:`PyArray_SetBaseObject`.
-
-.. c:function:: PyObject* PyArray_NewLikeArray( \
- PyArrayObject* prototype, NPY_ORDER order, PyArray_Descr* descr, \
- int subok)
-
- .. versionadded:: 1.6
-
- This function steals a reference to *descr* if it is not NULL.
-
- This array creation routine allows for the convenient creation of
- a new array matching an existing array's shapes and memory layout,
- possibly changing the layout and/or data type.
-
- When *order* is :c:data:`NPY_ANYORDER`, the result order is
- :c:data:`NPY_FORTRANORDER` if *prototype* is a fortran array,
- :c:data:`NPY_CORDER` otherwise. When *order* is
- :c:data:`NPY_KEEPORDER`, the result order matches that of *prototype*, even
- when the axes of *prototype* aren't in C or Fortran order.
-
- If *descr* is NULL, the data type of *prototype* is used.
-
- If *subok* is 1, the newly created array will use the sub-type of
- *prototype* to create the new array, otherwise it will create a
- base-class array.
-
-.. c:function:: PyObject* PyArray_New( \
- PyTypeObject* subtype, int nd, npy_intp const* dims, int type_num, \
- npy_intp const* strides, void* data, int itemsize, int flags, \
- PyObject* obj)
-
- This is similar to :c:func:`PyArray_NewFromDescr` (...) except you
- specify the data-type descriptor with *type_num* and *itemsize*,
- where *type_num* corresponds to a builtin (or user-defined)
- type. If the type always has the same number of bytes, then
- itemsize is ignored. Otherwise, itemsize specifies the particular
- size of this array.
-
-
-
-.. warning::
-
- If data is passed to :c:func:`PyArray_NewFromDescr` or :c:func:`PyArray_New`,
- this memory must not be deallocated until the new array is
- deleted. If this data came from another Python object, this can
- be accomplished using :c:func:`Py_INCREF` on that object and setting the
- base member of the new array to point to that object. If strides
- are passed in they must be consistent with the dimensions, the
- itemsize, and the data of the array.
-
-.. c:function:: PyObject* PyArray_SimpleNew(int nd, npy_intp const* dims, int typenum)
-
- Create a new uninitialized array of type, *typenum*, whose size in
-    each of *nd* dimensions is given by the integer array, *dims*. The memory
- for the array is uninitialized (unless typenum is :c:data:`NPY_OBJECT`
- in which case each element in the array is set to NULL). The
- *typenum* argument allows specification of any of the builtin
- data-types such as :c:data:`NPY_FLOAT` or :c:data:`NPY_LONG`. The
- memory for the array can be set to zero if desired using
- :c:func:`PyArray_FILLWBYTE` (return_object, 0). This function cannot be
- used to create a flexible-type array (no itemsize given).
-
-.. c:function:: PyObject* PyArray_SimpleNewFromData( \
- int nd, npy_intp const* dims, int typenum, void* data)
-
- Create an array wrapper around *data* pointed to by the given
- pointer. The array flags will have a default that the data area is
- well-behaved and C-style contiguous. The shape of the array is
- given by the *dims* c-array of length *nd*. The data-type of the
- array is indicated by *typenum*. If data comes from another
- reference-counted Python object, the reference count on this object
- should be increased after the pointer is passed in, and the base member
- of the returned ndarray should point to the Python object that owns
- the data. This will ensure that the provided memory is not
- freed while the returned array is in existence. To free memory as soon
- as the ndarray is deallocated, set the OWNDATA flag on the returned ndarray.
-
-.. c:function:: PyObject* PyArray_SimpleNewFromDescr( \
- int nd, npy_intp const* dims, PyArray_Descr* descr)
-
- This function steals a reference to *descr*.
-
- Create a new array with the provided data-type descriptor, *descr*,
- of the shape determined by *nd* and *dims*.
-
-.. c:function:: PyArray_FILLWBYTE(PyObject* obj, int val)
-
- Fill the array pointed to by *obj* (which must be an ndarray or a
- subclass of it) with the contents of *val* (evaluated as a byte).
- This macro calls memset, so *obj* must be contiguous.
-
-.. c:function:: PyObject* PyArray_Zeros( \
- int nd, npy_intp const* dims, PyArray_Descr* dtype, int fortran)
-
- Construct a new *nd* -dimensional array with shape given by *dims*
- and data type given by *dtype*. If *fortran* is non-zero, then a
- Fortran-order array is created, otherwise a C-order array is
- created. Fill the memory with zeros (or the 0 object if *dtype*
- corresponds to :c:type:`NPY_OBJECT` ).
-
-.. c:function:: PyObject* PyArray_ZEROS( \
- int nd, npy_intp const* dims, int type_num, int fortran)
-
- Macro form of :c:func:`PyArray_Zeros` which takes a type-number instead
- of a data-type object.
-
-.. c:function:: PyObject* PyArray_Empty( \
- int nd, npy_intp const* dims, PyArray_Descr* dtype, int fortran)
-
- Construct a new *nd* -dimensional array with shape given by *dims*
- and data type given by *dtype*. If *fortran* is non-zero, then a
- Fortran-order array is created, otherwise a C-order array is
- created. The array is uninitialized unless the data type
- corresponds to :c:type:`NPY_OBJECT` in which case the array is
- filled with :c:data:`Py_None`.
-
-.. c:function:: PyObject* PyArray_EMPTY( \
- int nd, npy_intp const* dims, int typenum, int fortran)
-
- Macro form of :c:func:`PyArray_Empty` which takes a type-number,
- *typenum*, instead of a data-type object.
-
-.. c:function:: PyObject* PyArray_Arange( \
- double start, double stop, double step, int typenum)
-
- Construct a new 1-dimensional array of data-type, *typenum*, that
- ranges from *start* to *stop* (exclusive) in increments of
- *step*. Equivalent to **arange** (*start*, *stop*, *step*, dtype).
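-
- For example, a sketch equivalent to ``np.arange(0, 10, 1,
- dtype=np.double)``:
-
- .. code-block:: c
-
-    /* 1-d double array holding 0.0, 1.0, ..., 9.0 */
-    PyObject *r = PyArray_Arange(0.0, 10.0, 1.0, NPY_DOUBLE);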
-
-.. c:function:: PyObject* PyArray_ArangeObj( \
- PyObject* start, PyObject* stop, PyObject* step, PyArray_Descr* descr)
-
- Construct a new 1-dimensional array of data-type determined by
- ``descr``, that ranges from ``start`` to ``stop`` (exclusive) in
- increments of ``step``. Equivalent to arange( ``start``,
- ``stop``, ``step``, ``dtype`` ).
-
-.. c:function:: int PyArray_SetBaseObject(PyArrayObject* arr, PyObject* obj)
-
- .. versionadded:: 1.7
-
- This function **steals a reference** to ``obj`` and sets it as the
- base property of ``arr``.
-
- If you construct an array by passing in your own memory buffer as
- a parameter, you need to set the array's `base` property to ensure
- the lifetime of the memory buffer is appropriate.
-
- The return value is 0 on success, -1 on failure.
-
- If the object provided is an array, this function traverses the
- chain of `base` pointers so that each array points to the owner
- of the memory directly. Once the base is set, it may not be changed
- to another value.
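-
- A sketch of this ownership pattern (the capsule name and destructor
- are our own choices): wrap malloc'd memory in an array, then hand
- ownership to a capsule installed as the array's base:
-
- .. code-block:: c
-
-    /* Hypothetical destructor: free the buffer when the capsule dies. */
-    static void
-    free_buffer(PyObject *capsule)
-    {
-        free(PyCapsule_GetPointer(capsule, "my.buffer"));
-    }
-
-    /* ... inside some function ... */
-    npy_intp dims[1] = {n};
-    double *buf = malloc(n * sizeof(double));
-    PyObject *arr = PyArray_SimpleNewFromData(1, dims, NPY_DOUBLE, buf);
-    PyObject *owner = PyCapsule_New(buf, "my.buffer", free_buffer);
-    /* PyArray_SetBaseObject steals the reference to owner. */
-    if (arr == NULL || owner == NULL ||
-        PyArray_SetBaseObject((PyArrayObject *)arr, owner) < 0) {
-        /* error handling elided */
-    }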
-
-From other objects
-^^^^^^^^^^^^^^^^^^
-
-.. c:function:: PyObject* PyArray_FromAny( \
- PyObject* op, PyArray_Descr* dtype, int min_depth, int max_depth, \
- int requirements, PyObject* context)
-
- This is the main function used to obtain an array from any nested
- sequence, or object that exposes the array interface, *op*. The
- parameters allow specification of the required *dtype*, the
- minimum (*min_depth*) and maximum (*max_depth*) number of
- dimensions acceptable, and other *requirements* for the array. This
- function **steals a reference** to the dtype argument, which needs
- to be a :c:type:`PyArray_Descr` structure
- indicating the desired data-type (including required
- byteorder). The *dtype* argument may be ``NULL``, indicating that any
- data-type (and byteorder) is acceptable. Unless
- :c:data:`NPY_ARRAY_FORCECAST` is present in *requirements*,
- this call will generate an error if the data
- type cannot be safely obtained from the object. If you want to use
- ``NULL`` for the *dtype* and ensure the array is not swapped then
- use :c:func:`PyArray_CheckFromAny`. A value of 0 for either of the
- depth parameters causes the parameter to be ignored. Any of the
- following array flags can be added (*e.g.* using \|) to get the
- *requirements* argument. If your code can handle general (*e.g.*
- strided, byte-swapped, or unaligned) arrays then *requirements*
- may be 0. Also, if *op* is not already an array (or does not
- expose the array interface), then a new array will be created (and
- filled from *op* using the sequence protocol). The new array will
- have :c:data:`NPY_ARRAY_DEFAULT` as its flags member. The *context* argument
- is passed to the :obj:`~numpy.class.__array__` method of *op* and is only used if
- the array is constructed that way. Almost always this
- parameter is ``NULL``. A usage sketch follows the list of flag
- constants below.
-
- .. c:var:: NPY_ARRAY_C_CONTIGUOUS
-
- Make sure the returned array is C-style contiguous
-
- .. c:var:: NPY_ARRAY_F_CONTIGUOUS
-
- Make sure the returned array is Fortran-style contiguous.
-
- .. c:var:: NPY_ARRAY_ALIGNED
-
- Make sure the returned array is aligned on proper boundaries for its
- data type. An aligned array has the data pointer and every stride
- factor as a multiple of the alignment factor for the data-type
- descriptor.
-
- .. c:var:: NPY_ARRAY_WRITEABLE
-
- Make sure the returned array can be written to.
-
- .. c:var:: NPY_ARRAY_ENSURECOPY
-
- Make sure a copy is made of *op*. If this flag is not
- present, data is not copied if it can be avoided.
-
- .. c:var:: NPY_ARRAY_ENSUREARRAY
-
- Make sure the result is a base-class ndarray. By
- default, if *op* is an instance of a subclass of
- ndarray, an instance of that same subclass is returned. If
- this flag is set, an ndarray object will be returned instead.
-
- .. c:var:: NPY_ARRAY_FORCECAST
-
- Force a cast to the output type even if it cannot be done
- safely. Without this flag, a data cast will occur only if it
- can be done safely, otherwise an error is raised.
-
- .. c:var:: NPY_ARRAY_WRITEBACKIFCOPY
-
- If *op* is already an array, but does not satisfy the
- requirements, then a copy is made (which will satisfy the
- requirements). If this flag is present and a copy (of an object
- that is already an array) must be made, then the corresponding
- :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag is set in the returned
- copy and *op* is made to be read-only. You must be sure to call
- :c:func:`PyArray_ResolveWritebackIfCopy` to copy the contents
- back into *op*, after which *op* is made writeable again. If *op*
- is not writeable to begin with, or if it is not already an array,
- then an error is raised.
-
- .. c:var:: NPY_ARRAY_UPDATEIFCOPY
-
- Deprecated. Use :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, which is similar.
- This flag "automatically" copies the data back when the returned
- array is deallocated, which is not supported in all python
- implementations.
-
- .. c:var:: NPY_ARRAY_BEHAVED
-
- :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE`
-
- .. c:var:: NPY_ARRAY_CARRAY
-
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_BEHAVED`
-
- .. c:var:: NPY_ARRAY_CARRAY_RO
-
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
-
- .. c:var:: NPY_ARRAY_FARRAY
-
- :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_BEHAVED`
-
- .. c:var:: NPY_ARRAY_FARRAY_RO
-
- :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
-
- .. c:var:: NPY_ARRAY_DEFAULT
-
- :c:data:`NPY_ARRAY_CARRAY`
-
- .. c:var:: NPY_ARRAY_IN_ARRAY
-
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
-
- .. c:var:: NPY_ARRAY_IN_FARRAY
-
- :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
-
- .. c:var:: NPY_OUT_ARRAY
-
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
- :c:data:`NPY_ARRAY_ALIGNED`
-
- .. c:var:: NPY_ARRAY_OUT_ARRAY
-
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` \|
- :c:data:`NPY_ARRAY_WRITEABLE`
-
- .. c:var:: NPY_ARRAY_OUT_FARRAY
-
- :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
- :c:data:`NPY_ARRAY_ALIGNED`
-
- .. c:var:: NPY_ARRAY_INOUT_ARRAY
-
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
- :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \|
- :c:data:`NPY_ARRAY_UPDATEIFCOPY`
-
- .. c:var:: NPY_ARRAY_INOUT_FARRAY
-
- :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
- :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \|
- :c:data:`NPY_ARRAY_UPDATEIFCOPY`
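-
- As a sketch (variable names are ours), requesting an aligned,
- writeable, C-contiguous double array from an arbitrary object:
-
- .. code-block:: c
-
-    /* The descr reference is stolen by PyArray_FromAny. */
-    PyObject *arr = PyArray_FromAny(
-        op, PyArray_DescrFromType(NPY_DOUBLE), 0, 0,
-        NPY_ARRAY_CARRAY, NULL);
-    if (arr == NULL) {
-        return NULL;  /* conversion failed; exception is set */
-    }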
-
-.. c:function:: int PyArray_GetArrayParamsFromObject( \
- PyObject* op, PyArray_Descr* requested_dtype, npy_bool writeable, \
- PyArray_Descr** out_dtype, int* out_ndim, npy_intp* out_dims, \
- PyArrayObject** out_arr, PyObject* context)
-
- .. versionadded:: 1.6
-
- Retrieves the array parameters for viewing/converting an arbitrary
- PyObject* to a NumPy array. This allows the "innate type and shape"
- of Python list-of-lists to be discovered without
- actually converting to an array. PyArray_FromAny calls this function
- to analyze its input.
-
- In some cases, such as structured arrays and the :obj:`~numpy.class.__array__` interface,
- a data type needs to be used to make sense of the object. When
- this is needed, provide a Descr for 'requested_dtype', otherwise
- provide NULL. This reference is not stolen. Also, if the requested
- dtype doesn't modify the interpretation of the input, out_dtype will
- still get the "innate" dtype of the object, not the dtype passed
- in 'requested_dtype'.
-
- If writing to the value in 'op' is desired, set the boolean
- 'writeable' to 1. This raises an error when 'op' is a scalar, list
- of lists, or other non-writeable 'op'. This differs from passing
- :c:data:`NPY_ARRAY_WRITEABLE` to PyArray_FromAny, where the writeable array may
- be a copy of the input.
-
- When success (0 return value) is returned, either out_arr
- is filled with a non-NULL PyArrayObject and
- the rest of the parameters are untouched, or out_arr is
- filled with NULL, and the rest of the parameters are filled.
-
- Typical usage:
-
- .. code-block:: c
-
- PyArrayObject *arr = NULL;
- PyArray_Descr *dtype = NULL;
- int ndim = 0;
- npy_intp dims[NPY_MAXDIMS];
-
- if (PyArray_GetArrayParamsFromObject(op, NULL, 1, &dtype,
- &ndim, dims, &arr, NULL) < 0) {
- return NULL;
- }
- if (arr == NULL) {
- /*
- ... validate/change dtype, validate flags, ndim, etc ...
- Could make custom strides here too */
- arr = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, ndim,
- dims, NULL,
- fortran ? NPY_ARRAY_F_CONTIGUOUS : 0,
- NULL);
- if (arr == NULL) {
- return NULL;
- }
- if (PyArray_CopyObject(arr, op) < 0) {
- Py_DECREF(arr);
- return NULL;
- }
- }
- else {
- /*
- ... in this case the other parameters weren't filled, just
- validate and possibly copy arr itself ...
- */
- }
- /*
- ... use arr ...
- */
-
-.. c:function:: PyObject* PyArray_CheckFromAny( \
- PyObject* op, PyArray_Descr* dtype, int min_depth, int max_depth, \
- int requirements, PyObject* context)
-
- Nearly identical to :c:func:`PyArray_FromAny` (...) except
- *requirements* can contain :c:data:`NPY_ARRAY_NOTSWAPPED` (over-riding the
- specification in *dtype*) and :c:data:`NPY_ARRAY_ELEMENTSTRIDES` which
- indicates that the array should be aligned in the sense that the
- strides are multiples of the element size.
-
- In versions 1.6 and earlier of NumPy, the following flags
- did not have the _ARRAY_ macro namespace in them. That form
- of the constant names is deprecated in 1.7.
-
-.. c:var:: NPY_ARRAY_NOTSWAPPED
-
- Make sure the returned array has a data-type descriptor that is in
- machine byte-order, over-riding any specification in the *dtype*
- argument. Normally, the byte-order requirement is determined by
- the *dtype* argument. If this flag is set and the dtype argument
- does not indicate a machine byte-order descriptor (or is NULL and
- the object is already an array with a data-type descriptor that is
- not in machine byte-order), then a new data-type descriptor is
- created and used with its byte-order field set to native.
-
-.. c:var:: NPY_ARRAY_BEHAVED_NS
-
- :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \| :c:data:`NPY_ARRAY_NOTSWAPPED`
-
-.. c:var:: NPY_ARRAY_ELEMENTSTRIDES
-
- Make sure the returned array has strides that are multiples of the
- element size.
-
-.. c:function:: PyObject* PyArray_FromArray( \
- PyArrayObject* op, PyArray_Descr* newtype, int requirements)
-
- Special case of :c:func:`PyArray_FromAny` for when *op* is already an
- array but it needs to be of a specific *newtype* (including
- byte-order) or has certain *requirements*.
-
-.. c:function:: PyObject* PyArray_FromStructInterface(PyObject* op)
-
- Returns an ndarray object from a Python object that exposes the
- :obj:`__array_struct__` attribute and follows the array interface
- protocol. If the object does not contain this attribute then a
- borrowed reference to :c:data:`Py_NotImplemented` is returned.
-
-.. c:function:: PyObject* PyArray_FromInterface(PyObject* op)
-
- Returns an ndarray object from a Python object that exposes the
- :obj:`__array_interface__` attribute following the array interface
- protocol. If the object does not contain this attribute then a
- borrowed reference to :c:data:`Py_NotImplemented` is returned.
-
-.. c:function:: PyObject* PyArray_FromArrayAttr( \
- PyObject* op, PyArray_Descr* dtype, PyObject* context)
-
- Return an ndarray object from a Python object that exposes the
- :obj:`~numpy.class.__array__` method. The :obj:`~numpy.class.__array__` method can take 0, 1, or 2
- arguments ([dtype, context]) where *context* is used to pass
- information about where the :obj:`~numpy.class.__array__` method is being called
- from (currently only used in ufuncs).
-
-.. c:function:: PyObject* PyArray_ContiguousFromAny( \
- PyObject* op, int typenum, int min_depth, int max_depth)
-
- This function returns a (C-style) contiguous and behaved
- array from any nested sequence or array interface exporting
- object, *op*, of (non-flexible) type given by the enumerated
- *typenum*, of minimum depth *min_depth*, and of maximum depth
- *max_depth*. Equivalent to a call to :c:func:`PyArray_FromAny` with
- requirements set to :c:data:`NPY_ARRAY_DEFAULT` and the type_num member of the
- type argument set to *typenum*.
-
-.. c:function:: PyObject *PyArray_FromObject( \
- PyObject *op, int typenum, int min_depth, int max_depth)
-
- Return an aligned and in native-byteorder array from any nested
- sequence or array-interface exporting object, op, of a type given by
- the enumerated typenum. The minimum number of dimensions the array can
- have is given by min_depth while the maximum is max_depth. This is
- equivalent to a call to :c:func:`PyArray_FromAny` with requirements set to
- :c:data:`NPY_ARRAY_BEHAVED`.
-
-.. c:function:: PyObject* PyArray_EnsureArray(PyObject* op)
-
- This function **steals a reference** to ``op`` and makes sure that
- ``op`` is a base-class ndarray. It special cases array scalars,
- but otherwise calls :c:func:`PyArray_FromAny` ( ``op``, NULL, 0, 0,
- :c:data:`NPY_ARRAY_ENSUREARRAY`, NULL).
-
-.. c:function:: PyObject* PyArray_FromString( \
- char* string, npy_intp slen, PyArray_Descr* dtype, npy_intp num, \
- char* sep)
-
- Construct a one-dimensional ndarray of a single type from a binary
- or (ASCII) text ``string`` of length ``slen``. The data-type of
- the array to-be-created is given by ``dtype``. If num is -1, then
- **copy** the entire string and return an appropriately sized
- array, otherwise, ``num`` is the number of items to **copy** from
- the string. If ``sep`` is NULL (or ""), then interpret the string
- as bytes of binary data, otherwise convert the sub-strings
- separated by ``sep`` to items of data-type ``dtype``. Some
- data-types may not be readable in text mode and an error will be
- raised if that occurs. All errors return NULL.
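-
- For example (a sketch; the input literal is ours), parsing a
- space-separated text string into a double array:
-
- .. code-block:: c
-
-    /* The dtype reference is stolen by PyArray_FromString. */
-    char text[] = "1.0 2.0 3.0";
-    PyObject *arr = PyArray_FromString(
-        text, sizeof(text) - 1, PyArray_DescrFromType(NPY_DOUBLE),
-        -1, " ");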
-
-.. c:function:: PyObject* PyArray_FromFile( \
- FILE* fp, PyArray_Descr* dtype, npy_intp num, char* sep)
-
- Construct a one-dimensional ndarray of a single type from a binary
- or text file. The open file pointer is ``fp``, the data-type of
- the array to be created is given by ``dtype``. This must match
- the data in the file. If ``num`` is -1, then read until the end of
- the file and return an appropriately sized array, otherwise,
- ``num`` is the number of items to read. If ``sep`` is NULL (or
- ""), then read from the file in binary mode, otherwise read from
- the file in text mode with ``sep`` providing the item
- separator. Some array types cannot be read in text mode in which
- case an error is raised.
-
-.. c:function:: PyObject* PyArray_FromBuffer( \
- PyObject* buf, PyArray_Descr* dtype, npy_intp count, npy_intp offset)
-
- Construct a one-dimensional ndarray of a single type from an
- object, ``buf``, that exports the (single-segment) buffer protocol
- (or has an attribute __buffer\__ that returns an object that
- exports the buffer protocol). A writeable buffer will be tried
- first followed by a read-only buffer. The :c:data:`NPY_ARRAY_WRITEABLE`
- flag of the returned array will reflect which one was
- successful. The data is assumed to start at ``offset`` bytes from
- the start of the memory location for the object. The type of the
- data in the buffer will be interpreted depending on the data-type
- descriptor, ``dtype``. If ``count`` is negative then it will be
- determined from the size of the buffer and the requested itemsize,
- otherwise, ``count`` represents how many elements should be
- converted from the buffer.
-
-.. c:function:: int PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src)
-
- Copy from the source array, ``src``, into the destination array,
- ``dest``, performing a data-type conversion if necessary. If an
- error occurs return -1 (otherwise 0). The shape of ``src`` must be
- broadcastable to the shape of ``dest``. The data areas of dest
- and src must not overlap.
-
-.. c:function:: int PyArray_MoveInto(PyArrayObject* dest, PyArrayObject* src)
-
- Move data from the source array, ``src``, into the destination
- array, ``dest``, performing a data-type conversion if
- necessary. If an error occurs return -1 (otherwise 0). The shape
- of ``src`` must be broadcastable to the shape of ``dest``. The
- data areas of dest and src may overlap.
-
-.. c:function:: PyArrayObject* PyArray_GETCONTIGUOUS(PyObject* op)
-
- If ``op`` is already (C-style) contiguous and well-behaved then
- just return a reference, otherwise return a (contiguous and
- well-behaved) copy of the array. The parameter op must be a
- (sub-class of an) ndarray and no checking for that is done.
-
-.. c:function:: PyObject* PyArray_FROM_O(PyObject* obj)
-
- Convert ``obj`` to an ndarray. The argument can be any nested
- sequence or object that exports the array interface. This is a
- macro form of :c:func:`PyArray_FromAny` using ``NULL``, 0, 0, 0 for the
- other arguments. Your code must be able to handle any data-type
- descriptor and any combination of data-flags to use this macro.
-
-.. c:function:: PyObject* PyArray_FROM_OF(PyObject* obj, int requirements)
-
- Similar to :c:func:`PyArray_FROM_O` except it can take an argument
- of *requirements* indicating properties the resulting array must
- have. Available requirements that can be enforced are
- :c:data:`NPY_ARRAY_C_CONTIGUOUS`, :c:data:`NPY_ARRAY_F_CONTIGUOUS`,
- :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`,
- :c:data:`NPY_ARRAY_NOTSWAPPED`, :c:data:`NPY_ARRAY_ENSURECOPY`,
- :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, :c:data:`NPY_ARRAY_UPDATEIFCOPY`,
- :c:data:`NPY_ARRAY_FORCECAST`, and
- :c:data:`NPY_ARRAY_ENSUREARRAY`. Standard combinations of flags can also
- be used.
-
-.. c:function:: PyObject* PyArray_FROM_OT(PyObject* obj, int typenum)
-
- Similar to :c:func:`PyArray_FROM_O` except it can take an argument of
- *typenum* specifying the type-number of the returned array.
-
-.. c:function:: PyObject* PyArray_FROM_OTF( \
- PyObject* obj, int typenum, int requirements)
-
- Combination of :c:func:`PyArray_FROM_OF` and :c:func:`PyArray_FROM_OT`
- allowing both a *typenum* and a *flags* argument to be provided.
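-
- A common pattern (sketched with names of our own choosing) is
- converting an argument into a well-behaved double array:
-
- .. code-block:: c
-
-    /* Returns a new reference, or NULL with an exception set. */
-    PyObject *arr = PyArray_FROM_OTF(obj, NPY_DOUBLE,
-                                     NPY_ARRAY_IN_ARRAY);
-    if (arr == NULL) {
-        return NULL;
-    }
-    /* ... access the data via PyArray_DATA((PyArrayObject *)arr) ... */
-    Py_DECREF(arr);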
-
-.. c:function:: PyObject* PyArray_FROMANY( \
- PyObject* obj, int typenum, int min, int max, int requirements)
-
- Similar to :c:func:`PyArray_FromAny` except the data-type is
- specified using a typenumber. :c:func:`PyArray_DescrFromType`
- (*typenum*) is passed directly to :c:func:`PyArray_FromAny`. This
- macro also adds :c:data:`NPY_ARRAY_DEFAULT` to requirements if
- :c:data:`NPY_ARRAY_ENSURECOPY` is passed in as requirements.
-
-.. c:function:: PyObject *PyArray_CheckAxis( \
- PyObject* obj, int* axis, int requirements)
-
- Encapsulate the functionality of functions and methods that take
- the axis= keyword and work properly with None as the axis
- argument. The input array is ``obj``, while ``*axis`` is a
- converted integer (so that >=MAXDIMS is the None value), and
- ``requirements`` gives the needed properties of ``obj``. The
- output is a converted version of the input so that requirements
- are met and if needed a flattening has occurred. On output
- negative values of ``*axis`` are converted and the new value is
- checked to ensure consistency with the shape of ``obj``.
-
-
-Dealing with types
-------------------
-
-
-General check of Python Type
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. c:function:: PyArray_Check(PyObject *op)
-
- Evaluates true if *op* is a Python object whose type is a sub-type
- of :c:data:`PyArray_Type`.
-
-.. c:function:: PyArray_CheckExact(PyObject *op)
-
- Evaluates true if *op* is a Python object with type
- :c:data:`PyArray_Type`.
-
-.. c:function:: PyArray_HasArrayInterface(PyObject *op, PyObject *out)
-
- If ``op`` implements any part of the array interface, then ``out``
- will contain a new reference to the newly created ndarray using
- the interface or ``out`` will contain ``NULL`` if an error during
- conversion occurs. Otherwise, out will contain a borrowed
- reference to :c:data:`Py_NotImplemented` and no error condition is set.
-
-.. c:function:: PyArray_HasArrayInterfaceType(op, type, context, out)
-
- If ``op`` implements any part of the array interface, then ``out``
- will contain a new reference to the newly created ndarray using
- the interface or ``out`` will contain ``NULL`` if an error during
- conversion occurs. Otherwise, out will contain a borrowed
- reference to Py_NotImplemented and no error condition is set.
- This version allows setting of the type and context in the part of
- the array interface that looks for the :obj:`~numpy.class.__array__` attribute.
-
-.. c:function:: PyArray_IsZeroDim(op)
-
- Evaluates true if *op* is an instance of (a subclass of)
- :c:data:`PyArray_Type` and has 0 dimensions.
-
-.. c:function:: PyArray_IsScalar(op, cls)
-
- Evaluates true if *op* is an instance of :c:data:`Py{cls}ArrType_Type`.
-
-.. c:function:: PyArray_CheckScalar(op)
-
- Evaluates true if *op* is either an array scalar (an instance of a
- sub-type of :c:data:`PyGenericArr_Type` ), or an instance of (a
- sub-class of) :c:data:`PyArray_Type` whose dimensionality is 0.
-
-.. c:function:: PyArray_IsPythonNumber(op)
-
- Evaluates true if *op* is an instance of a builtin numeric type (int,
- float, complex, long, bool)
-
-.. c:function:: PyArray_IsPythonScalar(op)
-
- Evaluates true if *op* is a builtin Python scalar object (int,
- float, complex, str, unicode, long, bool).
-
-.. c:function:: PyArray_IsAnyScalar(op)
-
- Evaluates true if *op* is either a Python scalar object (see
- :c:func:`PyArray_IsPythonScalar`) or an array scalar (an instance of a sub-
- type of :c:data:`PyGenericArr_Type` ).
-
-.. c:function:: PyArray_CheckAnyScalar(op)
-
- Evaluates true if *op* is a Python scalar object (see
- :c:func:`PyArray_IsPythonScalar`), an array scalar (an instance of a
- sub-type of :c:data:`PyGenericArr_Type`) or an instance of a sub-type of
- :c:data:`PyArray_Type` whose dimensionality is 0.
-
-
-Data-type checking
-^^^^^^^^^^^^^^^^^^
-
-For the typenum macros, the argument is an integer representing an
-enumerated array data type. For the array type checking macros the
-argument must be a :c:type:`PyObject *<PyObject>` that can be directly interpreted as a
-:c:type:`PyArrayObject *`.
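-
-As a small sketch, dispatching on the kind of an array's data type:
-
-.. code-block:: c
-
-   if (PyArray_ISFLOAT(arr)) {
-       /* floating point path */
-   }
-   else if (PyArray_ISINTEGER(arr)) {
-       /* signed or unsigned integer path */
-   }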
-
-.. c:function:: PyTypeNum_ISUNSIGNED(num)
-
-.. c:function:: PyDataType_ISUNSIGNED(descr)
-
-.. c:function:: PyArray_ISUNSIGNED(obj)
-
- Type represents an unsigned integer.
-
-.. c:function:: PyTypeNum_ISSIGNED(num)
-
-.. c:function:: PyDataType_ISSIGNED(descr)
-
-.. c:function:: PyArray_ISSIGNED(obj)
-
- Type represents a signed integer.
-
-.. c:function:: PyTypeNum_ISINTEGER(num)
-
-.. c:function:: PyDataType_ISINTEGER(descr)
-
-.. c:function:: PyArray_ISINTEGER(obj)
-
- Type represents any integer.
-
-.. c:function:: PyTypeNum_ISFLOAT(num)
-
-.. c:function:: PyDataType_ISFLOAT(descr)
-
-.. c:function:: PyArray_ISFLOAT(obj)
-
- Type represents any floating point number.
-
-.. c:function:: PyTypeNum_ISCOMPLEX(num)
-
-.. c:function:: PyDataType_ISCOMPLEX(descr)
-
-.. c:function:: PyArray_ISCOMPLEX(obj)
-
- Type represents any complex floating point number.
-
-.. c:function:: PyTypeNum_ISNUMBER(num)
-
-.. c:function:: PyDataType_ISNUMBER(descr)
-
-.. c:function:: PyArray_ISNUMBER(obj)
-
- Type represents any integer, floating point, or complex floating point
- number.
-
-.. c:function:: PyTypeNum_ISSTRING(num)
-
-.. c:function:: PyDataType_ISSTRING(descr)
-
-.. c:function:: PyArray_ISSTRING(obj)
-
- Type represents a string data type.
-
-.. c:function:: PyTypeNum_ISPYTHON(num)
-
-.. c:function:: PyDataType_ISPYTHON(descr)
-
-.. c:function:: PyArray_ISPYTHON(obj)
-
- Type represents an enumerated type corresponding to one of the
- standard Python scalar types (bool, int, float, or complex).
-
-.. c:function:: PyTypeNum_ISFLEXIBLE(num)
-
-.. c:function:: PyDataType_ISFLEXIBLE(descr)
-
-.. c:function:: PyArray_ISFLEXIBLE(obj)
-
- Type represents one of the flexible array types ( :c:data:`NPY_STRING`,
- :c:data:`NPY_UNICODE`, or :c:data:`NPY_VOID` ).
-
-.. c:function:: PyDataType_ISUNSIZED(descr)
-
- Type has no size information attached, and can be resized. Should only be
- called on flexible dtypes. Types that are attached to an array will always
- be sized, hence there is no array form of this macro.
-
-.. c:function:: PyTypeNum_ISUSERDEF(num)
-
-.. c:function:: PyDataType_ISUSERDEF(descr)
-
-.. c:function:: PyArray_ISUSERDEF(obj)
-
- Type represents a user-defined type.
-
-.. c:function:: PyTypeNum_ISEXTENDED(num)
-
-.. c:function:: PyDataType_ISEXTENDED(descr)
-
-.. c:function:: PyArray_ISEXTENDED(obj)
-
- Type is either flexible or user-defined.
-
-.. c:function:: PyTypeNum_ISOBJECT(num)
-
-.. c:function:: PyDataType_ISOBJECT(descr)
-
-.. c:function:: PyArray_ISOBJECT(obj)
-
- Type represents object data type.
-
-.. c:function:: PyTypeNum_ISBOOL(num)
-
-.. c:function:: PyDataType_ISBOOL(descr)
-
-.. c:function:: PyArray_ISBOOL(obj)
-
- Type represents Boolean data type.
-
-.. c:function:: PyDataType_HASFIELDS(descr)
-
-.. c:function:: PyArray_HASFIELDS(obj)
-
- Type has fields associated with it.
-
-.. c:function:: PyArray_ISNOTSWAPPED(m)
-
- Evaluates true if the data area of the ndarray *m* is in machine
- byte-order according to the array's data-type descriptor.
-
-.. c:function:: PyArray_ISBYTESWAPPED(m)
-
- Evaluates true if the data area of the ndarray *m* is **not** in
- machine byte-order according to the array's data-type descriptor.
-
-.. c:function:: Bool PyArray_EquivTypes( \
- PyArray_Descr* type1, PyArray_Descr* type2)
-
- Return :c:data:`NPY_TRUE` if *type1* and *type2* actually represent
- equivalent types for this platform (the fortran member of each
- type is ignored). For example, on 32-bit platforms,
- :c:data:`NPY_LONG` and :c:data:`NPY_INT` are equivalent. Otherwise
- return :c:data:`NPY_FALSE`.
-
-.. c:function:: Bool PyArray_EquivArrTypes( \
- PyArrayObject* a1, PyArrayObject* a2)
-
- Return :c:data:`NPY_TRUE` if *a1* and *a2* are arrays with equivalent
- types for this platform.
-
-.. c:function:: Bool PyArray_EquivTypenums(int typenum1, int typenum2)
-
- Special case of :c:func:`PyArray_EquivTypes` (...) that does not accept
- flexible data types but may be easier to call.
-
-.. c:function:: int PyArray_EquivByteorders({byteorder} b1, {byteorder} b2)
-
- True if byteorder characters ( :c:data:`NPY_LITTLE`,
- :c:data:`NPY_BIG`, :c:data:`NPY_NATIVE`, :c:data:`NPY_IGNORE` ) are
- either equal or equivalent as to their specification of a native
- byte order. Thus, on a little-endian machine :c:data:`NPY_LITTLE`
- and :c:data:`NPY_NATIVE` are equivalent where they are not
- equivalent on a big-endian machine.
-
-
-Converting data types
-^^^^^^^^^^^^^^^^^^^^^
-
-.. c:function:: PyObject* PyArray_Cast(PyArrayObject* arr, int typenum)
-
- Mainly for backwards compatibility to the Numeric C-API and for
- simple casts to non-flexible types. Return a new array object with
- the elements of *arr* cast to the data-type *typenum* which must
- be one of the enumerated types and not a flexible type.
-
-.. c:function:: PyObject* PyArray_CastToType( \
- PyArrayObject* arr, PyArray_Descr* type, int fortran)
-
- Return a new array of the *type* specified, casting the elements
- of *arr* as appropriate. The fortran argument specifies the
- ordering of the output array.
-
-.. c:function:: int PyArray_CastTo(PyArrayObject* out, PyArrayObject* in)
-
- As of 1.6, this function simply calls :c:func:`PyArray_CopyInto`,
- which handles the casting.
-
- Cast the elements of the array *in* into the array *out*. The
- output array should be writeable, have an integer-multiple of the
- number of elements in the input array (more than one copy can be
- placed in out), and have a data type that is one of the builtin
- types. Returns 0 on success and -1 if an error occurs.
-
-.. c:function:: PyArray_VectorUnaryFunc* PyArray_GetCastFunc( \
- PyArray_Descr* from, int totype)
-
- Return the low-level casting function to cast from the given
- descriptor to the builtin type number. If no casting function
- exists return ``NULL`` and set an error. Using this function
- instead of direct access to *from* ->f->cast will allow support of
- any user-defined casting functions added to a descriptor's casting
- dictionary.
-
-.. c:function:: int PyArray_CanCastSafely(int fromtype, int totype)
-
- Returns non-zero if an array of data type *fromtype* can be cast
- to an array of data type *totype* without losing information. An
- exception is that 64-bit integers are allowed to be cast to 64-bit
- floating point values even though this can lose precision on large
- integers so as not to proliferate the use of long doubles without
- explicit requests. Flexible array types are not checked according
- to their lengths with this function.
-
-.. c:function:: int PyArray_CanCastTo( \
- PyArray_Descr* fromtype, PyArray_Descr* totype)
-
- :c:func:`PyArray_CanCastTypeTo` supersedes this function in
- NumPy 1.6 and later.
-
- Equivalent to PyArray_CanCastTypeTo(fromtype, totype, NPY_SAFE_CASTING).
-
-.. c:function:: int PyArray_CanCastTypeTo( \
- PyArray_Descr* fromtype, PyArray_Descr* totype, NPY_CASTING casting)
-
- .. versionadded:: 1.6
-
- Returns non-zero if an array of data type *fromtype* (which can
- include flexible types) can be cast safely to an array of data
- type *totype* (which can include flexible types) according to
- the casting rule *casting*. For simple types with :c:data:`NPY_SAFE_CASTING`,
- this is basically a wrapper around :c:func:`PyArray_CanCastSafely`, but
- for flexible types such as strings or unicode, it produces results
- taking into account their sizes. Integer and float types can only be cast
- to a string or unicode type using :c:data:`NPY_SAFE_CASTING` if the string
- or unicode type is big enough to hold the max value of the integer/float
- type being cast from.
-
-.. c:function:: int PyArray_CanCastArrayTo( \
- PyArrayObject* arr, PyArray_Descr* totype, NPY_CASTING casting)
-
- .. versionadded:: 1.6
-
- Returns non-zero if *arr* can be cast to *totype* according
- to the casting rule given in *casting*. If *arr* is an array
- scalar, its value is taken into account, and non-zero is also
- returned when the value will not overflow or be truncated to
- an integer when converting to a smaller type.
-
- This is almost the same as the result of
- PyArray_CanCastTypeTo(PyArray_MinScalarType(arr), totype, casting),
- but it also handles a special case arising because the set
- of uint values is not a subset of the int values for types with the
- same number of bits.
-
-.. c:function:: PyArray_Descr* PyArray_MinScalarType(PyArrayObject* arr)
-
- .. versionadded:: 1.6
-
- If *arr* is an array, returns its data type descriptor, but if
- *arr* is an array scalar (has 0 dimensions), it finds the data type
- of smallest size to which the value may be converted
- without overflow or truncation to an integer.
-
- This function will not demote complex to float or anything to
- boolean, but will demote a signed integer to an unsigned integer
- when the scalar value is positive.
-
-.. c:function:: PyArray_Descr* PyArray_PromoteTypes( \
- PyArray_Descr* type1, PyArray_Descr* type2)
-
- .. versionadded:: 1.6
-
- Finds the data type of smallest size and kind to which *type1* and
- *type2* may be safely converted. This function is symmetric and
- associative. A string or unicode result will be the proper size for
- storing the max value of the input types converted to a string or unicode.
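-
- For instance (a sketch), promoting ``int32`` with ``float32``
- yields ``float64``:
-
- .. code-block:: c
-
-    PyArray_Descr *t1 = PyArray_DescrFromType(NPY_INT32);
-    PyArray_Descr *t2 = PyArray_DescrFromType(NPY_FLOAT32);
-    /* Returns a new reference; t1 and t2 are not stolen. */
-    PyArray_Descr *res = PyArray_PromoteTypes(t1, t2);
-    /* res->type_num == NPY_DOUBLE */
-    Py_DECREF(t1);
-    Py_DECREF(t2);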
-
-.. c:function:: PyArray_Descr* PyArray_ResultType( \
- npy_intp narrs, PyArrayObject**arrs, npy_intp ndtypes, \
- PyArray_Descr**dtypes)
-
- .. versionadded:: 1.6
-
- This applies type promotion to all the inputs,
- using the NumPy rules for combining scalars and arrays, to
- determine the output type of a set of operands. This is the
- same result type that ufuncs produce. The specific algorithm
- used is as follows.
-
- Categories are determined by first checking whether the maximum
- kind of all the arrays and scalars is boolean, integer (int/uint),
- or floating point (float/complex).
-
- If there are only scalars or the maximum category of the scalars
- is higher than the maximum category of the arrays,
- the data types are combined with :c:func:`PyArray_PromoteTypes`
- to produce the return value.
-
- Otherwise, PyArray_MinScalarType is called on each array, and
- the resulting data types are all combined with
- :c:func:`PyArray_PromoteTypes` to produce the return value.
-
- The set of int values is not a subset of the uint values for types
- with the same number of bits, something not reflected in
- :c:func:`PyArray_MinScalarType`, but handled as a special case in
- PyArray_ResultType.
-
-.. c:function:: int PyArray_ObjectType(PyObject* op, int mintype)
-
- This function is superseded by :c:func:`PyArray_MinScalarType` and/or
- :c:func:`PyArray_ResultType`.
-
- This function is useful for determining a common type that two or
- more arrays can be converted to. It only works for non-flexible
- array types as no itemsize information is passed. The *mintype*
- argument represents the minimum type acceptable, and *op*
- represents the object that will be converted to an array. The
- return value is the enumerated typenumber that represents the
- data-type that *op* should have.
-
-.. c:function:: void PyArray_ArrayType( \
- PyObject* op, PyArray_Descr* mintype, PyArray_Descr* outtype)
-
- This function is superseded by :c:func:`PyArray_ResultType`.
-
- This function works similarly to :c:func:`PyArray_ObjectType` (...)
- except it handles flexible arrays. The *mintype* argument can have
- an itemsize member and the *outtype* argument will have an
- itemsize member at least as big but perhaps bigger depending on
- the object *op*.
-
-.. c:function:: PyArrayObject** PyArray_ConvertToCommonType( \
- PyObject* op, int* n)
-
- The functionality this provides is largely superseded by the
- iterator :c:type:`NpyIter` introduced in 1.6, with the flag
- :c:data:`NPY_ITER_COMMON_DTYPE` or with the same dtype parameter for
- all operands.
-
- Convert a sequence of Python objects contained in *op* to an array
- of ndarrays each having the same data type. The type is selected
- based on the typenumber (larger type number is chosen over a
- smaller one) ignoring objects that are only scalars. The length of
- the sequence is returned in *n*, and an *n* -length array of
- :c:type:`PyArrayObject` pointers is the return value (or ``NULL`` if an
- error occurs). The returned array must be freed by the caller of
- this routine (using :c:func:`PyDataMem_FREE` ) and all the array objects
- in it ``DECREF`` 'd or a memory-leak will occur. The example
- template-code below shows a typical usage:
-
- .. code-block:: c
-
- mps = PyArray_ConvertToCommonType(obj, &n);
- if (mps==NULL) return NULL;
- {code}
- <before return>
- for (i=0; i<n; i++) Py_DECREF(mps[i]);
- PyDataMem_FREE(mps);
- {return}
-
-.. c:function:: char* PyArray_Zero(PyArrayObject* arr)
-
- A pointer to newly created memory of size *arr* ->itemsize that
- holds the representation of 0 for that type. The returned pointer,
- *ret*, **must be freed** using :c:func:`PyDataMem_FREE` (ret) when it is
- not needed anymore.
-
-.. c:function:: char* PyArray_One(PyArrayObject* arr)
-
- A pointer to newly created memory of size *arr* ->itemsize that
- holds the representation of 1 for that type. The returned pointer,
- *ret*, **must be freed** using :c:func:`PyDataMem_FREE` (ret) when it
- is not needed anymore.
-
-.. c:function:: int PyArray_ValidType(int typenum)
-
- Returns :c:data:`NPY_TRUE` if *typenum* represents a valid type-number
- (builtin or user-defined or character code). Otherwise, this
- function returns :c:data:`NPY_FALSE`.
-
-
-New data types
-^^^^^^^^^^^^^^
-
-.. c:function:: void PyArray_InitArrFuncs(PyArray_ArrFuncs* f)
-
- Initialize all function pointers and members to ``NULL``.
-
-.. c:function:: int PyArray_RegisterDataType(PyArray_Descr* dtype)
-
- Register a data-type as a new user-defined data type for
- arrays. The type must have most of its entries filled in. This is
- not always checked and errors can produce segfaults. In
- particular, the typeobj member of the ``dtype`` structure must be
- filled with a Python type that has a fixed-size element-size that
- corresponds to the elsize member of *dtype*. Also the ``f``
- member must have the required functions: nonzero, copyswap,
- copyswapn, getitem, setitem, and cast (some of the cast functions
- may be ``NULL`` if no support is desired). To avoid confusion, you
- should choose a unique character typecode but this is not enforced
- and not relied on internally.
-
- A user-defined type number is returned that uniquely identifies
- the type. A pointer to the new structure can then be obtained from
- :c:func:`PyArray_DescrFromType` using the returned type number. A -1 is
- returned if an error occurs. If this *dtype* has already been
- registered (checked only by the address of the pointer), then
- return the previously-assigned type-number.
-
-.. c:function:: int PyArray_RegisterCastFunc( \
- PyArray_Descr* descr, int totype, PyArray_VectorUnaryFunc* castfunc)
-
- Register a low-level casting function, *castfunc*, to convert
- from the data-type, *descr*, to the given data-type number,
- *totype*. Any old casting function is over-written. A ``0`` is
- returned on success or a ``-1`` on failure.
-
-.. c:function:: int PyArray_RegisterCanCast( \
- PyArray_Descr* descr, int totype, NPY_SCALARKIND scalar)
-
- Register the data-type number, *totype*, as castable from
- data-type object, *descr*, of the given *scalar* kind. Use
- *scalar* = :c:data:`NPY_NOSCALAR` to register that an array of data-type
- *descr* can be cast safely to a data-type whose type_number is
- *totype*.
-
-
-Special functions for NPY_OBJECT
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. c:function:: int PyArray_INCREF(PyArrayObject* op)
-
- Used for an array, *op*, that contains any Python objects. It
- increments the reference count of every object in the array
- according to the data-type of *op*. A -1 is returned if an error
- occurs, otherwise 0 is returned.
-
-.. c:function:: void PyArray_Item_INCREF(char* ptr, PyArray_Descr* dtype)
-
- A function to INCREF all the objects at the location *ptr*
- according to the data-type *dtype*. If *ptr* is the start of a
- structured type with an object at any offset, then this will (recursively)
- increment the reference count of all object-like items in the
- structured type.
-
-.. c:function:: int PyArray_XDECREF(PyArrayObject* op)
-
- Used for an array, *op*, that contains any Python objects. It
- decrements the reference count of every object in the array
- according to the data-type of *op*. Normal return value is 0. A
- -1 is returned if an error occurs.
-
-.. c:function:: void PyArray_Item_XDECREF(char* ptr, PyArray_Descr* dtype)
-
- A function to XDECREF all the object-like items at the location
- *ptr* as recorded in the data-type, *dtype*. This works
- recursively so that if ``dtype`` itself has fields with data-types
- that contain object-like items, all the object-like fields will be
- XDECREF ``'d``.
-
-.. c:function:: void PyArray_FillObjectArray(PyArrayObject* arr, PyObject* obj)
-
- Fill a newly created array with a single value obj at all
- locations in the structure with object data-types. No checking is
- performed but *arr* must be of data-type :c:type:`NPY_OBJECT` and be
- single-segment and uninitialized (no previous objects in
- position). Use :c:func:`PyArray_XDECREF` (*arr*) if you need to
- decrement all the items in the object array prior to calling this
- function.
-
-.. c:function:: int PyArray_SetUpdateIfCopyBase(PyArrayObject* arr, PyArrayObject* base)
-
- Precondition: ``arr`` is a copy of ``base`` (though possibly with different
- strides, ordering, etc.) Set the UPDATEIFCOPY flag and ``arr->base`` so
- that when ``arr`` is destructed, it will copy any changes back to ``base``.
- DEPRECATED, use :c:func:`PyArray_SetWritebackIfCopyBase`.
-
- Returns 0 for success, -1 for failure.
-
-.. c:function:: int PyArray_SetWritebackIfCopyBase(PyArrayObject* arr, PyArrayObject* base)
-
- Precondition: ``arr`` is a copy of ``base`` (though possibly with different
- strides, ordering, etc.) Sets the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag
- and ``arr->base``, and set ``base`` to READONLY. Call
- :c:func:`PyArray_ResolveWritebackIfCopy` before calling
- ``Py_DECREF`` in order to copy any changes back to ``base`` and
- reset the READONLY flag.
-
- Returns 0 for success, -1 for failure.
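-
- A sketch of the full round trip (variable names are ours):
-
- .. code-block:: c
-
-    /* copy is a well-behaved copy of base; writes to copy should
-       eventually land back in base. Note this call steals a
-       reference to base. */
-    if (PyArray_SetWritebackIfCopyBase(copy, base) < 0) {
-        return NULL;
-    }
-    /* ... modify the data of copy ... */
-    PyArray_ResolveWritebackIfCopy(copy);  /* write back into base */
-    Py_DECREF(copy);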
-
-
-Array flags
------------
-
-The ``flags`` attribute of the ``PyArrayObject`` structure contains
-important information about the memory used by the array (pointed to
-by the data member). This flag information must be kept accurate or
-strange results and even segfaults may result.
-
-There are 6 (binary) flags that describe the memory area used by the
-data buffer. These constants are defined in ``arrayobject.h`` and
-determine the bit-position of the flag. Python exposes a nice
-attribute-based interface as well as a dictionary-like interface for
-getting (and, if appropriate, setting) these flags.
-
-Memory areas of all kinds can be pointed to by an ndarray, necessitating
-these flags. If you get an arbitrary ``PyArrayObject`` in C-code, you
-need to be aware of the flags that are set. If you need to guarantee
-a certain kind of array (like :c:data:`NPY_ARRAY_C_CONTIGUOUS` and
-:c:data:`NPY_ARRAY_BEHAVED`), then pass these requirements into the
-PyArray_FromAny function.
-
-
-Basic Array Flags
-^^^^^^^^^^^^^^^^^
-
-An ndarray can have a data segment that is not a simple contiguous
-chunk of well-behaved memory you can manipulate. It may not be aligned
-with word boundaries (very important on some platforms). It might have
-its data in a different byte-order than the machine recognizes. It
-might not be writeable. It might be in Fortran-contiguous order. The
-array flags are used to indicate what can be said about data
-associated with an array.
-
-In versions 1.6 and earlier of NumPy, the following flags
-did not have the _ARRAY_ macro namespace in them. That form
-of the constant names is deprecated in 1.7.
-
-.. c:var:: NPY_ARRAY_C_CONTIGUOUS
-
- The data area is in C-style contiguous order (last index varies the
- fastest).
-
-.. c:var:: NPY_ARRAY_F_CONTIGUOUS
-
- The data area is in Fortran-style contiguous order (first index varies
- the fastest).
-
-.. note::
-
- Arrays can be both C-style and Fortran-style contiguous simultaneously.
- This is clear for 1-dimensional arrays, but can also be true for higher
- dimensional arrays.
-
- Even for contiguous arrays a stride for a given dimension
- ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
- or the array has no elements.
- It does *not* generally hold that ``self.strides[-1] == self.itemsize``
- for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
- Fortran-style contiguous arrays. The correct way to access the
- ``itemsize`` of an array from the C API is ``PyArray_ITEMSIZE(arr)``.
-
- .. seealso:: :ref:`Internal memory layout of an ndarray <arrays.ndarray>`
-
-.. c:var:: NPY_ARRAY_OWNDATA
-
- The data area is owned by this array.
-
-.. c:var:: NPY_ARRAY_ALIGNED
-
- The data area and all array elements are aligned appropriately.
-
-.. c:var:: NPY_ARRAY_WRITEABLE
-
- The data area can be written to.
-
- Notice that the above 3 flags are defined so that a new,
- well-behaved array has these flags defined as true.
-
-.. c:var:: NPY_ARRAY_WRITEBACKIFCOPY
-
- The data area represents a (well-behaved) copy whose information
- should be transferred back to the original when
- :c:func:`PyArray_ResolveWritebackIfCopy` is called.
-
- This is a special flag that is set if this array represents a copy
- made because a user required certain flags in
- :c:func:`PyArray_FromAny` and a copy had to be made of some other
- array (and the user asked for this flag to be set in such a
- situation). The base attribute then points to the "misbehaved"
- array (which is set read_only). :c:func:`PyArray_ResolveWritebackIfCopy`
- will copy its contents back to the "misbehaved"
- array (casting if necessary) and will reset the "misbehaved" array
- to :c:data:`NPY_ARRAY_WRITEABLE`. If the "misbehaved" array was not
- :c:data:`NPY_ARRAY_WRITEABLE` to begin with then :c:func:`PyArray_FromAny`
- would have returned an error because :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`
- would not have been possible.
-
-.. c:var:: NPY_ARRAY_UPDATEIFCOPY
-
- A deprecated version of :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` which
- depends upon ``dealloc`` to trigger the writeback. For backwards
- compatibility, :c:func:`PyArray_ResolveWritebackIfCopy` is called at
- ``dealloc`` but relying
- on that behavior is deprecated and not supported in PyPy.
-
-:c:func:`PyArray_UpdateFlags` (obj, flags) will update the ``obj->flags``
-for ``flags`` which can be any of :c:data:`NPY_ARRAY_C_CONTIGUOUS`,
-:c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_ALIGNED`, or
-:c:data:`NPY_ARRAY_WRITEABLE`.
-
-
-Combinations of array flags
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. c:var:: NPY_ARRAY_BEHAVED
-
- :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE`
-
-.. c:var:: NPY_ARRAY_CARRAY
-
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_BEHAVED`
-
-.. c:var:: NPY_ARRAY_CARRAY_RO
-
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
-
-.. c:var:: NPY_ARRAY_FARRAY
-
- :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_BEHAVED`
-
-.. c:var:: NPY_ARRAY_FARRAY_RO
-
- :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
-
-.. c:var:: NPY_ARRAY_DEFAULT
-
- :c:data:`NPY_ARRAY_CARRAY`
-
-.. c:var:: NPY_ARRAY_UPDATE_ALL
-
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
-
-
-Flag-like constants
-^^^^^^^^^^^^^^^^^^^
-
-These constants are used in :c:func:`PyArray_FromAny` (and its macro forms) to
-specify desired properties of the new array.
-
-.. c:var:: NPY_ARRAY_FORCECAST
-
- Cast to the desired type, even if it can't be done without losing
- information.
-
-.. c:var:: NPY_ARRAY_ENSURECOPY
-
- Make sure the resulting array is a copy of the original.
-
-.. c:var:: NPY_ARRAY_ENSUREARRAY
-
- Make sure the resulting object is an actual ndarray, and not a sub-class.
-
-.. c:var:: NPY_ARRAY_NOTSWAPPED
-
- Only used in :c:func:`PyArray_CheckFromAny` to over-ride the byteorder
- of the data-type object passed in.
-
-.. c:var:: NPY_ARRAY_BEHAVED_NS
-
- :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \| :c:data:`NPY_ARRAY_NOTSWAPPED`
-
-
-Flag checking
-^^^^^^^^^^^^^
-
-For all of these macros *arr* must be an instance of a (subclass of)
-:c:data:`PyArray_Type`, but no checking is done.
-
-.. c:function:: PyArray_CHKFLAGS(arr, flags)
-
- The first parameter, arr, must be an ndarray or subclass. The
- parameter, *flags*, should be an integer consisting of bitwise
- combinations of the possible flags an array can have:
- :c:data:`NPY_ARRAY_C_CONTIGUOUS`, :c:data:`NPY_ARRAY_F_CONTIGUOUS`,
- :c:data:`NPY_ARRAY_OWNDATA`, :c:data:`NPY_ARRAY_ALIGNED`,
- :c:data:`NPY_ARRAY_WRITEABLE`, :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`,
- :c:data:`NPY_ARRAY_UPDATEIFCOPY`.
-
-.. c:function:: PyArray_IS_C_CONTIGUOUS(arr)
-
- Evaluates true if *arr* is C-style contiguous.
-
-.. c:function:: PyArray_IS_F_CONTIGUOUS(arr)
-
- Evaluates true if *arr* is Fortran-style contiguous.
-
-.. c:function:: PyArray_ISFORTRAN(arr)
-
- Evaluates true if *arr* is Fortran-style contiguous and *not*
- C-style contiguous. :c:func:`PyArray_IS_F_CONTIGUOUS`
- is the correct way to test for Fortran-style contiguity.
-
-.. c:function:: PyArray_ISWRITEABLE(arr)
-
- Evaluates true if the data area of *arr* can be written to
-
-.. c:function:: PyArray_ISALIGNED(arr)
-
- Evaluates true if the data area of *arr* is properly aligned on
- the machine.
-
-.. c:function:: PyArray_ISBEHAVED(arr)
-
- Evaluates true if the data area of *arr* is aligned and writeable
- and in machine byte-order according to its descriptor.
-
-.. c:function:: PyArray_ISBEHAVED_RO(arr)
-
- Evaluates true if the data area of *arr* is aligned and in machine
- byte-order.
-
-.. c:function:: PyArray_ISCARRAY(arr)
-
- Evaluates true if the data area of *arr* is C-style contiguous,
- and :c:func:`PyArray_ISBEHAVED` (*arr*) is true.
-
-.. c:function:: PyArray_ISFARRAY(arr)
-
- Evaluates true if the data area of *arr* is Fortran-style
- contiguous and :c:func:`PyArray_ISBEHAVED` (*arr*) is true.
-
-.. c:function:: PyArray_ISCARRAY_RO(arr)
-
- Evaluates true if the data area of *arr* is C-style contiguous,
- aligned, and in machine byte-order.
-
-.. c:function:: PyArray_ISFARRAY_RO(arr)
-
- Evaluates true if the data area of *arr* is Fortran-style
- contiguous, aligned, and in machine byte-order.
-
-.. c:function:: PyArray_ISONESEGMENT(arr)
-
- Evaluates true if the data area of *arr* consists of a single
- (C-style or Fortran-style) contiguous segment.
-
-.. c:function:: void PyArray_UpdateFlags(PyArrayObject* arr, int flagmask)
-
- The :c:data:`NPY_ARRAY_C_CONTIGUOUS`, :c:data:`NPY_ARRAY_ALIGNED`, and
- :c:data:`NPY_ARRAY_F_CONTIGUOUS` array flags can be "calculated" from the
- array object itself. This routine updates one or more of these
- flags of *arr* as specified in *flagmask* by performing the
- required calculation.
-
-
-.. warning::
-
- It is important to keep the flags updated (using
- :c:func:`PyArray_UpdateFlags` can help) whenever a manipulation with an
- array is performed that might cause them to change. Later
- calculations in NumPy that rely on the state of these flags do not
- repeat the calculation to update them.
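-
-For example, a sketch recomputing the layout flags after manually
-adjusting an array's strides or shape in C:
-
-.. code-block:: c
-
-   PyArray_UpdateFlags(arr, NPY_ARRAY_C_CONTIGUOUS |
-                            NPY_ARRAY_F_CONTIGUOUS |
-                            NPY_ARRAY_ALIGNED);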
-
-
-Array method alternative API
-----------------------------
-
-
-Conversion
-^^^^^^^^^^
-
-.. c:function:: PyObject* PyArray_GetField( \
- PyArrayObject* self, PyArray_Descr* dtype, int offset)
-
- Equivalent to :meth:`ndarray.getfield<numpy.ndarray.getfield>`
- (*self*, *dtype*, *offset*). This function `steals a reference
- <https://docs.python.org/3/c-api/intro.html?reference-count-details>`_
- to `PyArray_Descr` and returns a new array of the given `dtype` using
- the data in the current array at a specified `offset` in bytes. The
- `offset` plus the itemsize of the new array type must be less than
- ``self->descr->elsize`` or an error is raised. The same shape and strides
- as the original array are used. Therefore, this function has the
- effect of returning a field from a structured array. But, it can also
- be used to select specific bytes or groups of bytes from any array
- type.
-
-.. c:function:: int PyArray_SetField( \
- PyArrayObject* self, PyArray_Descr* dtype, int offset, PyObject* val)
-
- Equivalent to :meth:`ndarray.setfield<numpy.ndarray.setfield>` (*self*, *val*, *dtype*, *offset*
- ). Set the field starting at *offset* in bytes and of the given
- *dtype* to *val*. The *offset* plus *dtype* ->elsize must be less
- than *self* ->descr->elsize or an error is raised. Otherwise, the
- *val* argument is converted to an array and copied into the field
- pointed to. If necessary, the elements of *val* are repeated to
- fill the destination array, but the number of elements in the
- destination must be an integer multiple of the number of elements
- in *val*.
-
-.. c:function:: PyObject* PyArray_Byteswap(PyArrayObject* self, Bool inplace)
-
- Equivalent to :meth:`ndarray.byteswap<numpy.ndarray.byteswap>` (*self*, *inplace*). Return an array
- whose data area is byteswapped. If *inplace* is non-zero, then do
- the byteswap inplace and return a reference to self. Otherwise,
- create a byteswapped copy and leave self unchanged.
-
-.. c:function:: PyObject* PyArray_NewCopy(PyArrayObject* old, NPY_ORDER order)
-
- Equivalent to :meth:`ndarray.copy<numpy.ndarray.copy>` (*self*, *fortran*). Make a copy of the
- *old* array. The returned array is always aligned and writeable
- with data interpreted the same as the old array. If *order* is
- :c:data:`NPY_CORDER`, then a C-style contiguous array is returned. If
- *order* is :c:data:`NPY_FORTRANORDER`, then a Fortran-style contiguous
- array is returned. If *order is* :c:data:`NPY_ANYORDER`, then the array
- returned is Fortran-style contiguous only if the old one is;
- otherwise, it is C-style contiguous.
-
-.. c:function:: PyObject* PyArray_ToList(PyArrayObject* self)
-
- Equivalent to :meth:`ndarray.tolist<numpy.ndarray.tolist>` (*self*). Return a nested Python list
- from *self*.
-
-.. c:function:: PyObject* PyArray_ToString(PyArrayObject* self, NPY_ORDER order)
-
- Equivalent to :meth:`ndarray.tobytes<numpy.ndarray.tobytes>` (*self*, *order*). Return the bytes
- of this array in a Python string.
-
-.. c:function:: PyObject* PyArray_ToFile( \
- PyArrayObject* self, FILE* fp, char* sep, char* format)
-
- Write the contents of *self* to the file pointer *fp* in C-style
- contiguous fashion. Write the data as binary bytes if *sep* is the
- string ""or ``NULL``. Otherwise, write the contents of *self* as
- text using the *sep* string as the item separator. Each item will
- be printed to the file. If the *format* string is not ``NULL`` or
- "", then it is a Python print statement format string showing how
- the items are to be written.
-
-.. c:function:: int PyArray_Dump(PyObject* self, PyObject* file, int protocol)
-
- Pickle the object in *self* to the given *file* (either a string
- or a Python file object). If *file* is a Python string it is
- considered to be the name of a file which is then opened in binary
- mode. The given *protocol* is used (if *protocol* is negative, the
- highest available protocol is used). This is a simple wrapper around
- ``pickle.dump`` (*self*, *file*, *protocol*).
-
-.. c:function:: PyObject* PyArray_Dumps(PyObject* self, int protocol)
-
- Pickle the object in *self* to a Python string and return it. Use
- the Pickle *protocol* provided (or the highest available if
- *protocol* is negative).
-
-.. c:function:: int PyArray_FillWithScalar(PyArrayObject* arr, PyObject* obj)
-
- Fill the array, *arr*, with the given scalar object, *obj*. The
- object is first converted to the data type of *arr*, and then
- copied into every location. A -1 is returned if an error occurs,
- otherwise 0 is returned.
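-
- For illustration, a minimal sketch (the helper name ``zero_out`` is
- hypothetical) that fills an array with zero and propagates errors:
-
- .. code-block:: c
-
-     #include <Python.h>
-     #include <numpy/arrayobject.h>
-
-     /* Set every element of arr to 0.0; returns 0 on success, -1 on error. */
-     static int
-     zero_out(PyArrayObject *arr)
-     {
-         PyObject *zero = PyFloat_FromDouble(0.0);
-         int ret;
-
-         if (zero == NULL) {
-             return -1;
-         }
-         ret = PyArray_FillWithScalar(arr, zero);
-         Py_DECREF(zero);
-         return ret;
-     }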
-
-.. c:function:: PyObject* PyArray_View( \
- PyArrayObject* self, PyArray_Descr* dtype, PyTypeObject *ptype)
-
- Equivalent to :meth:`ndarray.view<numpy.ndarray.view>` (*self*, *dtype*). Return a new
- view of the array *self* as possibly a different data-type, *dtype*,
- and different array subclass *ptype*.
-
- If *dtype* is ``NULL``, then the returned array will have the same
- data type as *self*. The new data-type must be consistent with the
- size of *self*. Either the itemsizes must be identical, or *self* must
- be single-segment and the total number of bytes must be the same.
- In the latter case the dimensions of the returned array will be
- altered in the last (or first for Fortran-style contiguous arrays)
- dimension. The data area of the returned array and self is exactly
- the same.
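-
- As a minimal sketch (the helper name ``as_uint64`` is hypothetical),
- the following views an int64 array as uint64 without copying; per the
- usual descriptor rules, the call consumes the reference to the
- data-type object:
-
- .. code-block:: c
-
-     #include <Python.h>
-     #include <numpy/arrayobject.h>
-
-     static PyObject *
-     as_uint64(PyArrayObject *arr)
-     {
-         PyArray_Descr *dtype = PyArray_DescrFromType(NPY_UINT64);
-         if (dtype == NULL) {
-             return NULL;
-         }
-         /* NULL ptype keeps the ndarray type for the returned view. */
-         return PyArray_View(arr, dtype, NULL);
-     }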
-
-
-Shape Manipulation
-^^^^^^^^^^^^^^^^^^
-
-.. c:function:: PyObject* PyArray_Newshape( \
- PyArrayObject* self, PyArray_Dims* newshape, NPY_ORDER order)
-
- Result will be a new array (pointing to the same memory location
- as *self* if possible), but having a shape given by *newshape*.
- If the new shape is not compatible with the strides of *self*,
- then a copy of the array with the new specified shape will be
- returned.
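-
- A minimal sketch of building the :c:type:`PyArray_Dims` argument (the
- helper name ``reshape_2x3`` is hypothetical; the array is assumed to
- hold six elements):
-
- .. code-block:: c
-
-     #include <Python.h>
-     #include <numpy/arrayobject.h>
-
-     /* Return arr reshaped to 2x3 (a view whenever the strides permit). */
-     static PyObject *
-     reshape_2x3(PyArrayObject *arr)
-     {
-         npy_intp shape[2] = {2, 3};
-         PyArray_Dims newshape = {shape, 2};
-
-         return PyArray_Newshape(arr, &newshape, NPY_CORDER);
-     }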
-
-.. c:function:: PyObject* PyArray_Reshape(PyArrayObject* self, PyObject* shape)
-
- Equivalent to :meth:`ndarray.reshape<numpy.ndarray.reshape>` (*self*, *shape*) where *shape* is a
- sequence. Converts *shape* to a :c:type:`PyArray_Dims` structure and
- calls :c:func:`PyArray_Newshape` internally.
- For backward compatibility -- not recommended.
-
-.. c:function:: PyObject* PyArray_Squeeze(PyArrayObject* self)
-
- Equivalent to :meth:`ndarray.squeeze<numpy.ndarray.squeeze>` (*self*). Return a new view of *self*
- with all of the dimensions of length 1 removed from the shape.
-
-.. warning::
-
- matrix objects are always 2-dimensional. Therefore,
- :c:func:`PyArray_Squeeze` has no effect on arrays of matrix sub-class.
-
-.. c:function:: PyObject* PyArray_SwapAxes(PyArrayObject* self, int a1, int a2)
-
- Equivalent to :meth:`ndarray.swapaxes<numpy.ndarray.swapaxes>` (*self*, *a1*, *a2*). The returned
- array is a new view of the data in *self* with the given axes,
- *a1* and *a2*, swapped.
-
-.. c:function:: PyObject* PyArray_Resize( \
- PyArrayObject* self, PyArray_Dims* newshape, int refcheck, \
- NPY_ORDER fortran)
-
- Equivalent to :meth:`ndarray.resize<numpy.ndarray.resize>` (*self*,
- *newshape*, refcheck= *refcheck*, order= *fortran*). This function
- only works on single-segment arrays. It changes the shape of *self*
- in place and will reallocate the memory for *self* if *newshape* has
- a different total number of elements than the old shape. If
- reallocation is necessary, then *self* must own its data, have
- ``self->base==NULL``, have ``self->weakrefs==NULL``, and (unless
- refcheck is 0) not be referenced by any other array. The *fortran*
- argument can be :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`,
- or :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually
- it could be used to determine how the resize operation should view
- the data when constructing a differently-dimensioned array.
- Returns ``None`` on success and ``NULL`` on error.
-
-.. c:function:: PyObject* PyArray_Transpose( \
- PyArrayObject* self, PyArray_Dims* permute)
-
- Equivalent to :meth:`ndarray.transpose<numpy.ndarray.transpose>` (*self*, *permute*). Permute the
- axes of the ndarray object *self* according to the data structure
- *permute* and return the result. If *permute* is ``NULL``, then
- the resulting array has its axes reversed. For example if *self*
- has shape :math:`10\times20\times30`, and ``permute.ptr`` is
- (0,2,1) the shape of the result is :math:`10\times30\times20.` If
- *permute* is ``NULL``, the shape of the result is
- :math:`30\times20\times10.`
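-
- For example, a sketch that swaps the last two axes of a 3-d array
- (the helper name ``swap_last_axes`` is hypothetical):
-
- .. code-block:: c
-
-     #include <Python.h>
-     #include <numpy/arrayobject.h>
-
-     /* Return a view of a 3-d array with its last two axes swapped. */
-     static PyObject *
-     swap_last_axes(PyArrayObject *arr)
-     {
-         npy_intp perm[3] = {0, 2, 1};
-         PyArray_Dims permute = {perm, 3};
-
-         return PyArray_Transpose(arr, &permute);
-     }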
-
-.. c:function:: PyObject* PyArray_Flatten(PyArrayObject* self, NPY_ORDER order)
-
- Equivalent to :meth:`ndarray.flatten<numpy.ndarray.flatten>` (*self*, *order*). Return a 1-d copy
- of the array. If *order* is :c:data:`NPY_FORTRANORDER` the elements are
- scanned out in Fortran order (first-dimension varies the
- fastest). If *order* is :c:data:`NPY_CORDER`, the elements of ``self``
- are scanned in C-order (last dimension varies the fastest). If
- *order* is :c:data:`NPY_ANYORDER`, then the result of
- :c:func:`PyArray_ISFORTRAN` (*self*) is used to determine which order
- to flatten.
-
-.. c:function:: PyObject* PyArray_Ravel(PyArrayObject* self, NPY_ORDER order)
-
- Equivalent to *self*.ravel(*order*). Same basic functionality
- as :c:func:`PyArray_Flatten` (*self*, *order*) except that if *order*
- is :c:data:`NPY_CORDER` and *self* is C-style contiguous, the shape
- is altered but no copy is performed.
-
-
-Item selection and manipulation
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. c:function:: PyObject* PyArray_TakeFrom( \
- PyArrayObject* self, PyObject* indices, int axis, PyArrayObject* ret, \
- NPY_CLIPMODE clipmode)
-
- Equivalent to :meth:`ndarray.take<numpy.ndarray.take>` (*self*, *indices*, *axis*, *ret*,
- *clipmode*) except *axis* =None in Python is obtained by setting
- *axis* = :c:data:`NPY_MAXDIMS` in C. Extract the items from *self*
- indicated by the integer-valued *indices* along the given *axis*.
- The clipmode argument can be :c:data:`NPY_RAISE`, :c:data:`NPY_WRAP`, or
- :c:data:`NPY_CLIP` to indicate what to do with out-of-bound indices. The
- *ret* argument can specify an output array rather than having one
- created internally.
-
-.. c:function:: PyObject* PyArray_PutTo( \
- PyArrayObject* self, PyObject* values, PyObject* indices, \
- NPY_CLIPMODE clipmode)
-
- Equivalent to *self*.put(*values*, *indices*, *clipmode*).
- Put *values* into *self* at the corresponding (flattened)
- *indices*. If *values* is too small it will be repeated as
- necessary.
-
-.. c:function:: PyObject* PyArray_PutMask( \
- PyArrayObject* self, PyObject* values, PyObject* mask)
-
- Place the *values* in *self* wherever corresponding positions
- (using a flattened context) in *mask* are true. The *mask* and
- *self* arrays must have the same total number of elements. If
- *values* is too small, it will be repeated as necessary.
-
-.. c:function:: PyObject* PyArray_Repeat( \
- PyArrayObject* self, PyObject* op, int axis)
-
- Equivalent to :meth:`ndarray.repeat<numpy.ndarray.repeat>` (*self*, *op*, *axis*). Copy the
- elements of *self*, *op* times along the given *axis*. Either
- *op* is a scalar integer or a sequence of length
- ``self->dimensions[axis]`` indicating how many times to repeat each
- item along the axis.
-
-.. c:function:: PyObject* PyArray_Choose( \
- PyArrayObject* self, PyObject* op, PyArrayObject* ret, \
- NPY_CLIPMODE clipmode)
-
- Equivalent to :meth:`ndarray.choose<numpy.ndarray.choose>` (*self*, *op*, *ret*, *clipmode*).
- Create a new array by selecting elements from the sequence of
- arrays in *op* based on the integer values in *self*. The arrays
- must all be broadcastable to the same shape and the entries in
- *self* should be between 0 and len(*op*). The output is placed
- in *ret* unless it is ``NULL`` in which case a new output is
- created. The *clipmode* argument determines behavior for when
- entries in *self* are not between 0 and len(*op*).
-
- .. c:var:: NPY_RAISE
-
- raise a ValueError;
-
- .. c:var:: NPY_WRAP
-
- wrap values < 0 by adding len(*op*) and values >=len(*op*)
- by subtracting len(*op*) until they are in range;
-
- .. c:var:: NPY_CLIP
-
- all values are clipped to the region [0, len(*op*) ).
-
-
-.. c:function:: PyObject* PyArray_Sort(PyArrayObject* self, int axis, NPY_SORTKIND kind)
-
- Equivalent to :meth:`ndarray.sort<numpy.ndarray.sort>` (*self*, *axis*, *kind*).
- Return an array with the items of *self* sorted along *axis*. The array
- is sorted using the algorithm denoted by *kind*, an enumerated value
- indicating which sorting algorithm to use.
-
-.. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis)
-
- Equivalent to :meth:`ndarray.argsort<numpy.ndarray.argsort>` (*self*, *axis*).
- Return an array of indices such that selection of these indices
- along the given ``axis`` would return a sorted version of *self*.
- If ``self->descr`` is a data-type with fields defined, then
- ``self->descr->names`` is used
- to determine the sort order. A comparison where the first field is equal
- will use the second field and so on. To alter the sort order of a
- structured array, create a new data-type with a different order of names
- and construct a view of the array with that new data-type.
-
-.. c:function:: PyObject* PyArray_LexSort(PyObject* sort_keys, int axis)
-
- Given a sequence of arrays (*sort_keys*) of the same shape,
- return an array of indices (similar to :c:func:`PyArray_ArgSort` (...))
- that would sort the arrays lexicographically. A lexicographic sort
- specifies that when two keys are found to be equal, the order is
- based on comparison of subsequent keys. A merge sort (which leaves
- equal entries unmoved) is required to be defined for the
- types. The sort is accomplished by sorting the indices first using
- the first *sort_key* and then using the second *sort_key* and so
- forth. This is equivalent to the :func:`numpy.lexsort`
- (*sort_keys*, *axis*) call in Python. Because of the way the
- merge-sort works, be sure
- to understand the order the *sort_keys* must be in (reversed from
- the order you would use when comparing two elements).
-
- If these arrays are all collected in a structured array, then
- :c:func:`PyArray_Sort` (...) can also be used to sort the array
- directly.
-
-.. c:function:: PyObject* PyArray_SearchSorted( \
- PyArrayObject* self, PyObject* values, NPY_SEARCHSIDE side, \
- PyObject* perm)
-
- Equivalent to :meth:`ndarray.searchsorted<numpy.ndarray.searchsorted>` (*self*, *values*, *side*,
- *perm*). Assuming *self* is a 1-d array in ascending order, then the
- output is an array of indices the same shape as *values* such that, if
- the elements in *values* were inserted before the indices, the order of
- *self* would be preserved. No checking is done on whether or not self is
- in ascending order.
-
- The *side* argument indicates whether the index returned should be that of
- the first suitable location (if :c:data:`NPY_SEARCHLEFT`) or of the last
- (if :c:data:`NPY_SEARCHRIGHT`).
-
- The *perm* argument, if not ``NULL``, must be a 1D array of integer
- indices the same length as *self* that sorts it into ascending order.
- This is typically the result of a call to :c:func:`PyArray_ArgSort` (...).
- Binary search is used to find the required insertion points.
-
-.. c:function:: int PyArray_Partition( \
- PyArrayObject *self, PyArrayObject * ktharray, int axis, \
- NPY_SELECTKIND which)
-
- Equivalent to :meth:`ndarray.partition<numpy.ndarray.partition>`
- (*self*, *ktharray*, *axis*, *which*). Partitions the array so that
- the values of the element indexed by *ktharray* are in the positions
- they would be in if the array were fully sorted, placing all elements
- smaller than the kth element before it and all elements equal or
- greater after it. The ordering of all elements within the partitions
- is undefined.
- If *self*->descr is a data-type with fields defined, then
- self->descr->names is used to determine the sort order. A comparison where
- the first field is equal will use the second field and so on. To alter the
- sort order of a structured array, create a new data-type with a different
- order of names and construct a view of the array with that new data-type.
- Returns zero on success and -1 on failure.
-
-.. c:function:: PyObject* PyArray_ArgPartition( \
- PyArrayObject *op, PyArrayObject * ktharray, int axis, \
- NPY_SELECTKIND which)
-
- Equivalent to :meth:`ndarray.argpartition<numpy.ndarray.argpartition>`
- (*self*, *ktharray*, *axis*, *which*). Return an array of indices
- such that selection of these indices
- along the given ``axis`` would return a partitioned version of *self*.
-
-.. c:function:: PyObject* PyArray_Diagonal( \
- PyArrayObject* self, int offset, int axis1, int axis2)
-
- Equivalent to :meth:`ndarray.diagonal<numpy.ndarray.diagonal>` (*self*, *offset*, *axis1*, *axis2*
- ). Return the *offset* diagonals of the 2-d arrays defined by
- *axis1* and *axis2*.
-
-.. c:function:: npy_intp PyArray_CountNonzero(PyArrayObject* self)
-
- .. versionadded:: 1.6
-
- Counts the number of non-zero elements in the array object *self*.
-
-.. c:function:: PyObject* PyArray_Nonzero(PyArrayObject* self)
-
- Equivalent to :meth:`ndarray.nonzero<numpy.ndarray.nonzero>` (*self*). Returns a tuple of index
- arrays that select elements of *self* that are nonzero. If
- ``nd = PyArray_NDIM(self)`` equals 1, then a single index array is
- returned. The index arrays have data type :c:data:`NPY_INTP`. If a
- tuple is returned (nd :math:`\neq` 1), then its length is nd.
-
-.. c:function:: PyObject* PyArray_Compress( \
- PyArrayObject* self, PyObject* condition, int axis, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.compress<numpy.ndarray.compress>`
- (*self*, *condition*, *axis*). Return the elements along *axis*
- corresponding to elements of
- *condition* that are true.
-
-
-Calculation
-^^^^^^^^^^^
-
-.. tip::
-
- Pass in :c:data:`NPY_MAXDIMS` for axis in order to achieve the same
- effect that is obtained by passing in *axis* = :const:`None` in Python
- (treating the array as a 1-d array).
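-
-For example, a sketch of the C equivalent of ``arr.sum(axis=None)``
-using :c:func:`PyArray_Sum` (documented below); the helper name
-``total_sum`` is hypothetical:
-
-.. code-block:: c
-
-    #include <Python.h>
-    #include <numpy/arrayobject.h>
-
-    /* Sum over the flattened array, like arr.sum(axis=None) in Python. */
-    static PyObject *
-    total_sum(PyArrayObject *arr)
-    {
-        return PyArray_Sum(arr, NPY_MAXDIMS, NPY_NOTYPE, NULL);
-    }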
-
-.. c:function:: PyObject* PyArray_ArgMax( \
- PyArrayObject* self, int axis, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.argmax<numpy.ndarray.argmax>` (*self*, *axis*). Return the index of
- the largest element of *self* along *axis*.
-
-.. c:function:: PyObject* PyArray_ArgMin( \
- PyArrayObject* self, int axis, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.argmin<numpy.ndarray.argmin>` (*self*, *axis*). Return the index of
- the smallest element of *self* along *axis*.
-
-
-
-
-.. note::
-
- The out argument specifies where to place the result. If out is
- NULL, then the output array is created, otherwise the output is
- placed in out which must be the correct size and type. A new
- reference to the output array is always returned even when out
- is not NULL. The caller of the routine has the responsibility
- to ``DECREF`` out if not NULL or a memory-leak will occur.
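-
-A sketch of this ownership rule using :c:func:`PyArray_Max` (documented
-below); the helper name ``max_into`` is hypothetical:
-
-.. code-block:: c
-
-    #include <Python.h>
-    #include <numpy/arrayobject.h>
-
-    /* Compute the overall maximum into a caller-provided out array. */
-    static int
-    max_into(PyArrayObject *arr, PyArrayObject *out)
-    {
-        PyObject *res = PyArray_Max(arr, NPY_MAXDIMS, out);
-
-        if (res == NULL) {
-            return -1;
-        }
-        /* A new reference is returned even though out holds the result. */
-        Py_DECREF(res);
-        return 0;
-    }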
-
-.. c:function:: PyObject* PyArray_Max( \
- PyArrayObject* self, int axis, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.max<numpy.ndarray.max>` (*self*, *axis*). Returns the largest
- element of *self* along the given *axis*. When the result is a single
- element, returns a numpy scalar instead of an ndarray.
-
-.. c:function:: PyObject* PyArray_Min( \
- PyArrayObject* self, int axis, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.min<numpy.ndarray.min>` (*self*, *axis*). Return the smallest
- element of *self* along the given *axis*. When the result is a single
- element, returns a numpy scalar instead of an ndarray.
-
-
-.. c:function:: PyObject* PyArray_Ptp( \
- PyArrayObject* self, int axis, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.ptp<numpy.ndarray.ptp>` (*self*, *axis*). Return the difference
- between the largest element of *self* along *axis* and the
- smallest element of *self* along *axis*. When the result is a single
- element, returns a numpy scalar instead of an ndarray.
-
-
-
-
-.. note::
-
- The rtype argument specifies the data-type the reduction should
- take place over. This is important if the data-type of the array
- is not "large" enough to handle the output. By default, all
- integer data-types are made at least as large as :c:data:`NPY_LONG`
- for the "add" and "multiply" ufuncs (which form the basis for
- mean, sum, cumsum, prod, and cumprod functions).
-
-.. c:function:: PyObject* PyArray_Mean( \
- PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.mean<numpy.ndarray.mean>` (*self*, *axis*, *rtype*). Returns the
- mean of the elements along the given *axis*, using the enumerated
- type *rtype* as the data type to sum in. Default sum behavior is
- obtained using :c:data:`NPY_NOTYPE` for *rtype*.
-
-.. c:function:: PyObject* PyArray_Trace( \
- PyArrayObject* self, int offset, int axis1, int axis2, int rtype, \
- PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.trace<numpy.ndarray.trace>` (*self*, *offset*, *axis1*, *axis2*,
- *rtype*). Return the sum (using *rtype* as the data type of
- summation) over the *offset* diagonal elements of the 2-d arrays
- defined by *axis1* and *axis2* variables. A positive offset
- chooses diagonals above the main diagonal. A negative offset
- selects diagonals below the main diagonal.
-
-.. c:function:: PyObject* PyArray_Clip( \
- PyArrayObject* self, PyObject* min, PyObject* max)
-
- Equivalent to :meth:`ndarray.clip<numpy.ndarray.clip>` (*self*, *min*, *max*). Clip an array,
- *self*, so that values larger than *max* are fixed to *max* and
- values less than *min* are fixed to *min*.
-
-.. c:function:: PyObject* PyArray_Conjugate(PyArrayObject* self)
-
- Equivalent to :meth:`ndarray.conjugate<numpy.ndarray.conjugate>` (*self*).
- Return the complex conjugate of *self*. If *self* is not of
- complex data type, then return *self* with a reference.
-
-.. c:function:: PyObject* PyArray_Round( \
- PyArrayObject* self, int decimals, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.round<numpy.ndarray.round>` (*self*, *decimals*, *out*). Returns
- the array with elements rounded to the nearest decimal place. The
- decimal place is defined as the :math:`10^{-\textrm{decimals}}`
- digit so that negative *decimals* cause rounding to the nearest 10's,
- 100's, etc. If *out* is ``NULL``, then the output array is created;
- otherwise the output is placed in *out*, which must be of the correct
- size and type.
-
-.. c:function:: PyObject* PyArray_Std( \
- PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.std<numpy.ndarray.std>` (*self*, *axis*, *rtype*). Return the
- standard deviation using data along *axis* converted to data type
- *rtype*.
-
-.. c:function:: PyObject* PyArray_Sum( \
- PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.sum<numpy.ndarray.sum>` (*self*, *axis*, *rtype*). Return 1-d
- vector sums of elements in *self* along *axis*. Perform the sum
- after converting data to data type *rtype*.
-
-.. c:function:: PyObject* PyArray_CumSum( \
- PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.cumsum<numpy.ndarray.cumsum>` (*self*, *axis*, *rtype*). Return
- cumulative 1-d sums of elements in *self* along *axis*. Perform
- the sum after converting data to data type *rtype*.
-
-.. c:function:: PyObject* PyArray_Prod( \
- PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.prod<numpy.ndarray.prod>` (*self*, *axis*, *rtype*). Return 1-d
- products of elements in *self* along *axis*. Perform the product
- after converting data to data type *rtype*.
-
-.. c:function:: PyObject* PyArray_CumProd( \
- PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.cumprod<numpy.ndarray.cumprod>` (*self*, *axis*, *rtype*). Return
- 1-d cumulative products of elements in ``self`` along ``axis``.
- Perform the product after converting data to data type ``rtype``.
-
-.. c:function:: PyObject* PyArray_All( \
- PyArrayObject* self, int axis, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.all<numpy.ndarray.all>` (*self*, *axis*). Return an array with
- True elements for every 1-d sub-array of ``self`` defined by
- ``axis`` in which all the elements are True.
-
-.. c:function:: PyObject* PyArray_Any( \
- PyArrayObject* self, int axis, PyArrayObject* out)
-
- Equivalent to :meth:`ndarray.any<numpy.ndarray.any>` (*self*, *axis*). Return an array with
- True elements for every 1-d sub-array of *self* defined by *axis*
- in which any of the elements are True.
-
-Functions
----------
-
-
-Array Functions
-^^^^^^^^^^^^^^^
-
-.. c:function:: int PyArray_AsCArray( \
- PyObject** op, void* ptr, npy_intp* dims, int nd, int typenum, \
- int itemsize)
-
- Sometimes it is useful to access a multidimensional array as a
- C-style multi-dimensional array so that algorithms can be
- implemented using C's a[i][j][k] syntax. This routine returns a
- pointer, *ptr*, that simulates this kind of C-style array, for
- 1-, 2-, and 3-d ndarrays.
-
- :param op:
-
- The address to any Python object. This Python object will be replaced
- with an equivalent well-behaved, C-style contiguous, ndarray of the
- given data type specified by the last two arguments. Be sure that
- stealing a reference in this way to the input object is justified.
-
- :param ptr:
-
- The address to a (ctype* for 1-d, ctype** for 2-d or ctype*** for 3-d)
- variable where ctype is the equivalent C-type for the data type. On
- return, *ptr* will be addressable as a 1-d, 2-d, or 3-d array.
-
- :param dims:
-
- An output array that contains the shape of the array object. This
- array gives boundaries on any looping that will take place.
-
- :param nd:
-
- The dimensionality of the array (1, 2, or 3).
-
- :param typenum:
-
- The expected data type of the array.
-
- :param itemsize:
-
- This argument is only needed when *typenum* represents a
- flexible array. Otherwise it should be 0.
-
-.. note::
-
- The simulation of a C-style array is not complete for 2-d and 3-d
- arrays. For example, the simulated arrays of pointers cannot be passed
- to subroutines expecting specific, statically-defined 2-d and 3-d
- arrays. To pass to functions requiring those kind of inputs, you must
- statically define the required array and copy data.
-
-.. c:function:: int PyArray_Free(PyObject* op, void* ptr)
-
- Must be called with the same objects and memory locations returned
- from :c:func:`PyArray_AsCArray` (...). This function cleans up memory
- that otherwise would get leaked.
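-
- A sketch of the paired use of :c:func:`PyArray_AsCArray` and
- :c:func:`PyArray_Free`, following the typenum/itemsize signature
- documented above (the helper name ``sum_2d`` is hypothetical):
-
- .. code-block:: c
-
-     #include <Python.h>
-     #include <numpy/arrayobject.h>
-
-     /* Sum a 2-d float64 array through C-style a[i][j] indexing. */
-     static int
-     sum_2d(PyObject *obj, double *result)
-     {
-         double **a;
-         npy_intp dims[2];
-         npy_intp i, j;
-         double total = 0.0;
-
-         if (PyArray_AsCArray(&obj, (void *)&a, dims, 2, NPY_DOUBLE, 0) < 0) {
-             return -1;
-         }
-         for (i = 0; i < dims[0]; i++) {
-             for (j = 0; j < dims[1]; j++) {
-                 total += a[i][j];
-             }
-         }
-         *result = total;
-         return PyArray_Free(obj, (void *)a);
-     }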
-
-.. c:function:: PyObject* PyArray_Concatenate(PyObject* obj, int axis)
-
- Join the sequence of objects in *obj* together along *axis* into a
- single array. If the dimensions or types are not compatible an
- error is raised.
-
-.. c:function:: PyObject* PyArray_InnerProduct(PyObject* obj1, PyObject* obj2)
-
- Compute a product-sum over the last dimensions of *obj1* and
- *obj2*. Neither array is conjugated.
-
-.. c:function:: PyObject* PyArray_MatrixProduct(PyObject* obj1, PyObject* obj2)
-
- Compute a product-sum over the last dimension of *obj1* and the
- second-to-last dimension of *obj2*. For 2-d arrays this is a
- matrix-product. Neither array is conjugated.
-
-.. c:function:: PyObject* PyArray_MatrixProduct2( \
- PyObject* obj1, PyObject* obj2, PyArrayObject* out)
-
- .. versionadded:: 1.6
-
- Same as PyArray_MatrixProduct, but store the result in *out*. The
- output array must have the correct shape, type, and be
- C-contiguous, or an exception is raised.
-
-.. c:function:: PyObject* PyArray_EinsteinSum( \
- char* subscripts, npy_intp nop, PyArrayObject** op_in, \
- PyArray_Descr* dtype, NPY_ORDER order, NPY_CASTING casting, \
- PyArrayObject* out)
-
- .. versionadded:: 1.6
-
- Applies the Einstein summation convention to the array operands
- provided, returning a new array or placing the result in *out*.
- The string in *subscripts* is a comma separated list of index
- letters. The number of operands is in *nop*, and *op_in* is an
- array containing those operands. The data type of the output can
- be forced with *dtype*, the output order can be forced with *order*
- (:c:data:`NPY_KEEPORDER` is recommended), and when *dtype* is specified,
- *casting* indicates how permissive the data conversion should be.
-
- See the :func:`~numpy.einsum` function for more details.
-
-.. c:function:: PyObject* PyArray_CopyAndTranspose(PyObject* op)
-
- A specialized copy and transpose function that works only for 2-d
- arrays. The returned array is a transposed copy of *op*.
-
-.. c:function:: PyObject* PyArray_Correlate( \
- PyObject* op1, PyObject* op2, int mode)
-
- Compute the 1-d correlation of the 1-d arrays *op1* and *op2*.
- The correlation is computed at each output point by multiplying
- *op1* by a shifted version of *op2* and summing the result. As a
- result of the shift, needed values outside of the defined range of
- *op1* and *op2* are interpreted as zero. The mode determines how
- many shifts to return: 0 - return only shifts that did not need to
- assume zero values; 1 - return an object that is the same size as
- *op1*; 2 - return all possible shifts (any overlap at all is
- accepted).
-
- .. rubric:: Notes
-
- This does not compute the usual correlation: if op2 is larger than op1, the
- arguments are swapped, and the conjugate is never taken for complex arrays.
- See PyArray_Correlate2 for the usual signal processing correlation.
-
-.. c:function:: PyObject* PyArray_Correlate2( \
- PyObject* op1, PyObject* op2, int mode)
-
- Updated version of PyArray_Correlate, which uses the usual definition of
- correlation for 1d arrays. The correlation is computed at each output point
- by multiplying *op1* by a shifted version of *op2* and summing the result.
- As a result of the shift, needed values outside of the defined range of
- *op1* and *op2* are interpreted as zero. The mode determines how many
- shifts to return: 0 - return only shifts that did not need to assume zero
- values; 1 - return an object that is the same size as *op1*; 2 - return all
- possible shifts (any overlap at all is accepted).
-
- .. rubric:: Notes
-
- Compute z as follows::
-
- z[k] = sum_n op1[n] * conj(op2[n+k])
-
-.. c:function:: PyObject* PyArray_Where( \
- PyObject* condition, PyObject* x, PyObject* y)
-
- If both ``x`` and ``y`` are ``NULL``, then return
- :c:func:`PyArray_Nonzero` (*condition*). Otherwise, both *x* and *y*
- must be given and the object returned is shaped like *condition*
- and has elements of *x* and *y* where *condition* is respectively
- True or False.
-
-
-Other functions
-^^^^^^^^^^^^^^^
-
-.. c:function:: Bool PyArray_CheckStrides( \
- int elsize, int nd, npy_intp numbytes, npy_intp const* dims, \
- npy_intp const* newstrides)
-
- Determine if *newstrides* is a strides array consistent with the
- memory of an *nd* -dimensional array with shape ``dims`` and
- element-size, *elsize*. The *newstrides* array is checked to see
- if jumping by the provided number of bytes in each direction will
- ever mean jumping more than *numbytes* which is the assumed size
- of the available memory segment. If *numbytes* is 0, then an
- equivalent *numbytes* is computed assuming *nd*, *dims*, and
- *elsize* refer to a single-segment array. Return :c:data:`NPY_TRUE` if
- *newstrides* is acceptable, otherwise return :c:data:`NPY_FALSE`.
-
-.. c:function:: npy_intp PyArray_MultiplyList(npy_intp const* seq, int n)
-
-.. c:function:: int PyArray_MultiplyIntList(int const* seq, int n)
-
- Both of these routines multiply together the elements of the
- *n*-length integer array *seq* and return the product. No overflow
- checking is performed.
-
-.. c:function:: int PyArray_CompareLists(npy_intp const* l1, npy_intp const* l2, int n)
-
- Given two *n* -length arrays of integers, *l1*, and *l2*, return
- 1 if the lists are identical; otherwise, return 0.
-
-
-Auxiliary Data With Object Semantics
-------------------------------------
-
-.. versionadded:: 1.7.0
-
-.. c:type:: NpyAuxData
-
-When working with more complex dtypes which are composed of other dtypes,
-such as the struct dtype, creating inner loops that manipulate the dtypes
-requires carrying along additional data. NumPy supports this idea
-through a struct :c:type:`NpyAuxData`, mandating a few conventions so that
-it is possible to do this.
-
-Defining an :c:type:`NpyAuxData` is similar to defining a class in C++,
-but the object semantics have to be tracked manually since the API is in C.
- Here's an example for a function which doubles up an element using
- an element copier function as a primitive::
-
- typedef struct {
- NpyAuxData base;
- ElementCopier_Func *func;
- NpyAuxData *funcdata;
- } eldoubler_aux_data;
-
- void free_element_doubler_aux_data(NpyAuxData *data)
- {
- eldoubler_aux_data *d = (eldoubler_aux_data *)data;
- /* Free the memory owned by this auxdata */
- NPY_AUXDATA_FREE(d->funcdata);
- PyArray_free(d);
- }
-
- NpyAuxData *clone_element_doubler_aux_data(NpyAuxData *data)
- {
- eldoubler_aux_data *ret = PyArray_malloc(sizeof(eldoubler_aux_data));
- if (ret == NULL) {
- return NULL;
- }
-
- /* Raw copy of all data */
- memcpy(ret, data, sizeof(eldoubler_aux_data));
-
- /* Fix up the owned auxdata so we have our own copy */
- ret->funcdata = NPY_AUXDATA_CLONE(ret->funcdata);
- if (ret->funcdata == NULL) {
- PyArray_free(ret);
- return NULL;
- }
-
- return (NpyAuxData *)ret;
- }
-
- NpyAuxData *create_element_doubler_aux_data(
- ElementCopier_Func *func,
- NpyAuxData *funcdata)
- {
- eldoubler_aux_data *ret = PyArray_malloc(sizeof(eldoubler_aux_data));
- if (ret == NULL) {
- PyErr_NoMemory();
- return NULL;
- }
- memset(ret, 0, sizeof(eldoubler_aux_data));
- ret->base.free = &free_element_doubler_aux_data;
- ret->base.clone = &clone_element_doubler_aux_data;
- ret->func = func;
- ret->funcdata = funcdata;
-
- return (NpyAuxData *)ret;
- }
-
-.. c:type:: NpyAuxData_FreeFunc
-
- The function pointer type for NpyAuxData free functions.
-
-.. c:type:: NpyAuxData_CloneFunc
-
- The function pointer type for NpyAuxData clone functions. These
- functions should never set the Python exception on error, because
- they may be called from a multi-threaded context.
-
-.. c:function:: NPY_AUXDATA_FREE(auxdata)
-
- A macro which calls the auxdata's free function appropriately;
- it does nothing if *auxdata* is ``NULL``.
-
-.. c:function:: NPY_AUXDATA_CLONE(auxdata)
-
- A macro which calls the auxdata's clone function appropriately,
- returning a deep copy of the auxiliary data.
-
-Array Iterators
----------------
-
-As of NumPy 1.6.0, these array iterators are superseded by
-the new array iterator, :c:type:`NpyIter`.
-
-An array iterator is a simple way to access the elements of an
-N-dimensional array quickly and efficiently. Section `2
-<#sec-array-iterator>`__ provides more description and examples of
-this useful approach to looping over an array.
-
-.. c:function:: PyObject* PyArray_IterNew(PyObject* arr)
-
- Return an array iterator object from the array, *arr*. This is
- equivalent to ``arr.flat``. The array iterator object makes
- it easy to loop over an N-dimensional non-contiguous array in
- C-style contiguous fashion.
-
-.. c:function:: PyObject* PyArray_IterAllButAxis(PyObject* arr, int* axis)
-
- Return an array iterator that will iterate over all axes but the
- one provided in *\*axis*. The returned iterator cannot be used
- with :c:func:`PyArray_ITER_GOTO1D`. This iterator could be used to
- write something similar to what ufuncs do wherein the loop over
- the largest axis is done by a separate sub-routine. If *\*axis* is
- negative then *\*axis* will be set to the axis having the smallest
- stride and that axis will be used.
-
-.. c:function:: PyObject *PyArray_BroadcastToShape( \
- PyObject* arr, npy_intp *dimensions, int nd)
-
- Return an array iterator that is broadcast to iterate as an array
- of the shape provided by *dimensions* and *nd*.
-
-.. c:function:: int PyArrayIter_Check(PyObject* op)
-
- Evaluates true if *op* is an array iterator (or instance of a
- subclass of the array iterator type).
-
-.. c:function:: void PyArray_ITER_RESET(PyObject* iterator)
-
- Reset an *iterator* to the beginning of the array.
-
-.. c:function:: void PyArray_ITER_NEXT(PyObject* iterator)
-
- Increment the index and the dataptr members of the *iterator* to
- point to the next element of the array. If the array is not
- (C-style) contiguous, also increment the N-dimensional coordinates
- array.
-
-.. c:function:: void *PyArray_ITER_DATA(PyObject* iterator)
-
- A pointer to the current element of the array.
-
-.. c:function:: void PyArray_ITER_GOTO( \
- PyObject* iterator, npy_intp* destination)
-
- Set the *iterator* index, dataptr, and coordinates members to the
- location in the array indicated by the N-dimensional c-array,
- *destination*, which must have size at least
- ``iterator->nd_m1+1``.
-
-.. c:function:: void PyArray_ITER_GOTO1D(PyObject* iterator, npy_intp index)
-
- Set the *iterator* index and dataptr to the location in the array
- indicated by the integer *index* which points to an element in the
- C-styled flattened array.
-
-.. c:function:: int PyArray_ITER_NOTDONE(PyObject* iterator)
-
- Evaluates TRUE as long as the iterator has not looped through all of
- the elements, otherwise it evaluates FALSE.
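-
- Putting these pieces together, a sketch that sums a float64 array of
- any shape and layout (the helper name ``iter_sum`` is hypothetical):
-
- .. code-block:: c
-
-     #include <Python.h>
-     #include <numpy/arrayobject.h>
-
-     /* Sum all elements of an NPY_DOUBLE array, whatever its layout. */
-     static double
-     iter_sum(PyArrayObject *arr)
-     {
-         double total = 0.0;
-         PyArrayIterObject *it;
-
-         it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr);
-         if (it == NULL) {
-             return -1.0;  /* an exception is set */
-         }
-         while (PyArray_ITER_NOTDONE(it)) {
-             total += *(double *)PyArray_ITER_DATA(it);
-             PyArray_ITER_NEXT(it);
-         }
-         Py_DECREF(it);
-         return total;
-     }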
-
-
-Broadcasting (multi-iterators)
-------------------------------
-
-.. c:function:: PyObject* PyArray_MultiIterNew(int num, ...)
-
- A simplified interface to broadcasting. This function takes the
- number of arrays to broadcast and then *num* extra ( :c:type:`PyObject *<PyObject>`
- ) arguments. These arguments are converted to arrays and iterators
- are created. :c:func:`PyArray_Broadcast` is then called on the resulting
- multi-iterator object. The resulting, broadcasted multi-iterator
- object is then returned. A broadcasted operation can then be
- performed using a single loop and using :c:func:`PyArray_MultiIter_NEXT`
- (...).
-
-.. c:function:: void PyArray_MultiIter_RESET(PyObject* multi)
-
- Reset all the iterators to the beginning in a multi-iterator
- object, *multi*.
-
-.. c:function:: void PyArray_MultiIter_NEXT(PyObject* multi)
-
- Advance each iterator in a multi-iterator object, *multi*, to its
- next (broadcasted) element.
-
-.. c:function:: void *PyArray_MultiIter_DATA(PyObject* multi, int i)
-
- Return the data-pointer of the *i* :math:`^{\textrm{th}}` iterator
- in a multi-iterator object.
-
-.. c:function:: void PyArray_MultiIter_NEXTi(PyObject* multi, int i)
-
- Advance the pointer of only the *i* :math:`^{\textrm{th}}` iterator.
-
-.. c:function:: void PyArray_MultiIter_GOTO( \
- PyObject* multi, npy_intp* destination)
-
- Advance each iterator in a multi-iterator object, *multi*, to the
- given :math:`N` -dimensional *destination* where :math:`N` is the
- number of dimensions in the broadcasted array.
-
-.. c:function:: void PyArray_MultiIter_GOTO1D(PyObject* multi, npy_intp index)
-
- Advance each iterator in a multi-iterator object, *multi*, to the
- corresponding location of the *index* into the flattened
- broadcasted array.
-
-.. c:function:: int PyArray_MultiIter_NOTDONE(PyObject* multi)
-
- Evaluates TRUE as long as the multi-iterator has not looped
- through all of the elements (of the broadcasted result), otherwise
- it evaluates FALSE.
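-
- As a sketch of a complete broadcast loop (the helper name
- ``broadcast_dot`` is hypothetical; both operands are assumed to be of
- type :c:data:`NPY_DOUBLE`):
-
- .. code-block:: c
-
-     #include <Python.h>
-     #include <numpy/arrayobject.h>
-
-     /* Accumulate the elementwise product of two broadcast operands. */
-     static double
-     broadcast_dot(PyObject *a, PyObject *b)
-     {
-         double total = 0.0;
-         PyArrayMultiIterObject *multi;
-
-         multi = (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, a, b);
-         if (multi == NULL) {
-             return -1.0;  /* an exception is set */
-         }
-         while (PyArray_MultiIter_NOTDONE(multi)) {
-             double x = *(double *)PyArray_MultiIter_DATA(multi, 0);
-             double y = *(double *)PyArray_MultiIter_DATA(multi, 1);
-             total += x * y;
-             PyArray_MultiIter_NEXT(multi);
-         }
-         Py_DECREF(multi);
-         return total;
-     }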
-
-.. c:function:: int PyArray_Broadcast(PyArrayMultiIterObject* mit)
-
- This function encapsulates the broadcasting rules. The *mit*
- container should already contain iterators for all the arrays that
- need to be broadcast. On return, these iterators will be adjusted
- so that iteration over each simultaneously will accomplish the
- broadcasting. A negative number is returned if an error occurs.
-
-.. c:function:: int PyArray_RemoveSmallest(PyArrayMultiIterObject* mit)
-
- This function takes a multi-iterator object that has been
- previously "broadcasted," finds the dimension with the smallest
- "sum of strides" in the broadcasted result and adapts all the
- iterators so as not to iterate over that dimension (by effectively
- making them of length-1 in that dimension). The corresponding
- dimension is returned unless *mit* ->nd is 0, then -1 is
- returned. This function is useful for constructing ufunc-like
- routines that broadcast their inputs correctly and then call a
- strided 1-d version of the routine as the inner-loop. This 1-d
- version is usually optimized for speed and for this reason the
- loop should be performed over the axis that won't require large
- stride jumps.
-
-Neighborhood iterator
----------------------
-
-.. versionadded:: 1.4.0
-
-Neighborhood iterators are subclasses of the iterator object, and can be used
-to iterate over a neighborhood of a point. For example, you may want to
-iterate over every voxel of a 3d image, and for every such voxel, iterate over
-a hypercube. Neighborhood iterators automatically handle boundaries, thus
-making this kind of code much easier to write than manual boundary handling,
-at the cost of a slight overhead.
-
-.. c:function:: PyObject* PyArray_NeighborhoodIterNew( \
- PyArrayIterObject* iter, npy_intp* bounds, int mode, \
- PyArrayObject* fill_value)
-
- This function creates a new neighborhood iterator from an existing
- iterator. The neighborhood will be computed relative to the position
- currently pointed to by *iter*, the bounds define the shape of the
- neighborhood iterator, and the *mode* argument defines the boundary
- handling mode.
-
- The *bounds* argument is expected to be an array of size
- 2 * iter->ao->nd, where the range bounds[2*i] to bounds[2*i+1]
- defines the range of coordinates to walk for dimension i (both
- bounds are included in the walked coordinates). The bounds should be
- ordered for each dimension (bounds[2*i] <= bounds[2*i+1]).
-
- The mode should be one of:
-
- * NPY_NEIGHBORHOOD_ITER_ZERO_PADDING: zero padding. Outside bounds values
- will be 0.
- * NPY_NEIGHBORHOOD_ITER_ONE_PADDING: one padding. Outside bounds values
- will be 1.
- * NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING: constant padding. Outside bounds
- values will be the same as the first item in fill_value.
- * NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING: mirror padding. Outside bounds
- values will be as if the array items were mirrored. For example, for the
- array [1, 2, 3, 4], x[-2] will be 2, x[-1] will be 1, x[4] will be 4,
- x[5] will be 3, etc...
- * NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING: circular padding. Outside bounds
- values will be as if the array was repeated. For example, for the
- array [1, 2, 3, 4], x[-2] will be 3, x[-1] will be 4, x[4] will be 1,
- x[5] will be 2, etc...
-
- If the mode is constant filling (NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING),
- fill_value should point to an array object which holds the filling value
- (the first item will be the filling value if the array contains more than
- one item). For other cases, fill_value may be NULL.
-
- - The iterator holds a reference to iter
- - Return NULL on failure (in which case the reference count of iter is not
- changed)
- - iter itself can be a Neighborhood iterator: this can be useful for,
- e.g., automatic boundary handling
- - the object returned by this function should be safe to use as a normal
- iterator
- - If the position of iter is changed, any subsequent call to
- PyArrayNeighborhoodIter_Next is undefined behavior, and
- PyArrayNeighborhoodIter_Reset must be called.
-
- .. code-block:: c
-
-     PyArrayIterObject *iter;
-     PyArrayNeighborhoodIterObject *neigh_iter;
-     npy_intp bounds[] = {-1, 1, -1, 1};  /* for a 3x3 kernel */
-     npy_intp i, j;
-
-     /* x is assumed to be an existing two-dimensional array object */
-     iter = (PyArrayIterObject *)PyArray_IterNew(x);
-     neigh_iter = (PyArrayNeighborhoodIterObject *)PyArray_NeighborhoodIterNew(
-         iter, bounds, NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL);
-
-     for (i = 0; i < iter->size; ++i) {
-         for (j = 0; j < neigh_iter->size; ++j) {
-             /* Walk around the item currently pointed to by iter->dataptr */
-             PyArrayNeighborhoodIter_Next(neigh_iter);
-         }
-
-         /* Move to the next point of iter */
-         PyArrayIter_Next(iter);
-         PyArrayNeighborhoodIter_Reset(neigh_iter);
-     }
-
-.. c:function:: int PyArrayNeighborhoodIter_Reset( \
- PyArrayNeighborhoodIterObject* iter)
-
- Reset the iterator position to the first point of the neighborhood. This
- should be called whenever the *iter* argument given to
- :c:func:`PyArray_NeighborhoodIterNew` is changed (see the example above).
-
-.. c:function:: int PyArrayNeighborhoodIter_Next( \
- PyArrayNeighborhoodIterObject* iter)
-
- After this call, iter->dataptr points to the next point of the
- neighborhood. Calling this function after every point of the
- neighborhood has been visited is undefined.
-
-Array Scalars
--------------
-
-.. c:function:: PyObject* PyArray_Return(PyArrayObject* arr)
-
- This function steals a reference to *arr*.
-
- This function checks to see if *arr* is a 0-dimensional array and,
- if so, returns the appropriate array scalar. It should be used
- whenever 0-dimensional arrays could be returned to Python.
-
-.. c:function:: PyObject* PyArray_Scalar( \
- void* data, PyArray_Descr* dtype, PyObject* itemsize)
-
- Return an array scalar object of the data type given by *dtype* and
- with item size *itemsize*, by **copying** from memory pointed to by
- *data*. The data will be byteswapped if appropriate to the
- data-type, because array scalars are always in correct machine-byte
- order.
-
-.. c:function:: PyObject* PyArray_ToScalar(void* data, PyArrayObject* arr)
-
- Return an array scalar object of the type and itemsize indicated
- by the array object *arr* copied from the memory pointed to by
- *data* and swapping if the data in *arr* is not in machine
- byte-order.
-
-.. c:function:: PyObject* PyArray_FromScalar( \
- PyObject* scalar, PyArray_Descr* outcode)
-
- Return a 0-dimensional array of type determined by *outcode* from
- *scalar* which should be an array-scalar object. If *outcode* is
- NULL, then the type is determined from *scalar*.
-
-.. c:function:: void PyArray_ScalarAsCtype(PyObject* scalar, void* ctypeptr)
-
- Return in *ctypeptr* a pointer to the actual value in an array
- scalar. There is no error checking so *scalar* must be an
- array-scalar object, and ctypeptr must have enough space to hold
- the correct type. For flexible-sized types, a pointer to the data
- is copied into the memory of *ctypeptr*, for all other types, the
- actual data is copied into the address pointed to by *ctypeptr*.
-
-.. c:function:: void PyArray_CastScalarToCtype( \
- PyObject* scalar, void* ctypeptr, PyArray_Descr* outcode)
-
- Return the data (cast to the data type indicated by *outcode*)
- from the array-scalar, *scalar*, into the memory pointed to by
- *ctypeptr* (which must be large enough to handle the incoming
- memory).
-
-.. c:function:: PyObject* PyArray_TypeObjectFromType(int type)
-
- Returns a scalar type-object from a type-number, *type*.
- Equivalent to :c:func:`PyArray_DescrFromType` (*type*)->typeobj
- except for reference counting and error-checking. Returns a new
- reference to the typeobject on success or ``NULL`` on failure.
-
-.. c:function:: NPY_SCALARKIND PyArray_ScalarKind( \
- int typenum, PyArrayObject** arr)
-
- See the function :c:func:`PyArray_MinScalarType` for an alternative
- mechanism introduced in NumPy 1.6.0.
-
- Return the kind of scalar represented by *typenum* and the array
- in *\*arr* (if *arr* is not ``NULL`` ). The array is assumed to be
- rank-0 and only used if *typenum* represents a signed integer. If
- *arr* is not ``NULL`` and the first element is negative then
- :c:data:`NPY_INTNEG_SCALAR` is returned, otherwise
- :c:data:`NPY_INTPOS_SCALAR` is returned. The possible return values
- are :c:data:`NPY_{kind}_SCALAR` where ``{kind}`` can be **INTPOS**,
- **INTNEG**, **FLOAT**, **COMPLEX**, **BOOL**, or **OBJECT**.
- :c:data:`NPY_NOSCALAR` is also an enumerated value
- :c:type:`NPY_SCALARKIND` variables can take on.
-
-.. c:function:: int PyArray_CanCoerceScalar( \
- char thistype, char neededtype, NPY_SCALARKIND scalar)
-
- See the function :c:func:`PyArray_ResultType` for details of
- NumPy type promotion, updated in NumPy 1.6.0.
-
- Implements the rules for scalar coercion. Scalars are only
- silently coerced from thistype to neededtype if this function
- returns nonzero. If scalar is :c:data:`NPY_NOSCALAR`, then this
- function is equivalent to :c:func:`PyArray_CanCastSafely`. The rule is
- that scalars of the same KIND can be coerced into arrays of the
- same KIND. This rule means that high-precision scalars will never
- cause low-precision arrays of the same KIND to be upcast.
-
-
-Data-type descriptors
----------------------
-
-
-
-.. warning::
-
- Data-type objects must be reference counted so be aware of the
- action on the data-type reference of different C-API calls. The
- standard rule is that when a data-type object is returned it is a
- new reference. Functions that take :c:type:`PyArray_Descr *` objects and
- return arrays steal references to their data-type inputs
- unless otherwise noted. Therefore, you must own a reference to any
- data-type object used as input to such a function.
-
-.. c:function:: int PyArray_DescrCheck(PyObject* obj)
-
- Evaluates as true if *obj* is a data-type object ( :c:type:`PyArray_Descr *` ).
-
-.. c:function:: PyArray_Descr* PyArray_DescrNew(PyArray_Descr* obj)
-
- Return a new data-type object copied from *obj* (the fields
- reference is just updated so that the new object points to the
- same fields dictionary if any).
-
-.. c:function:: PyArray_Descr* PyArray_DescrNewFromType(int typenum)
-
- Create a new data-type object from the built-in (or
- user-registered) data-type indicated by *typenum*. Builtin data
- types should never have their fields changed; this function creates
- a new copy of the :c:type:`PyArray_Descr` structure so that you can fill
- it in as appropriate. This function is especially needed for
- flexible data-types which need to have a new elsize member in
- order to be meaningful in array construction.
-
-.. c:function:: PyArray_Descr* PyArray_DescrNewByteorder( \
- PyArray_Descr* obj, char newendian)
-
- Create a new data-type object with the byteorder set according to
- *newendian*. All referenced data-type objects (in subdescr and
- fields members of the data-type object) are also changed
- (recursively). If a byteorder of :c:data:`NPY_IGNORE` is encountered it
- is left alone. If newendian is :c:data:`NPY_SWAP`, then all byte-orders
- are swapped. Other valid newendian values are :c:data:`NPY_NATIVE`,
- :c:data:`NPY_LITTLE`, and :c:data:`NPY_BIG` which all cause the returned
- data-type descriptor (and all of its
- referenced data-type descriptors) to have the corresponding byte-
- order.
-
-.. c:function:: PyArray_Descr* PyArray_DescrFromObject( \
- PyObject* op, PyArray_Descr* mintype)
-
- Determine an appropriate data-type object from the object *op*
- (which should be a "nested" sequence object) and the minimum
- data-type descriptor mintype (which can be ``NULL`` ). Similar in
- behavior to array(*op*).dtype. Don't confuse this function with
- :c:func:`PyArray_DescrConverter`. This function essentially looks at
- all the objects in the (nested) sequence and determines the
- data-type from the elements it finds.
-
-.. c:function:: PyArray_Descr* PyArray_DescrFromScalar(PyObject* scalar)
-
- Return a data-type object from an array-scalar object. No checking
- is done to be sure that *scalar* is an array scalar. If no
- suitable data-type can be determined, then a data-type of
- :c:data:`NPY_OBJECT` is returned by default.
-
-.. c:function:: PyArray_Descr* PyArray_DescrFromType(int typenum)
-
- Returns a data-type object corresponding to *typenum*. The
- *typenum* can be one of the enumerated types, a character code for
- one of the enumerated types, or a user-defined type. If you want to
- use a flexible size array, then you need to use a flexible *typenum*
- and set the ``elsize`` member of the result to the desired size. The
- typenum is one of the :c:data:`NPY_TYPES`.
-
-.. c:function:: int PyArray_DescrConverter(PyObject* obj, PyArray_Descr** dtype)
-
- Convert any compatible Python object, *obj*, to a data-type object
- in *dtype*. A large number of Python objects can be converted to
- data-type objects. See :ref:`arrays.dtypes` for a complete
- description. This version of the converter converts None objects
- to a :c:data:`NPY_DEFAULT_TYPE` data-type object. This function can
- be used with the "O&" character code in :c:func:`PyArg_ParseTuple`
- processing.
-
-.. c:function:: int PyArray_DescrConverter2( \
- PyObject* obj, PyArray_Descr** dtype)
-
- Convert any compatible Python object, *obj*, to a data-type
- object in *dtype*. This version of the converter converts None
- objects so that the returned data-type is ``NULL``. This function
- can also be used with the "O&" character in PyArg_ParseTuple
- processing.
-
-.. c:function:: int PyArray_DescrAlignConverter( \
- PyObject* obj, PyArray_Descr** dtype)
-
- Like :c:func:`PyArray_DescrConverter` except it aligns C-struct-like
- objects on word-boundaries as the compiler would.
-
-.. c:function:: int PyArray_DescrAlignConverter2( \
- PyObject* obj, PyArray_Descr** dtype)
-
- Like :c:func:`PyArray_DescrConverter2` except it aligns C-struct-like
- objects on word-boundaries as the compiler would.
-
-.. c:function:: PyObject *PyArray_FieldNames(PyObject* dict)
-
- Take the fields dictionary, *dict*, such as the one attached to a
- data-type object and construct an ordered-list of field names such
- as is stored in the names field of the :c:type:`PyArray_Descr` object.
-
-
-Conversion Utilities
---------------------
-
-
-For use with :c:func:`PyArg_ParseTuple`
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-All of these functions can be used in :c:func:`PyArg_ParseTuple` (...) with
-the "O&" format specifier to automatically convert any Python object
-to the required C-object. All of these functions return
-:c:data:`NPY_SUCCEED` if successful and :c:data:`NPY_FAIL` if not. The first
-argument to all of these functions is a Python object. The second
-argument is the **address** of the C-type to convert the Python object
-to.
-
-
-.. warning::
-
- Be sure to understand what steps you should take to manage the
- memory when using these conversion functions. These functions can
- require freeing memory, and/or altering the reference counts of
- specific objects based on your use.
-
-.. c:function:: int PyArray_Converter(PyObject* obj, PyObject** address)
-
- Convert any Python object to a :c:type:`PyArrayObject`. If
- :c:func:`PyArray_Check` (*obj*) is TRUE then its reference count is
- incremented and a reference placed in *address*. If *obj* is not
- an array, then convert it to an array using :c:func:`PyArray_FromAny`
- . No matter what is returned, you must DECREF the object returned
- by this routine in *address* when you are done with it.
-
-.. c:function:: int PyArray_OutputConverter( \
- PyObject* obj, PyArrayObject** address)
-
- This is a default converter for output arrays given to
- functions. If *obj* is :c:data:`Py_None` or ``NULL``, then *\*address*
- will be ``NULL`` but the call will succeed. If :c:func:`PyArray_Check` (
- *obj*) is TRUE then it is returned in *\*address* without
- incrementing its reference count.
-
-.. c:function:: int PyArray_IntpConverter(PyObject* obj, PyArray_Dims* seq)
-
- Convert any Python sequence, *obj*, smaller than :c:data:`NPY_MAXDIMS`
- to a C-array of :c:type:`npy_intp`. The Python object could also be a
- single number. The *seq* variable is a pointer to a structure with
- members ptr and len. On successful return, ``seq->ptr`` contains a
- pointer to memory that must be freed, by calling :c:func:`PyDimMem_FREE`,
- to avoid a memory leak. The restriction on memory size allows this
- converter to be conveniently used for sequences intended to be
- interpreted as array shapes.
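-
- A sketch of both converters above in a module method (the method name
- ``my_reshape`` is hypothetical); note the cleanup of the converted
- objects:
-
- .. code-block:: c
-
-     #include <Python.h>
-     #include <numpy/arrayobject.h>
-
-     /* reshape(arr, shape) implemented with "O&" converters. */
-     static PyObject *
-     my_reshape(PyObject *self, PyObject *args)
-     {
-         PyObject *arr = NULL;
-         PyArray_Dims shape = {NULL, 0};
-         PyObject *res;
-
-         if (!PyArg_ParseTuple(args, "O&O&",
-                               PyArray_Converter, &arr,
-                               PyArray_IntpConverter, &shape)) {
-             return NULL;
-         }
-         res = PyArray_Newshape((PyArrayObject *)arr, &shape, NPY_CORDER);
-
-         Py_DECREF(arr);
-         PyDimMem_FREE(shape.ptr);
-         return res;
-     }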
-
-.. c:function:: int PyArray_BufferConverter(PyObject* obj, PyArray_Chunk* buf)
-
- Convert any Python object, *obj*, with a (single-segment) buffer
- interface to a variable with members that detail the object's use
- of its chunk of memory. The *buf* variable is a pointer to a
- structure with base, ptr, len, and flags members. The
- :c:type:`PyArray_Chunk` structure is binary compatible with the
- Python's buffer object (through its len member on 32-bit platforms
- and its ptr member on 64-bit platforms or in Python 2.5). On
- return, the base member is set to *obj* (or its base if *obj* is
- already a buffer object pointing to another object). If you need
- to hold on to the memory be sure to INCREF the base member. The
- chunk of memory is pointed to by *buf* ->ptr member and has length
- *buf* ->len. The flags member of *buf* is :c:data:`NPY_BEHAVED_RO` with
- the :c:data:`NPY_ARRAY_WRITEABLE` flag set if *obj* has a writeable buffer
- interface.
-
-.. c:function:: int PyArray_AxisConverter(PyObject* obj, int* axis)
-
- Convert a Python object, *obj*, representing an axis argument to
- the proper value for passing to the functions that take an integer
- axis. Specifically, if *obj* is None, *axis* is set to
- :c:data:`NPY_MAXDIMS` which is interpreted correctly by the C-API
- functions that take axis arguments.
-
-.. c:function:: int PyArray_BoolConverter(PyObject* obj, Bool* value)
-
- Convert any Python object, *obj*, to :c:data:`NPY_TRUE` or
- :c:data:`NPY_FALSE`, and place the result in *value*.
-
-.. c:function:: int PyArray_ByteorderConverter(PyObject* obj, char* endian)
-
- Convert Python strings into the corresponding byte-order
- character:
- '>', '<', 's', '=', or '\|'.
-
-.. c:function:: int PyArray_SortkindConverter(PyObject* obj, NPY_SORTKIND* sort)
-
- Convert Python strings into one of :c:data:`NPY_QUICKSORT` (starts
- with 'q' or 'Q'), :c:data:`NPY_HEAPSORT` (starts with 'h' or 'H'),
- :c:data:`NPY_MERGESORT` (starts with 'm' or 'M') or :c:data:`NPY_STABLESORT`
- (starts with 's' or 'S'). :c:data:`NPY_MERGESORT` and :c:data:`NPY_STABLESORT`
- are aliased to each other for backwards compatibility and may refer to one
- of several stable sorting algorithms depending on the data type.
-
-.. c:function:: int PyArray_SearchsideConverter( \
- PyObject* obj, NPY_SEARCHSIDE* side)
-
- Convert Python strings into one of :c:data:`NPY_SEARCHLEFT` (starts with 'l'
- or 'L'), or :c:data:`NPY_SEARCHRIGHT` (starts with 'r' or 'R').
-
-.. c:function:: int PyArray_OrderConverter(PyObject* obj, NPY_ORDER* order)
-
- Convert the Python strings 'C', 'F', 'A', and 'K' into the :c:type:`NPY_ORDER`
- enumeration :c:data:`NPY_CORDER`, :c:data:`NPY_FORTRANORDER`,
- :c:data:`NPY_ANYORDER`, and :c:data:`NPY_KEEPORDER`.
-
-.. c:function:: int PyArray_CastingConverter( \
- PyObject* obj, NPY_CASTING* casting)
-
- Convert the Python strings 'no', 'equiv', 'safe', 'same_kind', and
- 'unsafe' into the :c:type:`NPY_CASTING` enumeration :c:data:`NPY_NO_CASTING`,
- :c:data:`NPY_EQUIV_CASTING`, :c:data:`NPY_SAFE_CASTING`,
- :c:data:`NPY_SAME_KIND_CASTING`, and :c:data:`NPY_UNSAFE_CASTING`.
-
-.. c:function:: int PyArray_ClipmodeConverter( \
- PyObject* object, NPY_CLIPMODE* val)
-
- Convert the Python strings 'clip', 'wrap', and 'raise' into the
- :c:type:`NPY_CLIPMODE` enumeration :c:data:`NPY_CLIP`, :c:data:`NPY_WRAP`,
- and :c:data:`NPY_RAISE`.
-
-.. c:function:: int PyArray_ConvertClipmodeSequence( \
- PyObject* object, NPY_CLIPMODE* modes, int n)
-
- Converts either a sequence of clipmodes or a single clipmode into
- a C array of :c:type:`NPY_CLIPMODE` values. The number of clipmodes *n*
- must be known before calling this function. This function is provided
- to help functions allow a different clipmode for each dimension.
-
-Other conversions
-^^^^^^^^^^^^^^^^^
-
-.. c:function:: int PyArray_PyIntAsInt(PyObject* op)
-
- Convert all kinds of Python objects (including arrays and array
- scalars) to a standard integer. On error, -1 is returned and an
- exception set. You may find the following macro useful:
-
- .. code-block:: c
-
- #define error_converting(x) (((x) == -1) && PyErr_Occurred())
-
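- For instance, a sketch using the macro and :c:func:`PyArray_PyIntAsInt`
- (the helper name ``get_axis`` is hypothetical):
-
- .. code-block:: c
-
-     /* Convert a Python object to a C int; 0 on success, -1 on error. */
-     static int
-     get_axis(PyObject *obj, int *axis)
-     {
-         *axis = PyArray_PyIntAsInt(obj);
-         if (error_converting(*axis)) {
-             return -1;
-         }
-         return 0;
-     }
-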
-.. c:function:: npy_intp PyArray_PyIntAsIntp(PyObject* op)
-
- Convert all kinds of Python objects (including arrays and array
- scalars) to a (platform-pointer-sized) integer. On error, -1 is
- returned and an exception set.
-
-.. c:function:: int PyArray_IntpFromSequence( \
- PyObject* seq, npy_intp* vals, int maxvals)
-
- Convert any Python sequence (or single Python number) passed in as
- *seq* to (up to) *maxvals* pointer-sized integers and place them
- in the *vals* array. The sequence can be smaller than *maxvals* as
- the number of converted objects is returned.
-
-.. c:function:: int PyArray_TypestrConvert(int itemsize, int gentype)
-
- Convert typestring characters (with *itemsize*) to basic
-   enumerated data types. The typestring characters corresponding to
-   signed and unsigned integers, floating point numbers, and
-   complex floating point numbers are recognized and converted. Other
-   values of *gentype* are returned. This function can be used to
- convert, for example, the string 'f4' to :c:data:`NPY_FLOAT32`.
-
-
-Miscellaneous
--------------
-
-
-Importing the API
-^^^^^^^^^^^^^^^^^
-
-In order to make use of the C-API from another extension module, the
-:c:func:`import_array` function must be called. If the extension module is
-self-contained in a single .c file, then that is all that needs to be
-done. If, however, the extension module involves multiple files where
-the C-API is needed then some additional steps must be taken.
-
-.. c:function:: void import_array(void)
-
- This function must be called in the initialization section of a
- module that will make use of the C-API. It imports the module
- where the function-pointer table is stored and points the correct
- variable to it.
-
-.. c:macro:: PY_ARRAY_UNIQUE_SYMBOL
-
-.. c:macro:: NO_IMPORT_ARRAY
-
- Using these #defines you can use the C-API in multiple files for a
- single extension module. In each file you must define
- :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` to some name that will hold the
- C-API (*e.g.* myextension_ARRAY_API). This must be done **before**
- including the numpy/arrayobject.h file. In the module
- initialization routine you call :c:func:`import_array`. In addition,
- in the files that do not have the module initialization
-   subroutine, define :c:macro:`NO_IMPORT_ARRAY` prior to including
- numpy/arrayobject.h.
-
- Suppose I have two files coolmodule.c and coolhelper.c which need
- to be compiled and linked into a single extension module. Suppose
- coolmodule.c contains the required initcool module initialization
- function (with the import_array() function called). Then,
- coolmodule.c would have at the top:
-
- .. code-block:: c
-
- #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API
-      #include <numpy/arrayobject.h>
-
- On the other hand, coolhelper.c would contain at the top:
-
- .. code-block:: c
-
- #define NO_IMPORT_ARRAY
- #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API
-      #include <numpy/arrayobject.h>
-
-   You can also put the common last two lines into an extension-local
- header file as long as you make sure that NO_IMPORT_ARRAY is
- #defined before #including that file.
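-
-   For reference, a minimal Python 3 module initialization calling
-   :c:func:`import_array` (``PyInit_cool`` rather than the Python 2
-   ``initcool``) might look like the following sketch; the module
-   definition details are illustrative:
-
-   .. code-block:: c
-
-      #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API
-      #include <numpy/arrayobject.h>
-
-      static struct PyModuleDef moduledef = {
-          PyModuleDef_HEAD_INIT, "cool", NULL, -1, NULL
-      };
-
-      PyMODINIT_FUNC
-      PyInit_cool(void)
-      {
-          import_array();  /* on failure this sets an exception and
-                              returns NULL from PyInit_cool */
-          return PyModule_Create(&moduledef);
-      }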
-
- Internally, these #defines work as follows:
-
- * If neither is defined, the C-API is declared to be
- :c:type:`static void**`, so it is only visible within the
- compilation unit that #includes numpy/arrayobject.h.
- * If :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is #defined, but
- :c:macro:`NO_IMPORT_ARRAY` is not, the C-API is declared to
- be :c:type:`void**`, so that it will also be visible to other
- compilation units.
- * If :c:macro:`NO_IMPORT_ARRAY` is #defined, regardless of
- whether :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is, the C-API is
- declared to be :c:type:`extern void**`, so it is expected to
- be defined in another compilation unit.
- * Whenever :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is #defined, it
- also changes the name of the variable holding the C-API, which
- defaults to :c:data:`PyArray_API`, to whatever the macro is
- #defined to.
-
-Checking the API Version
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-Because python extensions are not used in the same way as usual libraries on
-most platforms, some errors cannot be automatically detected at build time or
-even runtime. For example, if you build an extension using a function available
-only for numpy >= 1.3.0, and you import the extension later with numpy 1.2, you
-will not get an import error (but almost certainly a segmentation fault when
-calling the function). That's why several functions are provided to check for
-numpy versions. The macros :c:data:`NPY_VERSION` and
-:c:data:`NPY_FEATURE_VERSION` correspond to the numpy version used to build
-the extension, whereas the versions returned by the functions
-PyArray_GetNDArrayCVersion and PyArray_GetNDArrayCFeatureVersion correspond
-to the version of numpy in use at runtime.
-
-The rules for ABI and API compatibilities can be summarized as follows:
-
- * Whenever :c:data:`NPY_VERSION` != PyArray_GetNDArrayCVersion, the
- extension has to be recompiled (ABI incompatibility).
- * :c:data:`NPY_VERSION` == PyArray_GetNDArrayCVersion and
- :c:data:`NPY_FEATURE_VERSION` <= PyArray_GetNDArrayCFeatureVersion means
- backward compatible changes.
-
-ABI incompatibility is automatically detected in every numpy version. API
-incompatibility detection was added in numpy 1.4.0. If you want to support
-many different numpy versions with one extension binary, you have to build your
-extension with the lowest NPY_FEATURE_VERSION possible.
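-
-For example, an extension could perform the runtime checks itself (a
-minimal sketch; the error handling is illustrative):
-
-.. code-block:: c
-
-    if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
-        PyErr_SetString(PyExc_ImportError,
-                        "compiled against an incompatible numpy ABI");
-        return NULL;
-    }
-    if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
-        PyErr_SetString(PyExc_ImportError,
-                        "compiled against a newer numpy API");
-        return NULL;
-    }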
-
-.. c:function:: unsigned int PyArray_GetNDArrayCVersion(void)
-
-   This just returns the value :c:data:`NPY_VERSION`. :c:data:`NPY_VERSION`
-   changes whenever a backward incompatible change is made at the ABI level.
-   Because it is in the C-API, however, comparing the output of this function
-   to the value defined in the current header gives a way to test if the
-   C-API has changed, thus requiring a recompilation of extension modules
-   that use the C-API. This is automatically checked in the function
-   :c:func:`import_array`.
-
-.. c:function:: unsigned int PyArray_GetNDArrayCFeatureVersion(void)
-
- .. versionadded:: 1.4.0
-
- This just returns the value :c:data:`NPY_FEATURE_VERSION`.
- :c:data:`NPY_FEATURE_VERSION` changes whenever the API changes (e.g. a
- function is added). A changed value does not always require a recompile.
-
-Internal Flexibility
-^^^^^^^^^^^^^^^^^^^^
-
-.. c:function:: int PyArray_SetNumericOps(PyObject* dict)
-
- NumPy stores an internal table of Python callable objects that are
- used to implement arithmetic operations for arrays as well as
- certain array calculation methods. This function allows the user
- to replace any or all of these Python objects with their own
- versions. The keys of the dictionary, *dict*, are the named
- functions to replace and the paired value is the Python callable
- object to use. Care should be taken that the function used to
- replace an internal array operation does not itself call back to
- that internal array operation (unless you have designed the
- function to handle that), or an unchecked infinite recursion can
- result (possibly causing program crash). The key names that
- represent operations that can be replaced are:
-
- **add**, **subtract**, **multiply**, **divide**,
- **remainder**, **power**, **square**, **reciprocal**,
- **ones_like**, **sqrt**, **negative**, **positive**,
- **absolute**, **invert**, **left_shift**, **right_shift**,
- **bitwise_and**, **bitwise_xor**, **bitwise_or**,
- **less**, **less_equal**, **equal**, **not_equal**,
- **greater**, **greater_equal**, **floor_divide**,
- **true_divide**, **logical_or**, **logical_and**,
- **floor**, **ceil**, **maximum**, **minimum**, **rint**.
-
-
- These functions are included here because they are used at least once
- in the array object's methods. The function returns -1 (without
-   setting a Python error) if one of the objects being assigned is not
- callable.
-
- .. deprecated:: 1.16
-
-.. c:function:: PyObject* PyArray_GetNumericOps(void)
-
- Return a Python dictionary containing the callable Python objects
- stored in the internal arithmetic operation table. The keys of
- this dictionary are given in the explanation for :c:func:`PyArray_SetNumericOps`.
-
- .. deprecated:: 1.16
-
-.. c:function:: void PyArray_SetStringFunction(PyObject* op, int repr)
-
- This function allows you to alter the tp_str and tp_repr methods
- of the array object to any Python function. Thus you can alter
- what happens for all arrays when str(arr) or repr(arr) is called
- from Python. The function to be called is passed in as *op*. If
- *repr* is non-zero, then this function will be called in response
- to repr(arr), otherwise the function will be called in response to
- str(arr). No check on whether or not *op* is callable is
- performed. The callable passed in to *op* should expect an array
- argument and should return a string to be printed.
-
-
-Memory management
-^^^^^^^^^^^^^^^^^
-
-.. c:function:: char* PyDataMem_NEW(size_t nbytes)
-
-.. c:function:: PyDataMem_FREE(char* ptr)
-
-.. c:function:: char* PyDataMem_RENEW(void * ptr, size_t newbytes)
-
- Macros to allocate, free, and reallocate memory. These macros are used
- internally to create arrays.
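-
-   For example (a minimal sketch; the error handling is illustrative):
-
-   .. code-block:: c
-
-      char *buffer = PyDataMem_NEW(1024);
-      if (buffer == NULL) {
-          return PyErr_NoMemory();
-      }
-      /* ... use the buffer ... */
-      PyDataMem_FREE(buffer);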
-
-.. c:function:: npy_intp* PyDimMem_NEW(int nd)
-
-.. c:function:: PyDimMem_FREE(char* ptr)
-
-.. c:function:: npy_intp* PyDimMem_RENEW(void* ptr, size_t newnd)
-
- Macros to allocate, free, and reallocate dimension and strides memory.
-
-.. c:function:: void* PyArray_malloc(size_t nbytes)
-
-.. c:function:: PyArray_free(void* ptr)
-
-.. c:function:: void* PyArray_realloc(npy_intp* ptr, size_t nbytes)
-
- These macros use different memory allocators, depending on the
- constant :c:data:`NPY_USE_PYMEM`. The system malloc is used when
-   :c:data:`NPY_USE_PYMEM` is 0; if :c:data:`NPY_USE_PYMEM` is 1, then
- the Python memory allocator is used.
-
-.. c:function:: int PyArray_ResolveWritebackIfCopy(PyArrayObject* obj)
-
-   If ``obj.flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or (deprecated)
-   :c:data:`NPY_ARRAY_UPDATEIFCOPY`, this function copies ``obj->data`` to
-   ``obj->base->data``, clears the flags, ``DECREF``\ s ``obj->base`` and
-   makes it writeable, sets ``obj->base`` to NULL, and returns the error
-   state of the copy operation. This is the opposite of
-   :c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called once
-   you are finished with ``obj``, just before ``Py_DECREF(obj)``. It may
-   be called multiple times, or with ``NULL`` input. See also
-   :c:func:`PyArray_DiscardWritebackIfCopy`.
-
- Returns 0 if nothing was done, -1 on error, and 1 if action was taken.
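-
-   A typical cleanup might look like this sketch:
-
-   .. code-block:: c
-
-      /* obj was set up with writeback-if-copy semantics earlier */
-      if (PyArray_ResolveWritebackIfCopy(obj) < 0) {
-          Py_DECREF(obj);
-          return NULL;  /* the copy back to obj->base failed */
-      }
-      Py_DECREF(obj);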
-
-Threading support
-^^^^^^^^^^^^^^^^^
-
-These macros are only meaningful if :c:data:`NPY_ALLOW_THREADS`
-evaluates to True during compilation of the extension module. Otherwise,
-these macros are equivalent to whitespace. Python uses a single Global
-Interpreter Lock (GIL) for each Python process so that only a single
-thread may execute at a time (even on multi-cpu machines). When
-calling out to a compiled function that may take time to compute (and
-does not have side-effects for other threads like updated global
-variables), the GIL should be released so that other Python threads
-can run while the time-consuming calculations are performed. This can
-be accomplished using two groups of macros. Typically, if one macro in
-a group is used in a code block, all of them must be used in the same
-code block. Currently, :c:data:`NPY_ALLOW_THREADS` is defined to the
-python-defined :c:data:`WITH_THREADS` constant unless the environment
-variable :c:data:`NPY_NOSMP` is set in which case
-:c:data:`NPY_ALLOW_THREADS` is defined to be 0.
-
-Group 1
-"""""""
-
- This group is used to call code that may take some time but does not
- use any Python C-API calls. Thus, the GIL should be released during
- its calculation.
-
- .. c:macro:: NPY_BEGIN_ALLOW_THREADS
-
-      Equivalent to :c:macro:`Py_BEGIN_ALLOW_THREADS` except it uses
-      :c:data:`NPY_ALLOW_THREADS` to determine if the macro is
-      replaced with whitespace or not.
-
- .. c:macro:: NPY_END_ALLOW_THREADS
-
-      Equivalent to :c:macro:`Py_END_ALLOW_THREADS` except it uses
-      :c:data:`NPY_ALLOW_THREADS` to determine if the macro is
-      replaced with whitespace or not.
-
- .. c:macro:: NPY_BEGIN_THREADS_DEF
-
- Place in the variable declaration area. This macro sets up the
- variable needed for storing the Python state.
-
- .. c:macro:: NPY_BEGIN_THREADS
-
- Place right before code that does not need the Python
- interpreter (no Python C-API calls). This macro saves the
- Python state and releases the GIL.
-
- .. c:macro:: NPY_END_THREADS
-
- Place right after code that does not need the Python
- interpreter. This macro acquires the GIL and restores the
- Python state from the saved variable.
-
- .. c:function:: NPY_BEGIN_THREADS_DESCR(PyArray_Descr *dtype)
-
-      Useful to release the GIL only if *dtype* does not contain
-      arbitrary Python objects which may need the Python interpreter
-      during execution of the loop.
-
- .. c:function:: NPY_END_THREADS_DESCR(PyArray_Descr *dtype)
-
- Useful to regain the GIL in situations where it was released
- using the BEGIN form of this macro.
-
- .. c:function:: NPY_BEGIN_THREADS_THRESHOLDED(int loop_size)
-
- Useful to release the GIL only if *loop_size* exceeds a
- minimum threshold, currently set to 500. Should be matched
- with a :c:macro:`NPY_END_THREADS` to regain the GIL.
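-
-   As a minimal sketch (the computation itself is illustrative), a
-   long-running loop with no Python C-API calls might be wrapped as
-   follows; per the tip below, no semicolons are placed after the macros:
-
-   .. code-block:: c
-
-      static void
-      double_all(double *data, npy_intp n)
-      {
-          npy_intp i;
-          NPY_BEGIN_THREADS_DEF
-
-          NPY_BEGIN_THREADS
-          for (i = 0; i < n; i++) {
-              data[i] *= 2.0;  /* no Python C-API calls in here */
-          }
-          NPY_END_THREADS
-      }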
-
-Group 2
-"""""""
-
- This group is used to re-acquire the Python GIL after it has been
- released. For example, suppose the GIL has been released (using the
- previous calls), and then some path in the code (perhaps in a
- different subroutine) requires use of the Python C-API, then these
- macros are useful to acquire the GIL. These macros accomplish
-   essentially the reverse of the previous three: they acquire the GIL
-   (saving its state) and then re-release it with the saved state.
-
- .. c:macro:: NPY_ALLOW_C_API_DEF
-
- Place in the variable declaration area to set up the necessary
- variable.
-
- .. c:macro:: NPY_ALLOW_C_API
-
- Place before code that needs to call the Python C-API (when it is
- known that the GIL has already been released).
-
- .. c:macro:: NPY_DISABLE_C_API
-
- Place after code that needs to call the Python C-API (to re-release
- the GIL).
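-
-   For example (a minimal sketch; ``something_went_wrong`` is a
-   hypothetical condition computed without the GIL):
-
-   .. code-block:: c
-
-      NPY_ALLOW_C_API_DEF
-
-      /* ... the GIL was released with NPY_BEGIN_THREADS ... */
-      if (something_went_wrong) {
-          NPY_ALLOW_C_API
-          PyErr_SetString(PyExc_RuntimeError, "computation failed");
-          NPY_DISABLE_C_API
-      }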
-
-.. tip::
-
- Never use semicolons after the threading support macros.
-
-
-Priority
-^^^^^^^^
-
-.. c:var:: NPY_PRIORITY
-
- Default priority for arrays.
-
-.. c:var:: NPY_SUBTYPE_PRIORITY
-
- Default subtype priority.
-
-.. c:var:: NPY_SCALAR_PRIORITY
-
- Default scalar priority (very small)
-
-.. c:function:: double PyArray_GetPriority(PyObject* obj, double def)
-
- Return the :obj:`~numpy.class.__array_priority__` attribute (converted to a
- double) of *obj* or *def* if no attribute of that name
- exists. Fast returns that avoid the attribute lookup are provided
- for objects of type :c:data:`PyArray_Type`.
-
-
-Default buffers
-^^^^^^^^^^^^^^^
-
-.. c:var:: NPY_BUFSIZE
-
- Default size of the user-settable internal buffers.
-
-.. c:var:: NPY_MIN_BUFSIZE
-
- Smallest size of user-settable internal buffers.
-
-.. c:var:: NPY_MAX_BUFSIZE
-
- Largest size allowed for the user-settable buffers.
-
-
-Other constants
-^^^^^^^^^^^^^^^
-
-.. c:var:: NPY_NUM_FLOATTYPE
-
- The number of floating-point types
-
-.. c:var:: NPY_MAXDIMS
-
- The maximum number of dimensions allowed in arrays.
-
-.. c:var:: NPY_VERSION
-
- The current version of the ndarray object (check to see if this
- variable is defined to guarantee the numpy/arrayobject.h header is
- being used).
-
-.. c:var:: NPY_FALSE
-
- Defined as 0 for use with Bool.
-
-.. c:var:: NPY_TRUE
-
- Defined as 1 for use with Bool.
-
-.. c:var:: NPY_FAIL
-
- The return value of failed converter functions which are called using
- the "O&" syntax in :c:func:`PyArg_ParseTuple`-like functions.
-
-.. c:var:: NPY_SUCCEED
-
- The return value of successful converter functions which are called
- using the "O&" syntax in :c:func:`PyArg_ParseTuple`-like functions.
-
-
-Miscellaneous Macros
-^^^^^^^^^^^^^^^^^^^^
-
-.. c:function:: PyArray_SAMESHAPE(PyArrayObject *a1, PyArrayObject *a2)
-
- Evaluates as True if arrays *a1* and *a2* have the same shape.
-
-.. c:macro:: PyArray_MAX(a,b)
-
- Returns the maximum of *a* and *b*. If (*a*) or (*b*) are
- expressions they are evaluated twice.
-
-.. c:macro:: PyArray_MIN(a,b)
-
- Returns the minimum of *a* and *b*. If (*a*) or (*b*) are
- expressions they are evaluated twice.
-
-.. c:macro:: PyArray_CLT(a,b)
-
-.. c:macro:: PyArray_CGT(a,b)
-
-.. c:macro:: PyArray_CLE(a,b)
-
-.. c:macro:: PyArray_CGE(a,b)
-
-.. c:macro:: PyArray_CEQ(a,b)
-
-.. c:macro:: PyArray_CNE(a,b)
-
- Implements the complex comparisons between two complex numbers
- (structures with a real and imag member) using NumPy's definition
-   of the ordering, which is lexicographic: comparing the real parts
-   first and then the imaginary parts if the real parts are equal.
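-
-   For example (a minimal sketch using :c:type:`npy_cdouble`):
-
-   .. code-block:: c
-
-      npy_cdouble x = {2.0, 1.0};
-      npy_cdouble y = {2.0, 3.0};
-
-      /* real parts are equal, so the imaginary parts decide: x < y */
-      int less = PyArray_CLT(x, y);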
-
-.. c:function:: PyArray_REFCOUNT(PyObject* op)
-
- Returns the reference count of any Python object.
-
-.. c:function:: PyArray_DiscardWritebackIfCopy(PyObject* obj)
-
-   If ``obj.flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or (deprecated)
-   :c:data:`NPY_ARRAY_UPDATEIFCOPY`, this function clears the flags,
-   ``DECREF``\ s ``obj->base`` and makes it writeable, and sets ``obj->base``
-   to NULL. In contrast to :c:func:`PyArray_ResolveWritebackIfCopy` it makes
-   no attempt to copy the data from ``obj->base``. This undoes
-   :c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called after an
-   error when you are finished with ``obj``, just before ``Py_DECREF(obj)``.
-   It may be called multiple times, or with ``NULL`` input.
-
-.. c:function:: PyArray_XDECREF_ERR(PyObject* obj)
-
- Deprecated in 1.14, use :c:func:`PyArray_DiscardWritebackIfCopy`
-   followed by ``Py_XDECREF``.
-
- DECREF's an array object which may have the (deprecated)
- :c:data:`NPY_ARRAY_UPDATEIFCOPY` or :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`
- flag set without causing the contents to be copied back into the
- original array. Resets the :c:data:`NPY_ARRAY_WRITEABLE` flag on the base
- object. This is useful for recovering from an error condition when
-   writeback semantics are used, but otherwise will lead to wrong results.
-
-
-Enumerated Types
-^^^^^^^^^^^^^^^^
-
-.. c:type:: NPY_SORTKIND
-
- A special variable-type which can take on the values :c:data:`NPY_{KIND}`
- where ``{KIND}`` is
-
- **QUICKSORT**, **HEAPSORT**, **MERGESORT**, **STABLESORT**
-
- .. c:var:: NPY_NSORTS
-
- Defined to be the number of sorts. It is fixed at three by the need for
- backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and
- :c:data:`NPY_STABLESORT` are aliased to each other and may refer to one
- of several stable sorting algorithms depending on the data type.
-
-
-.. c:type:: NPY_SCALARKIND
-
- A special variable type indicating the number of "kinds" of
- scalars distinguished in determining scalar-coercion rules. This
- variable can take on the values :c:data:`NPY_{KIND}` where ``{KIND}`` can be
-
- **NOSCALAR**, **BOOL_SCALAR**, **INTPOS_SCALAR**,
- **INTNEG_SCALAR**, **FLOAT_SCALAR**, **COMPLEX_SCALAR**,
- **OBJECT_SCALAR**
-
- .. c:var:: NPY_NSCALARKINDS
-
- Defined to be the number of scalar kinds
- (not including :c:data:`NPY_NOSCALAR`).
-
-.. c:type:: NPY_ORDER
-
- An enumeration type indicating the element order that an array should be
- interpreted in. When a brand new array is created, generally
- only **NPY_CORDER** and **NPY_FORTRANORDER** are used, whereas
- when one or more inputs are provided, the order can be based on them.
-
- .. c:var:: NPY_ANYORDER
-
- Fortran order if all the inputs are Fortran, C otherwise.
-
- .. c:var:: NPY_CORDER
-
- C order.
-
- .. c:var:: NPY_FORTRANORDER
-
- Fortran order.
-
- .. c:var:: NPY_KEEPORDER
-
- An order as close to the order of the inputs as possible, even
- if the input is in neither C nor Fortran order.
-
-.. c:type:: NPY_CLIPMODE
-
- A variable type indicating the kind of clipping that should be
- applied in certain functions.
-
- .. c:var:: NPY_RAISE
-
- The default for most operations, raises an exception if an index
- is out of bounds.
-
- .. c:var:: NPY_CLIP
-
- Clips an index to the valid range if it is out of bounds.
-
- .. c:var:: NPY_WRAP
-
- Wraps an index to the valid range if it is out of bounds.
-
-.. c:type:: NPY_CASTING
-
- .. versionadded:: 1.6
-
- An enumeration type indicating how permissive data conversions should
- be. This is used by the iterator added in NumPy 1.6, and is intended
- to be used more broadly in a future version.
-
- .. c:var:: NPY_NO_CASTING
-
- Only allow identical types.
-
- .. c:var:: NPY_EQUIV_CASTING
-
-      Allow identical types and casts involving byte swapping.
-
- .. c:var:: NPY_SAFE_CASTING
-
- Only allow casts which will not cause values to be rounded,
- truncated, or otherwise changed.
-
- .. c:var:: NPY_SAME_KIND_CASTING
-
- Allow any safe casts, and casts between types of the same kind.
- For example, float64 -> float32 is permitted with this rule.
-
- .. c:var:: NPY_UNSAFE_CASTING
-
- Allow any cast, no matter what kind of data loss may occur.
-
-.. index::
- pair: ndarray; C-API
+++ /dev/null
-System configuration
-====================
-
-.. sectionauthor:: Travis E. Oliphant
-
-When NumPy is built, information about system configuration is
-recorded, and is made available for extension modules using NumPy's C
-API. These are mostly defined in ``numpyconfig.h`` (included in
-``ndarrayobject.h``). The public symbols are prefixed by ``NPY_*``.
-NumPy also offers some functions for querying information about the
-platform in use.
-
-For private use, NumPy also constructs a ``config.h`` in the NumPy
-include directory, which is not exported by NumPy (that is, a Python
-extension which uses the NumPy C API will not see those symbols), to
-avoid namespace pollution.
-
-
-Data type sizes
----------------
-
-The :c:data:`NPY_SIZEOF_{CTYPE}` constants are defined so that sizeof
-information is available to the pre-processor.
-
-.. c:var:: NPY_SIZEOF_SHORT
-
- sizeof(short)
-
-.. c:var:: NPY_SIZEOF_INT
-
- sizeof(int)
-
-.. c:var:: NPY_SIZEOF_LONG
-
- sizeof(long)
-
-.. c:var:: NPY_SIZEOF_LONGLONG
-
- sizeof(longlong) where longlong is defined appropriately on the
- platform.
-
-.. c:var:: NPY_SIZEOF_PY_LONG_LONG
-
-
-.. c:var:: NPY_SIZEOF_FLOAT
-
- sizeof(float)
-
-.. c:var:: NPY_SIZEOF_DOUBLE
-
- sizeof(double)
-
-.. c:var:: NPY_SIZEOF_LONG_DOUBLE
-
- sizeof(longdouble) (A macro defines **NPY_SIZEOF_LONGDOUBLE** as well.)
-
-.. c:var:: NPY_SIZEOF_PY_INTPTR_T
-
- Size of a pointer on this platform (sizeof(void \*)) (A macro defines
- NPY_SIZEOF_INTP as well.)
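-
-For example, a minimal sketch of conditional compilation on these
-constants:
-
-.. code-block:: c
-
-    #include <numpy/ndarrayobject.h>
-
-    #if NPY_SIZEOF_LONG == NPY_SIZEOF_LONGLONG
-    /* long and long long have the same size on this platform */
-    #endif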
-
-
-Platform information
---------------------
-
-.. c:var:: NPY_CPU_X86
-.. c:var:: NPY_CPU_AMD64
-.. c:var:: NPY_CPU_IA64
-.. c:var:: NPY_CPU_PPC
-.. c:var:: NPY_CPU_PPC64
-.. c:var:: NPY_CPU_SPARC
-.. c:var:: NPY_CPU_SPARC64
-.. c:var:: NPY_CPU_S390
-.. c:var:: NPY_CPU_PARISC
-
- .. versionadded:: 1.3.0
-
- CPU architecture of the platform; only one of the above is
- defined.
-
- Defined in ``numpy/npy_cpu.h``
-
-.. c:var:: NPY_LITTLE_ENDIAN
-
-.. c:var:: NPY_BIG_ENDIAN
-
-.. c:var:: NPY_BYTE_ORDER
-
- .. versionadded:: 1.3.0
-
- Portable alternatives to the ``endian.h`` macros of GNU Libc.
- If big endian, :c:data:`NPY_BYTE_ORDER` == :c:data:`NPY_BIG_ENDIAN`, and
- similarly for little endian architectures.
-
- Defined in ``numpy/npy_endian.h``.
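-
-   For example (a minimal sketch):
-
-   .. code-block:: c
-
-      #include <numpy/npy_endian.h>
-
-      #if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
-      /* little-endian specific code */
-      #endif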
-
-.. c:function:: PyArray_GetEndianness()
-
- .. versionadded:: 1.3.0
-
- Returns the endianness of the current platform.
- One of :c:data:`NPY_CPU_BIG`, :c:data:`NPY_CPU_LITTLE`,
- or :c:data:`NPY_CPU_UNKNOWN_ENDIAN`.
-
-
-Compiler directives
--------------------
-
-.. c:var:: NPY_LIKELY
-.. c:var:: NPY_UNLIKELY
-.. c:var:: NPY_UNUSED
-
-
-Interrupt Handling
-------------------
-
-.. c:var:: NPY_INTERRUPT_H
-.. c:var:: NPY_SIGSETJMP
-.. c:var:: NPY_SIGLONGJMP
-.. c:var:: NPY_SIGJMP_BUF
-.. c:var:: NPY_SIGINT_ON
-.. c:var:: NPY_SIGINT_OFF
+++ /dev/null
-NumPy core libraries
-====================
-
-.. sectionauthor:: David Cournapeau
-
-.. versionadded:: 1.3.0
-
-Starting from numpy 1.3.0, we are working on separating the pure C,
-"computational" code from the python dependent code. The goal is twofolds:
-making the code cleaner, and enabling code reuse by other extensions outside
-numpy (scipy, etc...).
-
-NumPy core math library
------------------------
-
-The numpy core math library ('npymath') is a first step in this direction. This
-library contains most math-related C99 functionality, which can be used on
-platforms where C99 is not well supported. The core math functions have the
-same API as the C99 ones, except for the npy_* prefix.
-
-The available functions are defined in <numpy/npy_math.h> - please refer to
-this header when in doubt.
-
-Floating point classification
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. c:var:: NPY_NAN
-
-   This macro is defined to a NaN (Not a Number), and is guaranteed to have
-   the sign bit unset (a 'positive' NaN). The corresponding single and
-   extended precision macros are available with the suffixes F and L.
-
-.. c:var:: NPY_INFINITY
-
-   This macro is defined to a positive infinity. The corresponding single
-   and extended precision macros are available with the suffixes F and L.
-
-.. c:var:: NPY_PZERO
-
-   This macro is defined to positive zero. The corresponding single and
-   extended precision macros are available with the suffixes F and L.
-
-.. c:var:: NPY_NZERO
-
-   This macro is defined to negative zero (that is, with the sign bit set).
-   The corresponding single and extended precision macros are available
-   with the suffixes F and L.
-
-.. c:function:: int npy_isnan(x)
-
-   This is a macro, and is equivalent to C99 isnan: works for single, double
-   and extended precision, and returns a non-zero value if x is a NaN.
-
-.. c:function:: int npy_isfinite(x)
-
-   This is a macro, and is equivalent to C99 isfinite: works for single,
-   double and extended precision, and returns a non-zero value if x is
-   neither a NaN nor an infinity.
-
-.. c:function:: int npy_isinf(x)
-
-   This is a macro, and is equivalent to C99 isinf: works for single, double
-   and extended precision, and returns a non-zero value if x is infinite
-   (positive or negative).
-
-.. c:function:: int npy_signbit(x)
-
-   This is a macro, and is equivalent to C99 signbit: works for single,
-   double and extended precision, and returns a non-zero value if x has
-   the sign bit set (that is, the number is negative).
-
-.. c:function:: double npy_copysign(double x, double y)
-
-   This is a function equivalent to C99 copysign: returns x with the same
-   sign as y. Works for any value, including inf and nan. Single and
-   extended precisions are available with the suffixes f and l.
-
- .. versionadded:: 1.4.0
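-
-For example (a minimal sketch combining the macros above):
-
-.. code-block:: c
-
-    #include <numpy/npy_math.h>
-
-    /* Replace non-finite values by zero, preserving the sign. */
-    static double clean(double x)
-    {
-        if (npy_isnan(x) || npy_isinf(x)) {
-            return npy_copysign(0.0, x);
-        }
-        return x;
-    }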
-
-Useful math constants
-~~~~~~~~~~~~~~~~~~~~~
-
-The following math constants are available in ``npy_math.h``. Single
-and extended precision are also available by adding the ``f`` and
-``l`` suffixes respectively.
-
-.. c:var:: NPY_E
-
- Base of natural logarithm (:math:`e`)
-
-.. c:var:: NPY_LOG2E
-
-   Logarithm to base 2 of :math:`e` (:math:`\frac{\ln(e)}{\ln(2)}`)
-
-.. c:var:: NPY_LOG10E
-
-   Logarithm to base 10 of :math:`e` (:math:`\frac{\ln(e)}{\ln(10)}`)
-
-.. c:var:: NPY_LOGE2
-
- Natural logarithm of 2 (:math:`\ln(2)`)
-
-.. c:var:: NPY_LOGE10
-
- Natural logarithm of 10 (:math:`\ln(10)`)
-
-.. c:var:: NPY_PI
-
- Pi (:math:`\pi`)
-
-.. c:var:: NPY_PI_2
-
- Pi divided by 2 (:math:`\frac{\pi}{2}`)
-
-.. c:var:: NPY_PI_4
-
- Pi divided by 4 (:math:`\frac{\pi}{4}`)
-
-.. c:var:: NPY_1_PI
-
- Reciprocal of pi (:math:`\frac{1}{\pi}`)
-
-.. c:var:: NPY_2_PI
-
- Two times the reciprocal of pi (:math:`\frac{2}{\pi}`)
-
-.. c:var:: NPY_EULER
-
- The Euler constant
- :math:`\lim_{n\rightarrow\infty}({\sum_{k=1}^n{\frac{1}{k}}-\ln n})`
-
-Low-level floating point manipulation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Those can be useful for precise floating point comparison.
-
-.. c:function:: double npy_nextafter(double x, double y)
-
-   This is a function equivalent to C99 nextafter: returns the next
-   representable floating point value from x in the direction of y. Single
-   and extended precisions are available with the suffixes f and l.
-
- .. versionadded:: 1.4.0
-
-.. c:function:: double npy_spacing(double x)
-
-   This is a function equivalent to the Fortran ``spacing`` intrinsic:
-   returns the distance between x and the next representable floating point
-   value of x, e.g. spacing(1) == eps. The spacing of NaN and +/- inf
-   returns NaN. Single and extended precisions are available with the
-   suffixes f and l.
-
- .. versionadded:: 1.4.0
-
-.. c:function:: void npy_set_floatstatus_divbyzero()
-
- Set the divide by zero floating point exception
-
- .. versionadded:: 1.6.0
-
-.. c:function:: void npy_set_floatstatus_overflow()
-
- Set the overflow floating point exception
-
- .. versionadded:: 1.6.0
-
-.. c:function:: void npy_set_floatstatus_underflow()
-
- Set the underflow floating point exception
-
- .. versionadded:: 1.6.0
-
-.. c:function:: void npy_set_floatstatus_invalid()
-
- Set the invalid floating point exception
-
- .. versionadded:: 1.6.0
-
-.. c:function:: int npy_get_floatstatus()
-
-   Get floating point status. Returns a bitmask with the following
-   possible flags:
-
- * NPY_FPE_DIVIDEBYZERO
- * NPY_FPE_OVERFLOW
- * NPY_FPE_UNDERFLOW
- * NPY_FPE_INVALID
-
- Note that :c:func:`npy_get_floatstatus_barrier` is preferable as it prevents
- aggressive compiler optimizations reordering the call relative to
- the code setting the status, which could lead to incorrect results.
-
- .. versionadded:: 1.9.0
-
-.. c:function:: int npy_get_floatstatus_barrier(char*)
-
-   Get floating point status. A pointer to a local variable is passed in to
-   prevent aggressive compiler optimizations from reordering this function
-   call relative to the code setting the status, which could lead to
-   incorrect results.
-
-   Returns a bitmask with the following possible flags:
-
- * NPY_FPE_DIVIDEBYZERO
- * NPY_FPE_OVERFLOW
- * NPY_FPE_UNDERFLOW
- * NPY_FPE_INVALID
-
- .. versionadded:: 1.15.0
-
-.. c:function:: int npy_clear_floatstatus()
-
- Clears the floating point status. Returns the previous status mask.
-
- Note that :c:func:`npy_clear_floatstatus_barrier` is preferable as it
- prevents aggressive compiler optimizations reordering the call relative to
- the code setting the status, which could lead to incorrect results.
-
- .. versionadded:: 1.9.0
-
-.. c:function:: int npy_clear_floatstatus_barrier(char*)
-
-   Clears the floating point status. A pointer to a local variable is
-   passed in to prevent aggressive compiler optimizations from reordering
-   this function call. Returns the previous status mask.
-
- .. versionadded:: 1.15.0
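-
-For example (a minimal sketch):
-
-.. code-block:: c
-
-    char dummy;  /* local variable serving as the optimization barrier */
-    int status;
-
-    npy_clear_floatstatus_barrier(&dummy);
-    /* ... floating point computation ... */
-    status = npy_get_floatstatus_barrier(&dummy);
-    if (status & NPY_FPE_OVERFLOW) {
-        /* handle overflow */
-    }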
-
-Complex functions
-~~~~~~~~~~~~~~~~~
-
-.. versionadded:: 1.4.0
-
-C99-like complex functions have been added. Those can be used if you wish to
-implement portable C extensions. Since we still support platforms without a
-C99 complex type, you need to restrict yourself to C90-compatible syntax, e.g.:
-
-.. code-block:: c
-
-    /* a = 1 + 2i */
-    npy_cdouble a = npy_cpack(1.0, 2.0);
-    npy_cdouble b;
-
-    b = npy_clog(a);
-
-Linking against the core math library in an extension
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. versionadded:: 1.4.0
-
-To use the core math library in your own extension, you need to add the npymath
-compile and link options to your extension in your setup.py:
-
- >>> from numpy.distutils.misc_util import get_info
- >>> info = get_info('npymath')
- >>> config.add_extension('foo', sources=['foo.c'], extra_info=info)
-
-In other words, the usage of info is exactly the same as when using blas_info
-and co.
-
-Half-precision functions
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. versionadded:: 1.6.0
-
-The header file <numpy/halffloat.h> provides functions to work with
-IEEE 754-2008 16-bit floating point values. While this format is
-not typically used for numerical computations, it is useful for
-storing values which require floating point but do not need much precision.
-It can also be used as an educational tool to understand the nature
-of floating point round-off error.
-
-As with the other types, NumPy includes a typedef npy_half for the 16-bit
-float. Unlike for most of the other types, you cannot use this as a
-normal type in C, since it is a typedef for npy_uint16. For example,
-1.0 looks like 0x3c00 to C, and if you do an equality comparison
-between the different signed zeros, you will get -0.0 != 0.0
-(0x8000 != 0x0000), which is incorrect.
-
-For these reasons, NumPy provides an API to work with npy_half values
-accessible by including <numpy/halffloat.h> and linking to 'npymath'.
-For functions that are not provided directly, such as the arithmetic
-operations, the preferred method is to convert to float
-or double and back again, as in the following example.
-
-.. code-block:: c
-
- npy_half sum(int n, npy_half *array) {
- float ret = 0;
- while(n--) {
- ret += npy_half_to_float(*array++);
- }
- return npy_float_to_half(ret);
- }
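-
-For example, comparing signed zeros through the API gives the correct
-result, whereas the raw bit patterns differ (a minimal sketch):
-
-.. code-block:: c
-
-    npy_half pz = NPY_HALF_PZERO;  /* bit pattern 0x0000 */
-    npy_half nz = NPY_HALF_NZERO;  /* bit pattern 0x8000 */
-
-    int raw_equal  = (pz == nz);           /* 0: the bits differ    */
-    int real_equal = npy_half_eq(pz, nz);  /* non-zero: -0.0 == 0.0 */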
-
-External Links:
-
-* `754-2008 IEEE Standard for Floating-Point Arithmetic`__
-* `Half-precision Float Wikipedia Article`__.
-* `OpenGL Half Float Pixel Support`__
-* `The OpenEXR image format`__.
-
-__ https://ieeexplore.ieee.org/document/4610935/
-__ https://en.wikipedia.org/wiki/Half-precision_floating-point_format
-__ https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_half_float_pixel.txt
-__ https://www.openexr.com/about.html
-
-.. c:var:: NPY_HALF_ZERO
-
- This macro is defined to positive zero.
-
-.. c:var:: NPY_HALF_PZERO
-
- This macro is defined to positive zero.
-
-.. c:var:: NPY_HALF_NZERO
-
- This macro is defined to negative zero.
-
-.. c:var:: NPY_HALF_ONE
-
- This macro is defined to 1.0.
-
-.. c:var:: NPY_HALF_NEGONE
-
- This macro is defined to -1.0.
-
-.. c:var:: NPY_HALF_PINF
-
- This macro is defined to +inf.
-
-.. c:var:: NPY_HALF_NINF
-
- This macro is defined to -inf.
-
-.. c:var:: NPY_HALF_NAN
-
- This macro is defined to a NaN value, guaranteed to have its sign bit unset.
-
-.. c:function:: float npy_half_to_float(npy_half h)
-
- Converts a half-precision float to a single-precision float.
-
-.. c:function:: double npy_half_to_double(npy_half h)
-
- Converts a half-precision float to a double-precision float.
-
-.. c:function:: npy_half npy_float_to_half(float f)
-
- Converts a single-precision float to a half-precision float. The
- value is rounded to the nearest representable half, with ties going
- to the nearest even. If the value is too small or too big, the
- system's floating point underflow or overflow bit will be set.
-
-.. c:function:: npy_half npy_double_to_half(double d)
-
- Converts a double-precision float to a half-precision float. The
- value is rounded to the nearest representable half, with ties going
- to the nearest even. If the value is too small or too big, the
- system's floating point underflow or overflow bit will be set.
-
-.. c:function:: int npy_half_eq(npy_half h1, npy_half h2)
-
- Compares two half-precision floats (h1 == h2).
-
-.. c:function:: int npy_half_ne(npy_half h1, npy_half h2)
-
- Compares two half-precision floats (h1 != h2).
-
-.. c:function:: int npy_half_le(npy_half h1, npy_half h2)
-
- Compares two half-precision floats (h1 <= h2).
-
-.. c:function:: int npy_half_lt(npy_half h1, npy_half h2)
-
- Compares two half-precision floats (h1 < h2).
-
-.. c:function:: int npy_half_ge(npy_half h1, npy_half h2)
-
- Compares two half-precision floats (h1 >= h2).
-
-.. c:function:: int npy_half_gt(npy_half h1, npy_half h2)
-
- Compares two half-precision floats (h1 > h2).
-
-.. c:function:: int npy_half_eq_nonan(npy_half h1, npy_half h2)
-
- Compares two half-precision floats that are known to not be NaN (h1 == h2). If
- a value is NaN, the result is undefined.
-
-.. c:function:: int npy_half_lt_nonan(npy_half h1, npy_half h2)
-
- Compares two half-precision floats that are known to not be NaN (h1 < h2). If
- a value is NaN, the result is undefined.
-
-.. c:function:: int npy_half_le_nonan(npy_half h1, npy_half h2)
-
- Compares two half-precision floats that are known to not be NaN (h1 <= h2). If
- a value is NaN, the result is undefined.
-
-.. c:function:: int npy_half_iszero(npy_half h)
-
-   Tests whether the half-precision float has a value equal to zero. This
-   may be slightly faster than calling npy_half_eq(h, NPY_HALF_ZERO).
-
-.. c:function:: int npy_half_isnan(npy_half h)
-
- Tests whether the half-precision float is a NaN.
-
-.. c:function:: int npy_half_isinf(npy_half h)
-
- Tests whether the half-precision float is plus or minus Inf.
-
-.. c:function:: int npy_half_isfinite(npy_half h)
-
- Tests whether the half-precision float is finite (not NaN or Inf).
-
-.. c:function:: int npy_half_signbit(npy_half h)
-
-   Returns 1 if h is negative, 0 otherwise.
-
-.. c:function:: npy_half npy_half_copysign(npy_half x, npy_half y)
-
- Returns the value of x with the sign bit copied from y. Works for any value,
- including Inf and NaN.
-
-.. c:function:: npy_half npy_half_spacing(npy_half h)
-
- This is the same for half-precision float as npy_spacing and npy_spacingf
- described in the low-level floating point section.
-
-.. c:function:: npy_half npy_half_nextafter(npy_half x, npy_half y)
-
- This is the same for half-precision float as npy_nextafter and npy_nextafterf
- described in the low-level floating point section.
-
-.. c:function:: npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
-
- Low-level function which converts a 32-bit single-precision float, stored
- as a uint32, into a 16-bit half-precision float.
-
-.. c:function:: npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
-
- Low-level function which converts a 64-bit double-precision float, stored
- as a uint64, into a 16-bit half-precision float.
-
-.. c:function:: npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h)
-
- Low-level function which converts a 16-bit half-precision float
- into a 32-bit single-precision float, stored as a uint32.
-
-.. c:function:: npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h)
-
- Low-level function which converts a 16-bit half-precision float
- into a 64-bit double-precision float, stored as a uint64.
+++ /dev/null
-C API Deprecations
-==================
-
-Background
-----------
-
-The API exposed by NumPy for third-party extensions has grown over
-years of releases, and has allowed programmers to directly access
-NumPy functionality from C. This API can be best described as
-"organic". It has emerged from multiple competing desires and from
-multiple points of view over the years, strongly influenced by the
-desire to make it easy for users to move to NumPy from Numeric and
-Numarray. The core API originated with Numeric in 1995 and there are
-patterns such as the heavy use of macros written to mimic Python's
-C-API as well as account for compiler technology of the late 90's.
-There is also only a small group of volunteers who have had very little
-time to spend on improving this API.
-
-There is an ongoing effort to improve the API.
-It is important in this effort
-to ensure that code that compiles for NumPy 1.X continues to
-compile for NumPy 1.X. At the same time, certain API's will be marked
-as deprecated so that future-looking code can avoid these API's and
-follow better practices.
-
-Another important role played by deprecation markings in the C API is to move
-towards hiding internal details of the NumPy implementation. For those
-needing direct, easy access to the data of ndarrays, this will not
-remove that ability. Rather, there are many potential performance
-optimizations which require changing the implementation details, and
-NumPy developers have been unable to try them because of the high
-value of preserving ABI compatibility. By deprecating this direct
-access, we will in the future be able to improve NumPy's performance
-in ways we cannot presently.
-
-Deprecation Mechanism NPY_NO_DEPRECATED_API
--------------------------------------------
-
-In C, there is no equivalent to the deprecation warnings that Python
-supports. One way to do deprecations is to flag them in the
-documentation and release notes, then remove or change the deprecated
-features in a future major version (NumPy 2.0 and beyond). However, minor
-versions of NumPy should not have major C-API changes that prevent code
-that worked on a previous minor release from working. For example, we
-will do our best to ensure that code that compiled and worked on NumPy
-1.4 should continue to work on NumPy 1.7 (but perhaps with compiler
-warnings).
-
-To use the NPY_NO_DEPRECATED_API mechanism, you need to #define it to
-the target API version of NumPy before #including any NumPy headers.
-If you want to confirm that your code is clean against 1.7, use::
-
- #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-
-On compilers which support a #warning mechanism, NumPy issues a
-compiler warning if you do not define the symbol NPY_NO_DEPRECATED_API.
-This way, the fact that there are deprecations will be flagged for
-third-party developers who may not have read the release notes closely.
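-
-For example, at the top of a source file, before any NumPy headers (a
-minimal sketch)::
-
-    #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-    #include <numpy/arrayobject.h>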
+++ /dev/null
-Data Type API
-=============
-
-.. sectionauthor:: Travis E. Oliphant
-
-The standard array can have 24 different data types (and has some
-support for adding your own types). These data types all have an
-enumerated type, an enumerated type-character, and a corresponding
-array scalar Python type object (placed in a hierarchy). There are
-also standard C typedefs to make it easier to manipulate elements of
-the given data type. For the numeric types, there are also bit-width
-equivalent C typedefs and named typenumbers that make it easier to
-select the precision desired.
-
-.. warning::
-
-   The names for the types in C code follow C naming conventions
- more closely. The Python names for these types follow Python
- conventions. Thus, :c:data:`NPY_FLOAT` picks up a 32-bit float in
- C, but :class:`numpy.float_` in Python corresponds to a 64-bit
- double. The bit-width names can be used in both Python and C for
- clarity.
-
-
-Enumerated Types
-----------------
-
-.. c:var:: NPY_TYPES
-
-There is a list of enumerated types defined, providing the basic 24
-data types plus some useful generic names. Whenever the code requires
-a type number, one of these enumerated types is requested. The types
-are all called :c:data:`NPY_{NAME}`:
-
-.. c:var:: NPY_BOOL
-
- The enumeration value for the boolean type, stored as one byte.
- It may only be set to the values 0 and 1.
-
-.. c:var:: NPY_BYTE
-.. c:var:: NPY_INT8
-
- The enumeration value for an 8-bit/1-byte signed integer.
-
-.. c:var:: NPY_SHORT
-.. c:var:: NPY_INT16
-
- The enumeration value for a 16-bit/2-byte signed integer.
-
-.. c:var:: NPY_INT
-.. c:var:: NPY_INT32
-
- The enumeration value for a 32-bit/4-byte signed integer.
-
-.. c:var:: NPY_LONG
-
- Equivalent to either NPY_INT or NPY_LONGLONG, depending on the
- platform.
-
-.. c:var:: NPY_LONGLONG
-.. c:var:: NPY_INT64
-
- The enumeration value for a 64-bit/8-byte signed integer.
-
-.. c:var:: NPY_UBYTE
-.. c:var:: NPY_UINT8
-
- The enumeration value for an 8-bit/1-byte unsigned integer.
-
-.. c:var:: NPY_USHORT
-.. c:var:: NPY_UINT16
-
- The enumeration value for a 16-bit/2-byte unsigned integer.
-
-.. c:var:: NPY_UINT
-.. c:var:: NPY_UINT32
-
- The enumeration value for a 32-bit/4-byte unsigned integer.
-
-.. c:var:: NPY_ULONG
-
- Equivalent to either NPY_UINT or NPY_ULONGLONG, depending on the
- platform.
-
-.. c:var:: NPY_ULONGLONG
-.. c:var:: NPY_UINT64
-
- The enumeration value for a 64-bit/8-byte unsigned integer.
-
-.. c:var:: NPY_HALF
-.. c:var:: NPY_FLOAT16
-
- The enumeration value for a 16-bit/2-byte IEEE 754-2008 compatible floating
- point type.
-
-.. c:var:: NPY_FLOAT
-.. c:var:: NPY_FLOAT32
-
- The enumeration value for a 32-bit/4-byte IEEE 754 compatible floating
- point type.
-
-.. c:var:: NPY_DOUBLE
-.. c:var:: NPY_FLOAT64
-
- The enumeration value for a 64-bit/8-byte IEEE 754 compatible floating
- point type.
-
-.. c:var:: NPY_LONGDOUBLE
-
- The enumeration value for a platform-specific floating point type which is
- at least as large as NPY_DOUBLE, but larger on many platforms.
-
-.. c:var:: NPY_CFLOAT
-.. c:var:: NPY_COMPLEX64
-
- The enumeration value for a 64-bit/8-byte complex type made up of
- two NPY_FLOAT values.
-
-.. c:var:: NPY_CDOUBLE
-.. c:var:: NPY_COMPLEX128
-
- The enumeration value for a 128-bit/16-byte complex type made up of
- two NPY_DOUBLE values.
-
-.. c:var:: NPY_CLONGDOUBLE
-
- The enumeration value for a platform-specific complex floating point
- type which is made up of two NPY_LONGDOUBLE values.
-
-.. c:var:: NPY_DATETIME
-
- The enumeration value for a data type which holds dates or datetimes with
- a precision based on selectable date or time units.
-
-.. c:var:: NPY_TIMEDELTA
-
- The enumeration value for a data type which holds lengths of times in
- integers of selectable date or time units.
-
-.. c:var:: NPY_STRING
-
- The enumeration value for ASCII strings of a selectable size. The
- strings have a fixed maximum size within a given array.
-
-.. c:var:: NPY_UNICODE
-
- The enumeration value for UCS4 strings of a selectable size. The
- strings have a fixed maximum size within a given array.
-
-.. c:var:: NPY_OBJECT
-
- The enumeration value for references to arbitrary Python objects.
-
-.. c:var:: NPY_VOID
-
- Primarily used to hold struct dtypes, but can contain arbitrary
- binary data.
-
-Some useful aliases of the above types are
-
-.. c:var:: NPY_INTP
-
- The enumeration value for a signed integer type which is the same
- size as a (void \*) pointer. This is the type used by all
- arrays of indices.
-
-.. c:var:: NPY_UINTP
-
- The enumeration value for an unsigned integer type which is the
- same size as a (void \*) pointer.
-
-.. c:var:: NPY_MASK
-
- The enumeration value of the type used for masks, such as with
- the :c:data:`NPY_ITER_ARRAYMASK` iterator flag. This is equivalent
- to :c:data:`NPY_UINT8`.
-
-.. c:var:: NPY_DEFAULT_TYPE
-
- The default type to use when no dtype is explicitly specified, for
-   example when calling np.zeros(shape). This is equivalent to
- :c:data:`NPY_DOUBLE`.
-
-Other useful related constants are
-
-.. c:var:: NPY_NTYPES
-
- The total number of built-in NumPy types. The enumeration covers
- the range from 0 to NPY_NTYPES-1.
-
-.. c:var:: NPY_NOTYPE
-
- A signal value guaranteed not to be a valid type enumeration number.
-
-.. c:var:: NPY_USERDEF
-
- The start of type numbers used for Custom Data types.
-
-The various character codes indicating certain types are also part of
-an enumerated list. References to type characters (should they be
-needed at all) should always use these enumerations. The form of them
-is :c:data:`NPY_{NAME}LTR` where ``{NAME}`` can be
-
- **BOOL**, **BYTE**, **UBYTE**, **SHORT**, **USHORT**, **INT**,
- **UINT**, **LONG**, **ULONG**, **LONGLONG**, **ULONGLONG**,
- **HALF**, **FLOAT**, **DOUBLE**, **LONGDOUBLE**, **CFLOAT**,
- **CDOUBLE**, **CLONGDOUBLE**, **DATETIME**, **TIMEDELTA**,
- **OBJECT**, **STRING**, **VOID**
-
- **INTP**, **UINTP**
-
- **GENBOOL**, **SIGNED**, **UNSIGNED**, **FLOATING**, **COMPLEX**
-
-The latter group of ``{NAME}s`` corresponds to letters used in the array
-interface typestring specification.
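-
-For example, a type number can be turned into a data-type descriptor
-with :c:func:`PyArray_DescrFromType` (a minimal sketch):
-
-.. code-block:: c
-
-    PyArray_Descr *descr = PyArray_DescrFromType(NPY_FLOAT64);
-    if (descr == NULL) {
-        return NULL;  /* invalid type number; an exception is set */
-    }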
-
-
-Defines
--------
-
-Max and min values for integers
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. c:var:: NPY_MAX_INT{bits}
-
-.. c:var:: NPY_MAX_UINT{bits}
-
-.. c:var:: NPY_MIN_INT{bits}
-
- These are defined for ``{bits}`` = 8, 16, 32, 64, 128, and 256 and provide
- the maximum (minimum) value of the corresponding (unsigned) integer
- type. Note: the actual integer type may not be available on all
- platforms (i.e. 128-bit and 256-bit integers are rare).
-
-.. c:var:: NPY_MIN_{type}
-
- This is defined for ``{type}`` = **BYTE**, **SHORT**, **INT**,
- **LONG**, **LONGLONG**, **INTP**
-
-.. c:var:: NPY_MAX_{type}
-
-   This is defined for ``{type}`` = **BYTE**, **UBYTE**,
- **SHORT**, **USHORT**, **INT**, **UINT**, **LONG**, **ULONG**,
- **LONGLONG**, **ULONGLONG**, **INTP**, **UINTP**
-
-
-Number of bits in data types
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-All :c:data:`NPY_SIZEOF_{CTYPE}` constants have corresponding
-:c:data:`NPY_BITSOF_{CTYPE}` constants defined. The :c:data:`NPY_BITSOF_{CTYPE}`
-constants provide the number of bits in the data type. Specifically,
-the available ``{CTYPE}s`` are
-
- **BOOL**, **CHAR**, **SHORT**, **INT**, **LONG**,
- **LONGLONG**, **FLOAT**, **DOUBLE**, **LONGDOUBLE**
-
-
-Bit-width references to enumerated typenums
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-All of the numeric data types (integer, floating point, and complex)
-have constants that are defined to be a specific enumerated type
-number. Exactly which enumerated type a bit-width type refers to is
-platform dependent. In particular, the constants available are
-:c:data:`PyArray_{NAME}{BITS}` where ``{NAME}`` is **INT**, **UINT**,
-**FLOAT**, **COMPLEX** and ``{BITS}`` can be 8, 16, 32, 64, 80, 96, 128,
-160, 192, 256, and 512. Obviously not all bit-widths are available on
-all platforms for all the kinds of numeric types. Commonly 8-, 16-,
-32-, 64-bit integers; 32-, 64-bit floats; and 64-, 128-bit complex
-types are available.
-
-
-Integer that can hold a pointer
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The constants **NPY_INTP** and **NPY_UINTP** refer to an
-enumerated integer type that is large enough to hold a pointer on the
-platform. Index arrays should always be converted to **NPY_INTP**,
-because the dimensions of an array are of type npy_intp.
-
-
-C-type names
-------------
-
-There are standard variable types for each of the numeric data types
-and the bool data type. Some of these are already available in the
-C-specification. You can create variables in extension code with these
-types.
-
-
-Boolean
-^^^^^^^
-
-.. c:type:: npy_bool
-
-   unsigned char; the constants :c:data:`NPY_FALSE` and
- :c:data:`NPY_TRUE` are also defined.
-
-
-(Un)Signed Integer
-^^^^^^^^^^^^^^^^^^
-
-Unsigned versions of the integers can be defined by prepending a 'u'
-to the front of the integer name.
-
-.. c:type:: npy_(u)byte
-
- (unsigned) char
-
-.. c:type:: npy_short
-
- short
-
-.. c:type:: npy_ushort
-
- unsigned short
-
-.. c:type:: npy_uint
-
- unsigned int
-
-.. c:type:: npy_int
-
- int
-
-.. c:type:: npy_int16
-
- 16-bit integer
-
-.. c:type:: npy_uint16
-
- 16-bit unsigned integer
-
-.. c:type:: npy_int32
-
- 32-bit integer
-
-.. c:type:: npy_uint32
-
- 32-bit unsigned integer
-
-.. c:type:: npy_int64
-
- 64-bit integer
-
-.. c:type:: npy_uint64
-
- 64-bit unsigned integer
-
-.. c:type:: npy_(u)long
-
- (unsigned) long int
-
-.. c:type:: npy_(u)longlong
-
-   (unsigned) long long int
-
-.. c:type:: npy_intp
-
- Py_intptr_t (an integer that is the size of a pointer on
- the platform).
-
-.. c:type:: npy_uintp
-
- unsigned Py_intptr_t (an integer that is the size of a pointer on
- the platform).
-
-
-(Complex) Floating point
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. c:type:: npy_half
-
- 16-bit float
-
-.. c:type:: npy_(c)float
-
- 32-bit float
-
-.. c:type:: npy_(c)double
-
- 64-bit double
-
-.. c:type:: npy_(c)longdouble
-
- long double
-
-Complex types are structures with **.real** and **.imag** members (in
-that order).
-
-
-Bit-width names
-^^^^^^^^^^^^^^^
-
-There are also typedefs for signed integers, unsigned integers,
-floating point, and complex floating point types of specific bit-
-widths. The available type names are
-
- :c:type:`npy_int{bits}`, :c:type:`npy_uint{bits}`, :c:type:`npy_float{bits}`,
- and :c:type:`npy_complex{bits}`
-
-where ``{bits}`` is the number of bits in the type and can be **8**,
-**16**, **32**, **64**, 128, and 256 for integer types; 16, **32**
-, **64**, 80, 96, 128, and 256 for floating-point types; and 32,
-**64**, **128**, 160, 192, and 512 for complex-valued types. Which
-bit-widths are available is platform dependent. The bolded bit-widths
-are usually available on all platforms.
-
-
-Printf Formatting
------------------
-
-For help in printing, the following strings are defined as the correct
-format specifier in printf and related commands.
-
- :c:data:`NPY_LONGLONG_FMT`, :c:data:`NPY_ULONGLONG_FMT`,
- :c:data:`NPY_INTP_FMT`, :c:data:`NPY_UINTP_FMT`,
- :c:data:`NPY_LONGDOUBLE_FMT`
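-
-For example (a minimal sketch)::
-
-    npy_intp n = 42;
-    printf("n = %" NPY_INTP_FMT "\n", n);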
+++ /dev/null
-.. _c-api.generalized-ufuncs:
-
-==================================
-Generalized Universal Function API
-==================================
-
-There is a general need for looping over not only functions on scalars
-but also over functions on vectors (or arrays).
-This concept is realized in NumPy by generalizing the universal functions
-(ufuncs). In regular ufuncs, the elementary function is limited to
-element-by-element operations, whereas the generalized version (gufuncs)
-supports "sub-array" by "sub-array" operations. The Perl vector library PDL
-provides similar functionality and its terms are re-used in the following.
-
-Each generalized ufunc has information associated with it that states
-what the "core" dimensionality of the inputs is, as well as the
-corresponding dimensionality of the outputs (the element-wise ufuncs
-have zero core dimensions). The list of the core dimensions for all
-arguments is called the "signature" of a ufunc. For example, the
-ufunc numpy.add has signature ``(),()->()`` defining two scalar inputs
-and one scalar output.
-
-Another example is the function ``inner1d(a, b)`` with a signature of
-``(i),(i)->()``. This applies the inner product along the last axis of
-each input, but keeps the remaining indices intact.
-For example, where ``a`` is of shape ``(3, 5, N)`` and ``b`` is of shape
-``(5, N)``, this will return an output of shape ``(3,5)``.
-The underlying elementary function is called ``3 * 5`` times. In the
-signature, we specify one core dimension ``(i)`` for each input and zero core
-dimensions ``()`` for the output, since it takes two 1-d arrays and
-returns a scalar. By using the same name ``i``, we specify that the two
-corresponding dimensions should be of the same size.
-
-The dimensions beyond the core dimensions are called "loop" dimensions. In
-the above example, this corresponds to ``(3, 5)``.
-
-The signature determines how the dimensions of each input/output array are
-split into core and loop dimensions:
-
-#. Each dimension in the signature is matched to a dimension of the
- corresponding passed-in array, starting from the end of the shape tuple.
- These are the core dimensions, and they must be present in the arrays, or
- an error will be raised.
-#. Core dimensions assigned to the same label in the signature (e.g. the
- ``i`` in ``inner1d``'s ``(i),(i)->()``) must have exactly matching sizes,
- no broadcasting is performed.
-#. The core dimensions are removed from all inputs and the remaining
- dimensions are broadcast together, defining the loop dimensions.
-#. The shape of each output is determined from the loop dimensions plus the
-   output's core dimensions.
-
-Typically, the size of all core dimensions in an output will be determined by
-the size of a core dimension with the same label in an input array. This is
-not a requirement, and it is possible to define a signature where a label
-comes up for the first time in an output, although some precautions must be
-taken when calling such a function. An example would be the function
-``euclidean_pdist(a)``, with signature ``(n,d)->(p)``, that given an array of
-``n`` ``d``-dimensional vectors, computes all unique pairwise Euclidean
-distances among them. The output dimension ``p`` must therefore be equal to
-``n * (n - 1) / 2``, but it is the caller's responsibility to pass in an
-output array of the right size. If the size of a core dimension of an output
-cannot be determined from a passed in input or output array, an error will be
-raised.
-
-Note: Prior to NumPy 1.10.0, less strict checks were in place: missing core
-dimensions were created by prepending 1's to the shape as necessary, core
-dimensions with the same label were broadcast together, and undetermined
-dimensions were created with size 1.
-
-
-Definitions
------------
-
-Elementary Function
- Each ufunc consists of an elementary function that performs the
- most basic operation on the smallest portion of array arguments
- (e.g. adding two numbers is the most basic operation in adding two
- arrays). The ufunc applies the elementary function multiple times
- on different parts of the arrays. The input/output of elementary
- functions can be vectors; e.g., the elementary function of inner1d
- takes two vectors as input.
-
-Signature
- A signature is a string describing the input/output dimensions of
- the elementary function of a ufunc. See section below for more
- details.
-
-Core Dimension
- The dimensionality of each input/output of an elementary function
- is defined by its core dimensions (zero core dimensions correspond
- to a scalar input/output). The core dimensions are mapped to the
- last dimensions of the input/output arrays.
-
-Dimension Name
- A dimension name represents a core dimension in the signature.
- Different dimensions may share a name, indicating that they are of
- the same size.
-
-Dimension Index
- A dimension index is an integer representing a dimension name. It
- enumerates the dimension names according to the order of the first
- occurrence of each name in the signature.
-
-.. _details-of-signature:
-
-Details of Signature
---------------------
-
-The signature defines "core" dimensionality of input and output
-variables, and thereby also defines the contraction of the
-dimensions. The signature is represented by a string of the
-following format:
-
-* Core dimensions of each input or output array are represented by a
- list of dimension names in parentheses, ``(i_1,...,i_N)``; a scalar
- input/output is denoted by ``()``. Instead of ``i_1``, ``i_2``,
-  etc., one can use any valid Python variable name.
-* Dimension lists for different arguments are separated by ``","``.
- Input/output arguments are separated by ``"->"``.
-* If one uses the same dimension name in multiple locations, this
- enforces the same size of the corresponding dimensions.
-
-The formal syntax of signatures is as follows::
-
- <Signature> ::= <Input arguments> "->" <Output arguments>
- <Input arguments> ::= <Argument list>
- <Output arguments> ::= <Argument list>
- <Argument list> ::= nil | <Argument> | <Argument> "," <Argument list>
- <Argument> ::= "(" <Core dimension list> ")"
- <Core dimension list> ::= nil | <Core dimension> |
- <Core dimension> "," <Core dimension list>
- <Core dimension> ::= <Dimension name> <Dimension modifier>
- <Dimension name> ::= valid Python variable name | valid integer
- <Dimension modifier> ::= nil | "?"
-
-Notes:
-
-#. All quotes are for clarity.
-#. Unmodified core dimensions that share the same name must have the same size.
- Each dimension name typically corresponds to one level of looping in the
- elementary function's implementation.
-#. Whitespace is ignored.
-#. An integer as a dimension name freezes that dimension to that value.
-#. If the name is suffixed with the "?" modifier, the dimension is a core
- dimension only if it exists on all inputs and outputs that share it;
- otherwise it is ignored (and replaced by a dimension of size 1 for the
- elementary function).
-
-Here are some examples of signatures:
-
-+-------------+----------------------------+-----------------------------------+
-| name | signature | common usage |
-+=============+============================+===================================+
-| add | ``(),()->()`` | binary ufunc |
-+-------------+----------------------------+-----------------------------------+
-| sum1d | ``(i)->()`` | reduction |
-+-------------+----------------------------+-----------------------------------+
-| inner1d | ``(i),(i)->()`` | vector-vector multiplication |
-+-------------+----------------------------+-----------------------------------+
-| matmat | ``(m,n),(n,p)->(m,p)`` | matrix multiplication |
-+-------------+----------------------------+-----------------------------------+
-| vecmat | ``(n),(n,p)->(p)`` | vector-matrix multiplication |
-+-------------+----------------------------+-----------------------------------+
-| matvec | ``(m,n),(n)->(m)`` | matrix-vector multiplication |
-+-------------+----------------------------+-----------------------------------+
-| matmul | ``(m?,n),(n,p?)->(m?,p?)`` | combination of the four above |
-+-------------+----------------------------+-----------------------------------+
-| outer_inner | ``(i,t),(j,t)->(i,j)`` | inner over the last dimension, |
-| | | outer over the second to last, |
-| | | and loop/broadcast over the rest. |
-+-------------+----------------------------+-----------------------------------+
-| cross1d | ``(3),(3)->(3)`` | cross product where the last |
-| | | dimension is frozen and must be 3 |
-+-------------+----------------------------+-----------------------------------+
-
-.. _frozen:
-
-The last is an instance of freezing a core dimension, which can be used to
-improve ufunc performance.
-
-C-API for implementing Elementary Functions
--------------------------------------------
-
-The current interface remains unchanged, and ``PyUFunc_FromFuncAndData``
-can still be used to implement (specialized) ufuncs, consisting of
-scalar elementary functions.
-
-One can use ``PyUFunc_FromFuncAndDataAndSignature`` to declare a more
-general ufunc. The argument list is the same as
-``PyUFunc_FromFuncAndData``, with an additional argument specifying the
-signature as C string.
-
-Furthermore, the callback function is of the same type as before,
-``void (*foo)(char **args, npy_intp *dimensions, npy_intp *steps, void *func)``.
-When invoked, ``args`` is an array of length ``nargs`` containing
-the data pointers of all input/output arguments. For a scalar elementary
-function, ``steps`` is also of length ``nargs``, denoting the strides used
-for the arguments. ``dimensions`` is a pointer to a single integer
-defining the size of the axis to be looped over.
-
-For a non-trivial signature, ``dimensions`` will also contain the sizes
-of the core dimensions, starting at the second entry. Only
-one size is provided for each unique dimension name, and the sizes are
-given according to the first occurrence of a dimension name in the
-signature.
-
-The first ``nargs`` elements of ``steps`` remain the same as for scalar
-ufuncs. The following elements contain the strides of all core
-dimensions for all arguments in order.
-
-For example, consider a ufunc with signature ``(i,j),(i)->()``. In
-this case, ``args`` will contain three pointers to the data of the
-input/output arrays ``a``, ``b``, ``c``. Furthermore, ``dimensions`` will be
-``[N, I, J]`` to define the size ``N`` of the loop and the sizes ``I`` and ``J``
-for the core dimensions ``i`` and ``j``. Finally, ``steps`` will be
-``[a_N, b_N, c_N, a_i, a_j, b_i]``, containing all necessary strides.
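-
-As a concrete illustration, here is a minimal sketch (not code from the
-NumPy sources) of an elementary function and registration call for
-``inner1d``, with signature ``(i),(i)->()``. It assumes ``double`` data;
-following the layout described above, ``dimensions`` is ``[N, I]`` and
-``steps`` is ``[a_N, b_N, c_N, a_i, b_i]``.
-
-.. code-block:: c
-
-    /* Requires numpy/arrayobject.h and numpy/ufuncobject.h */
-    static void
-    inner1d_loop(char **args, npy_intp *dimensions, npy_intp *steps,
-                 void *data)
-    {
-        npy_intp N = dimensions[0];    /* size of the loop dimension */
-        npy_intp I = dimensions[1];    /* size of core dimension i */
-        char *a = args[0], *b = args[1], *c = args[2];
-        npy_intp n, i;
-
-        for (n = 0; n < N; n++) {
-            double sum = 0;
-            char *a_i = a, *b_i = b;
-            for (i = 0; i < I; i++) {
-                sum += *(double *)a_i * *(double *)b_i;
-                a_i += steps[3];       /* a_i: core stride of a */
-                b_i += steps[4];       /* b_i: core stride of b */
-            }
-            *(double *)c = sum;
-            a += steps[0];             /* a_N: loop stride of a */
-            b += steps[1];             /* b_N: loop stride of b */
-            c += steps[2];             /* c_N: loop stride of c */
-        }
-    }
-
-    /* At module scope: */
-    static PyUFuncGenericFunction funcs[] = {&inner1d_loop};
-    static char types[] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE};
-    static void *func_data[] = {NULL};
-
-    /* Inside the module init function, after import_array() and
-       import_umath(): */
-    PyObject *inner1d = PyUFunc_FromFuncAndDataAndSignature(
-            funcs, func_data, types, 1, 2, 1, PyUFunc_None,
-            "inner1d", "inner product on the last dimension", 0,
-            "(i),(i)->()");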
+++ /dev/null
-Array Iterator API
-==================
-
-.. sectionauthor:: Mark Wiebe
-
-.. index::
- pair: iterator; C-API
- pair: C-API; iterator
-
-.. versionadded:: 1.6
-
-Array Iterator
---------------
-
-The array iterator encapsulates many of the key features in ufuncs,
-allowing user code to support features like output parameters,
-preservation of memory layouts, and buffering of data with the wrong
-alignment or type, without requiring difficult coding.
-
-This page documents the API for the iterator.
-The iterator is named ``NpyIter`` and functions are
-named ``NpyIter_*``.
-
-There is an :ref:`introductory guide to array iteration <arrays.nditer>`
-which may be of interest for those using this C API. In many instances,
-testing out ideas by creating the iterator in Python is a good idea
-before writing the C iteration code.
-
-Simple Iteration Example
-------------------------
-
-The best way to become familiar with the iterator is to look at its
-usage within the NumPy codebase itself. For example, here is a slightly
-tweaked version of the code for :c:func:`PyArray_CountNonzero`, which counts the
-number of non-zero elements in an array.
-
-.. code-block:: c
-
- npy_intp PyArray_CountNonzero(PyArrayObject* self)
- {
- /* Nonzero boolean function */
- PyArray_NonzeroFunc* nonzero = PyArray_DESCR(self)->f->nonzero;
-
- NpyIter* iter;
- NpyIter_IterNextFunc *iternext;
- char** dataptr;
- npy_intp nonzero_count;
- npy_intp* strideptr,* innersizeptr;
-
- /* Handle zero-sized arrays specially */
- if (PyArray_SIZE(self) == 0) {
- return 0;
- }
-
- /*
- * Create and use an iterator to count the nonzeros.
- * flag NPY_ITER_READONLY
- * - The array is never written to.
- * flag NPY_ITER_EXTERNAL_LOOP
- * - Inner loop is done outside the iterator for efficiency.
-         *   flag NPY_ITER_REFS_OK
- * - Reference types are acceptable.
- * order NPY_KEEPORDER
- * - Visit elements in memory order, regardless of strides.
- * This is good for performance when the specific order
- * elements are visited is unimportant.
- * casting NPY_NO_CASTING
- * - No casting is required for this operation.
- */
- iter = NpyIter_New(self, NPY_ITER_READONLY|
- NPY_ITER_EXTERNAL_LOOP|
- NPY_ITER_REFS_OK,
- NPY_KEEPORDER, NPY_NO_CASTING,
- NULL);
- if (iter == NULL) {
- return -1;
- }
-
- /*
- * The iternext function gets stored in a local variable
- * so it can be called repeatedly in an efficient manner.
- */
- iternext = NpyIter_GetIterNext(iter, NULL);
- if (iternext == NULL) {
- NpyIter_Deallocate(iter);
- return -1;
- }
- /* The location of the data pointer which the iterator may update */
- dataptr = NpyIter_GetDataPtrArray(iter);
- /* The location of the stride which the iterator may update */
- strideptr = NpyIter_GetInnerStrideArray(iter);
- /* The location of the inner loop size which the iterator may update */
- innersizeptr = NpyIter_GetInnerLoopSizePtr(iter);
-
- nonzero_count = 0;
- do {
- /* Get the inner loop data/stride/count values */
- char* data = *dataptr;
- npy_intp stride = *strideptr;
- npy_intp count = *innersizeptr;
-
- /* This is a typical inner loop for NPY_ITER_EXTERNAL_LOOP */
- while (count--) {
- if (nonzero(data, self)) {
- ++nonzero_count;
- }
- data += stride;
- }
-
- /* Increment the iterator to the next inner loop */
- } while(iternext(iter));
-
- NpyIter_Deallocate(iter);
-
- return nonzero_count;
- }
-
-Simple Multi-Iteration Example
-------------------------------
-
-Here is a simple copy function using the iterator. The ``order`` parameter
-is used to control the memory layout of the allocated result, typically
-:c:data:`NPY_KEEPORDER` is desired.
-
-.. code-block:: c
-
- PyObject *CopyArray(PyObject *arr, NPY_ORDER order)
- {
- NpyIter *iter;
- NpyIter_IterNextFunc *iternext;
- PyObject *op[2], *ret;
- npy_uint32 flags;
- npy_uint32 op_flags[2];
- npy_intp itemsize, *innersizeptr, innerstride;
- char **dataptrarray;
-
- /*
- * No inner iteration - inner loop is handled by CopyArray code
- */
- flags = NPY_ITER_EXTERNAL_LOOP;
- /*
- * Tell the constructor to automatically allocate the output.
- * The data type of the output will match that of the input.
- */
- op[0] = arr;
- op[1] = NULL;
- op_flags[0] = NPY_ITER_READONLY;
- op_flags[1] = NPY_ITER_WRITEONLY | NPY_ITER_ALLOCATE;
-
- /* Construct the iterator */
-        iter = NpyIter_MultiNew(2, (PyArrayObject **)op, flags, order,
-                                NPY_NO_CASTING, op_flags, NULL);
- if (iter == NULL) {
- return NULL;
- }
-
- /*
- * Make a copy of the iternext function pointer and
- * a few other variables the inner loop needs.
- */
- iternext = NpyIter_GetIterNext(iter, NULL);
- innerstride = NpyIter_GetInnerStrideArray(iter)[0];
- itemsize = NpyIter_GetDescrArray(iter)[0]->elsize;
- /*
- * The inner loop size and data pointers may change during the
- * loop, so just cache the addresses.
- */
- innersizeptr = NpyIter_GetInnerLoopSizePtr(iter);
- dataptrarray = NpyIter_GetDataPtrArray(iter);
-
- /*
- * Note that because the iterator allocated the output,
- * it matches the iteration order and is packed tightly,
- * so we don't need to check it like the input.
- */
- if (innerstride == itemsize) {
- do {
- memcpy(dataptrarray[1], dataptrarray[0],
- itemsize * (*innersizeptr));
- } while (iternext(iter));
- } else {
- /* For efficiency, should specialize this based on item size... */
- npy_intp i;
- do {
- npy_intp size = *innersizeptr;
- char *src = dataptrarray[0], *dst = dataptrarray[1];
- for(i = 0; i < size; i++, src += innerstride, dst += itemsize) {
- memcpy(dst, src, itemsize);
- }
- } while (iternext(iter));
- }
-
- /* Get the result from the iterator object array */
- ret = NpyIter_GetOperandArray(iter)[1];
- Py_INCREF(ret);
-
- if (NpyIter_Deallocate(iter) != NPY_SUCCEED) {
- Py_DECREF(ret);
- return NULL;
- }
-
- return ret;
- }
-
-
-Iterator Data Types
----------------------
-
-The iterator layout is an internal detail, and user code only sees
-an incomplete struct.
-
-.. c:type:: NpyIter
-
- This is an opaque pointer type for the iterator. Access to its contents
- can only be done through the iterator API.
-
-.. c:type:: NpyIter_Type
-
- This is the type which exposes the iterator to Python. Currently, no
- API is exposed which provides access to the values of a Python-created
- iterator. If an iterator is created in Python, it must be used in Python
- and vice versa. Such an API will likely be created in a future version.
-
-.. c:type:: NpyIter_IterNextFunc
-
- This is a function pointer for the iteration loop, returned by
- :c:func:`NpyIter_GetIterNext`.
-
-.. c:type:: NpyIter_GetMultiIndexFunc
-
- This is a function pointer for getting the current iterator multi-index,
- returned by :c:func:`NpyIter_GetGetMultiIndex`.
-
-Construction and Destruction
-----------------------------
-
-.. c:function:: NpyIter* NpyIter_New( \
- PyArrayObject* op, npy_uint32 flags, NPY_ORDER order, \
- NPY_CASTING casting, PyArray_Descr* dtype)
-
- Creates an iterator for the given numpy array object ``op``.
-
- Flags that may be passed in ``flags`` are any combination
- of the global and per-operand flags documented in
- :c:func:`NpyIter_MultiNew`, except for :c:data:`NPY_ITER_ALLOCATE`.
-
- Any of the :c:type:`NPY_ORDER` enum values may be passed to ``order``. For
- efficient iteration, :c:type:`NPY_KEEPORDER` is the best option, and
- the other orders enforce the particular iteration pattern.
-
- Any of the :c:type:`NPY_CASTING` enum values may be passed to ``casting``.
- The values include :c:data:`NPY_NO_CASTING`, :c:data:`NPY_EQUIV_CASTING`,
- :c:data:`NPY_SAFE_CASTING`, :c:data:`NPY_SAME_KIND_CASTING`, and
- :c:data:`NPY_UNSAFE_CASTING`. To allow the casts to occur, copying or
- buffering must also be enabled.
-
- If ``dtype`` isn't ``NULL``, then it requires that data type.
- If copying is allowed, it will make a temporary copy if the data
- is castable. If :c:data:`NPY_ITER_UPDATEIFCOPY` is enabled, it will
- also copy the data back with another cast upon iterator destruction.
-
- Returns NULL if there is an error, otherwise returns the allocated
- iterator.
-
- To make an iterator similar to the old iterator, this should work.
-
- .. code-block:: c
-
- iter = NpyIter_New(op, NPY_ITER_READWRITE,
- NPY_CORDER, NPY_NO_CASTING, NULL);
-
- If you want to edit an array with aligned ``double`` code,
- but the order doesn't matter, you would use this.
-
- .. code-block:: c
-
- dtype = PyArray_DescrFromType(NPY_DOUBLE);
- iter = NpyIter_New(op, NPY_ITER_READWRITE|
- NPY_ITER_BUFFERED|
- NPY_ITER_NBO|
- NPY_ITER_ALIGNED,
- NPY_KEEPORDER,
- NPY_SAME_KIND_CASTING,
- dtype);
- Py_DECREF(dtype);
-
-.. c:function:: NpyIter* NpyIter_MultiNew( \
- npy_intp nop, PyArrayObject** op, npy_uint32 flags, NPY_ORDER order, \
- NPY_CASTING casting, npy_uint32* op_flags, PyArray_Descr** op_dtypes)
-
- Creates an iterator for broadcasting the ``nop`` array objects provided
- in ``op``, using regular NumPy broadcasting rules.
-
- Any of the :c:type:`NPY_ORDER` enum values may be passed to ``order``. For
- efficient iteration, :c:data:`NPY_KEEPORDER` is the best option, and the
- other orders enforce the particular iteration pattern. When using
- :c:data:`NPY_KEEPORDER`, if you also want to ensure that the iteration is
- not reversed along an axis, you should pass the flag
- :c:data:`NPY_ITER_DONT_NEGATE_STRIDES`.
-
- Any of the :c:type:`NPY_CASTING` enum values may be passed to ``casting``.
- The values include :c:data:`NPY_NO_CASTING`, :c:data:`NPY_EQUIV_CASTING`,
- :c:data:`NPY_SAFE_CASTING`, :c:data:`NPY_SAME_KIND_CASTING`, and
- :c:data:`NPY_UNSAFE_CASTING`. To allow the casts to occur, copying or
- buffering must also be enabled.
-
- If ``op_dtypes`` isn't ``NULL``, it specifies a data type or ``NULL``
- for each ``op[i]``.
-
- Returns NULL if there is an error, otherwise returns the allocated
- iterator.
-
- Flags that may be passed in ``flags``, applying to the whole
- iterator, are:
-
- .. c:var:: NPY_ITER_C_INDEX
-
- Causes the iterator to track a raveled flat index matching C
- order. This option cannot be used with :c:data:`NPY_ITER_F_INDEX`.
-
- .. c:var:: NPY_ITER_F_INDEX
-
- Causes the iterator to track a raveled flat index matching Fortran
- order. This option cannot be used with :c:data:`NPY_ITER_C_INDEX`.
-
- .. c:var:: NPY_ITER_MULTI_INDEX
-
- Causes the iterator to track a multi-index.
- This prevents the iterator from coalescing axes to
- produce bigger inner loops. If the loop is also not buffered
- and no index is being tracked (`NpyIter_RemoveAxis` can be called),
- then the iterator size can be ``-1`` to indicate that the iterator
-        is too large. This can happen due to complex broadcasting and
-        will result in errors when setting the iterator range, removing
-        the multi-index, or getting the next function.
- However, it is possible to remove axes again and use the iterator
- normally if the size is small enough after removal.
-
- .. c:var:: NPY_ITER_EXTERNAL_LOOP
-
- Causes the iterator to skip iteration of the innermost
- loop, requiring the user of the iterator to handle it.
-
- This flag is incompatible with :c:data:`NPY_ITER_C_INDEX`,
- :c:data:`NPY_ITER_F_INDEX`, and :c:data:`NPY_ITER_MULTI_INDEX`.
-
- .. c:var:: NPY_ITER_DONT_NEGATE_STRIDES
-
- This only affects the iterator when :c:type:`NPY_KEEPORDER` is
- specified for the order parameter. By default with
- :c:type:`NPY_KEEPORDER`, the iterator reverses axes which have
- negative strides, so that memory is traversed in a forward
- direction. This disables this step. Use this flag if you
- want to use the underlying memory-ordering of the axes,
- but don't want an axis reversed. This is the behavior of
- ``numpy.ravel(a, order='K')``, for instance.
-
- .. c:var:: NPY_ITER_COMMON_DTYPE
-
- Causes the iterator to convert all the operands to a common
- data type, calculated based on the ufunc type promotion rules.
- Copying or buffering must be enabled.
-
- If the common data type is known ahead of time, don't use this
- flag. Instead, set the requested dtype for all the operands.
-
- .. c:var:: NPY_ITER_REFS_OK
-
- Indicates that arrays with reference types (object
- arrays or structured arrays containing an object type)
- may be accepted and used in the iterator. If this flag
- is enabled, the caller must be sure to check whether
- :c:func:`NpyIter_IterationNeedsAPI(iter)` is true, in which case
- it may not release the GIL during iteration.
-
- .. c:var:: NPY_ITER_ZEROSIZE_OK
-
- Indicates that arrays with a size of zero should be permitted.
- Since the typical iteration loop does not naturally work with
- zero-sized arrays, you must check that the IterSize is larger
- than zero before entering the iteration loop.
- Currently only the operands are checked, not a forced shape.
-
- .. c:var:: NPY_ITER_REDUCE_OK
-
- Permits writeable operands with a dimension with zero
- stride and size greater than one. Note that such operands
- must be read/write.
-
- When buffering is enabled, this also switches to a special
- buffering mode which reduces the loop length as necessary to
- not trample on values being reduced.
-
-        Note that if you want to do a reduction on an automatically
-        allocated output, you must use :c:func:`NpyIter_GetOperandArray`
-        to get its reference, then set every value to the reduction
-        unit before doing the iteration loop. In the case of a
-        buffered reduction, this means you must also specify the
-        flag :c:data:`NPY_ITER_DELAY_BUFALLOC`, then reset the iterator
-        after initializing the allocated operand to prepare the
-        buffers (a sketch follows the flag lists below).
-
- .. c:var:: NPY_ITER_RANGED
-
- Enables support for iteration of sub-ranges of the full
-        ``iterindex`` range ``[0, NpyIter_GetIterSize(iter))``. Use
- the function :c:func:`NpyIter_ResetToIterIndexRange` to specify
- a range for iteration.
-
- This flag can only be used with :c:data:`NPY_ITER_EXTERNAL_LOOP`
- when :c:data:`NPY_ITER_BUFFERED` is enabled. This is because
- without buffering, the inner loop is always the size of the
- innermost iteration dimension, and allowing it to get cut up
- would require special handling, effectively making it more
- like the buffered version.
-
- .. c:var:: NPY_ITER_BUFFERED
-
- Causes the iterator to store buffering data, and use buffering
- to satisfy data type, alignment, and byte-order requirements.
- To buffer an operand, do not specify the :c:data:`NPY_ITER_COPY`
- or :c:data:`NPY_ITER_UPDATEIFCOPY` flags, because they will
- override buffering. Buffering is especially useful for Python
- code using the iterator, allowing for larger chunks
- of data at once to amortize the Python interpreter overhead.
-
- If used with :c:data:`NPY_ITER_EXTERNAL_LOOP`, the inner loop
- for the caller may get larger chunks than would be possible
- without buffering, because of how the strides are laid out.
-
- Note that if an operand is given the flag :c:data:`NPY_ITER_COPY`
- or :c:data:`NPY_ITER_UPDATEIFCOPY`, a copy will be made in preference
- to buffering. Buffering will still occur when the array was
- broadcast so elements need to be duplicated to get a constant
- stride.
-
- In normal buffering, the size of each inner loop is equal
- to the buffer size, or possibly larger if
- :c:data:`NPY_ITER_GROWINNER` is specified. If
- :c:data:`NPY_ITER_REDUCE_OK` is enabled and a reduction occurs,
- the inner loops may become smaller depending
- on the structure of the reduction.
-
- .. c:var:: NPY_ITER_GROWINNER
-
- When buffering is enabled, this allows the size of the inner
- loop to grow when buffering isn't necessary. This option
- is best used if you're doing a straight pass through all the
- data, rather than anything with small cache-friendly arrays
- of temporary values for each inner loop.
-
- .. c:var:: NPY_ITER_DELAY_BUFALLOC
-
- When buffering is enabled, this delays allocation of the
- buffers until :c:func:`NpyIter_Reset` or another reset function is
- called. This flag exists to avoid wasteful copying of
- buffer data when making multiple copies of a buffered
- iterator for multi-threaded iteration.
-
- Another use of this flag is for setting up reduction operations.
- After the iterator is created, and a reduction output
- is allocated automatically by the iterator (be sure to use
- READWRITE access), its value may be initialized to the reduction
- unit. Use :c:func:`NpyIter_GetOperandArray` to get the object.
- Then, call :c:func:`NpyIter_Reset` to allocate and fill the buffers
- with their initial values.
-
- .. c:var:: NPY_ITER_COPY_IF_OVERLAP
-
- If any write operand has overlap with any read operand, eliminate all
- overlap by making temporary copies (enabling UPDATEIFCOPY for write
- operands, if necessary). A pair of operands has overlap if there is
- a memory address that contains data common to both arrays.
-
- Because exact overlap detection has exponential runtime
- in the number of dimensions, the decision is made based
- on heuristics, which has false positives (needless copies in unusual
- cases) but has no false negatives.
-
- If any read/write overlap exists, this flag ensures the result of the
- operation is the same as if all operands were copied.
- In cases where copies would need to be made, **the result of the
- computation may be undefined without this flag!**
-
- Flags that may be passed in ``op_flags[i]``, where ``0 <= i < nop``:
-
- .. c:var:: NPY_ITER_READWRITE
- .. c:var:: NPY_ITER_READONLY
- .. c:var:: NPY_ITER_WRITEONLY
-
- Indicate how the user of the iterator will read or write
- to ``op[i]``. Exactly one of these flags must be specified
- per operand. Using ``NPY_ITER_READWRITE`` or ``NPY_ITER_WRITEONLY``
-        for a user-provided operand may trigger ``WRITEBACKIFCOPY``
- semantics. The data will be written back to the original array
- when ``NpyIter_Deallocate`` is called.
-
- .. c:var:: NPY_ITER_COPY
-
- Allow a copy of ``op[i]`` to be made if it does not
- meet the data type or alignment requirements as specified
- by the constructor flags and parameters.
-
- .. c:var:: NPY_ITER_UPDATEIFCOPY
-
- Triggers :c:data:`NPY_ITER_COPY`, and when an array operand
- is flagged for writing and is copied, causes the data
- in a copy to be copied back to ``op[i]`` when
- ``NpyIter_Deallocate`` is called.
-
-        If the operand is flagged as write-only and a copy is needed,
-        an uninitialized temporary array will be created and then copied
-        back to ``op[i]`` on calling ``NpyIter_Deallocate``, instead of
-        doing the unnecessary copy operation.
-
- .. c:var:: NPY_ITER_NBO
- .. c:var:: NPY_ITER_ALIGNED
- .. c:var:: NPY_ITER_CONTIG
-
- Causes the iterator to provide data for ``op[i]``
- that is in native byte order, aligned according to
- the dtype requirements, contiguous, or any combination.
-
- By default, the iterator produces pointers into the
- arrays provided, which may be aligned or unaligned, and
- with any byte order. If copying or buffering is not
- enabled and the operand data doesn't satisfy the constraints,
- an error will be raised.
-
-        The contiguous constraint applies only to the inner loop;
-        successive inner loops may have arbitrary pointer changes.
-
- If the requested data type is in non-native byte order,
- the NBO flag overrides it and the requested data type is
- converted to be in native byte order.
-
- .. c:var:: NPY_ITER_ALLOCATE
-
- This is for output arrays, and requires that the flag
- :c:data:`NPY_ITER_WRITEONLY` or :c:data:`NPY_ITER_READWRITE`
- be set. If ``op[i]`` is NULL, creates a new array with
- the final broadcast dimensions, and a layout matching
- the iteration order of the iterator.
-
- When ``op[i]`` is NULL, the requested data type
- ``op_dtypes[i]`` may be NULL as well, in which case it is
- automatically generated from the dtypes of the arrays which
- are flagged as readable. The rules for generating the dtype
-        are the same as for UFuncs. Of special note is the handling
-        of byte order in the selected dtype. If there is exactly
-        one input, the input's dtype is used as is. Otherwise,
-        if the dtypes of more than one input are combined, the
-        output will be in native byte order.
-
- After being allocated with this flag, the caller may retrieve
- the new array by calling :c:func:`NpyIter_GetOperandArray` and
- getting the i-th object in the returned C array. The caller
- must call Py_INCREF on it to claim a reference to the array.
-
- .. c:var:: NPY_ITER_NO_SUBTYPE
-
- For use with :c:data:`NPY_ITER_ALLOCATE`, this flag disables
- allocating an array subtype for the output, forcing
- it to be a straight ndarray.
-
- TODO: Maybe it would be better to introduce a function
- ``NpyIter_GetWrappedOutput`` and remove this flag?
-
- .. c:var:: NPY_ITER_NO_BROADCAST
-
- Ensures that the input or output matches the iteration
- dimensions exactly.
-
- .. c:var:: NPY_ITER_ARRAYMASK
-
- .. versionadded:: 1.7
-
- Indicates that this operand is the mask to use for
- selecting elements when writing to operands which have
- the :c:data:`NPY_ITER_WRITEMASKED` flag applied to them.
- Only one operand may have :c:data:`NPY_ITER_ARRAYMASK` flag
- applied to it.
-
- The data type of an operand with this flag should be either
- :c:data:`NPY_BOOL`, :c:data:`NPY_MASK`, or a struct dtype
- whose fields are all valid mask dtypes. In the latter case,
- it must match up with a struct operand being WRITEMASKED,
- as it is specifying a mask for each field of that array.
-
- This flag only affects writing from the buffer back to
- the array. This means that if the operand is also
- :c:data:`NPY_ITER_READWRITE` or :c:data:`NPY_ITER_WRITEONLY`,
- code doing iteration can write to this operand to
- control which elements will be untouched and which ones will be
- modified. This is useful when the mask should be a combination
- of input masks.
-
- .. c:var:: NPY_ITER_WRITEMASKED
-
- .. versionadded:: 1.7
-
-        Indicates that this operand is `writemasked <numpy.nditer>`:
-        only elements where the chosen ARRAYMASK operand is True
-        will be written to. In general, the iterator does not enforce
-        this; it is up to the code doing the iteration to follow that
-        promise.
-
- When ``writemasked`` flag is used, and this operand is buffered,
- this changes how data is copied from the buffer into the array.
- A masked copying routine is used, which only copies the
- elements in the buffer for which ``writemasked``
- returns true from the corresponding element in the ARRAYMASK
- operand.
-
- .. c:var:: NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE
-
- In memory overlap checks, assume that operands with
- ``NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE`` enabled are accessed only
- in the iterator order.
-
- This enables the iterator to reason about data dependency,
- possibly avoiding unnecessary copies.
-
- This flag has effect only if ``NPY_ITER_COPY_IF_OVERLAP`` is enabled
- on the iterator.
-
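-    As a concrete illustration of the reduction setup described under
-    :c:data:`NPY_ITER_REDUCE_OK` above, here is a minimal, hypothetical
-    sketch that initializes an automatically allocated reduction output
-    to the reduction unit before iterating. It assumes ``iter`` was
-    created with :c:data:`NPY_ITER_BUFFERED` and
-    :c:data:`NPY_ITER_DELAY_BUFALLOC`, and that operand 1 is an allocated
-    READWRITE reduction output (e.g. via :c:func:`NpyIter_AdvancedNew`
-    with suitable ``op_axes``); ``0`` is used as the unit for a sum.
-
-    .. code-block:: c
-
-        PyObject *out = (PyObject *)NpyIter_GetOperandArray(iter)[1];
-        PyObject *unit = PyFloat_FromDouble(0.0);  /* unit for a sum */
-
-        if (unit == NULL ||
-                PyArray_FillWithScalar((PyArrayObject *)out, unit) < 0) {
-            Py_XDECREF(unit);
-            NpyIter_Deallocate(iter);
-            return NULL;
-        }
-        Py_DECREF(unit);
-
-        /* Allocate the delayed buffers, copying in the initial values */
-        if (NpyIter_Reset(iter, NULL) != NPY_SUCCEED) {
-            NpyIter_Deallocate(iter);
-            return NULL;
-        }
-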
-.. c:function:: NpyIter* NpyIter_AdvancedNew( \
- npy_intp nop, PyArrayObject** op, npy_uint32 flags, NPY_ORDER order, \
- NPY_CASTING casting, npy_uint32* op_flags, PyArray_Descr** op_dtypes, \
- int oa_ndim, int** op_axes, npy_intp const* itershape, npy_intp buffersize)
-
- Extends :c:func:`NpyIter_MultiNew` with several advanced options providing
- more control over broadcasting and buffering.
-
- If -1/NULL values are passed to ``oa_ndim``, ``op_axes``, ``itershape``,
- and ``buffersize``, it is equivalent to :c:func:`NpyIter_MultiNew`.
-
- The parameter ``oa_ndim``, when not zero or -1, specifies the number of
- dimensions that will be iterated with customized broadcasting.
-    If it is provided, ``op_axes`` must also be provided, and ``itershape``
-    may be. The ``op_axes`` parameter lets you control in detail how the
-    axes of the operand arrays get matched together and iterated.
-    In ``op_axes``, you must provide an array of ``nop`` pointers
-    to ``oa_ndim``-sized arrays of type ``npy_intp``. If an entry
-    in ``op_axes`` is NULL, normal broadcasting rules will apply.
-    ``op_axes[j][i]`` stores either a valid axis of ``op[j]``, or
-    -1, which means ``newaxis``. Within each ``op_axes[j]`` array, axes
-    may not be repeated. The following example shows how normal broadcasting
-    applies to a 3-D array, a 2-D array, a 1-D array, and a scalar.
-
-    **Note**: Before NumPy 1.8, ``oa_ndim == 0`` was used for signalling
-    that ``op_axes`` and ``itershape`` are unused. This is deprecated and
-    should be replaced with -1. Better backward compatibility may be
-    achieved by using :c:func:`NpyIter_MultiNew` for this case.
-
- .. code-block:: c
-
- int oa_ndim = 3; /* # iteration axes */
- int op0_axes[] = {0, 1, 2}; /* 3-D operand */
- int op1_axes[] = {-1, 0, 1}; /* 2-D operand */
- int op2_axes[] = {-1, -1, 0}; /* 1-D operand */
-        int op3_axes[] = {-1, -1, -1};  /* 0-D (scalar) operand */
- int* op_axes[] = {op0_axes, op1_axes, op2_axes, op3_axes};
-
- The ``itershape`` parameter allows you to force the iterator
- to have a specific iteration shape. It is an array of length
- ``oa_ndim``. When an entry is negative, its value is determined
- from the operands. This parameter allows automatically allocated
- outputs to get additional dimensions which don't match up with
- any dimension of an input.
-
- If ``buffersize`` is zero, a default buffer size is used,
- otherwise it specifies how big of a buffer to use. Buffers
- which are powers of 2 such as 4096 or 8192 are recommended.
-
- Returns NULL if there is an error, otherwise returns the allocated
- iterator.
-
-.. c:function:: NpyIter* NpyIter_Copy(NpyIter* iter)
-
- Makes a copy of the given iterator. This function is provided
- primarily to enable multi-threaded iteration of the data.
-
- *TODO*: Move this to a section about multithreaded iteration.
-
- The recommended approach to multithreaded iteration is to
- first create an iterator with the flags
- :c:data:`NPY_ITER_EXTERNAL_LOOP`, :c:data:`NPY_ITER_RANGED`,
- :c:data:`NPY_ITER_BUFFERED`, :c:data:`NPY_ITER_DELAY_BUFALLOC`, and
- possibly :c:data:`NPY_ITER_GROWINNER`. Create a copy of this iterator
- for each thread (minus one for the first iterator). Then, take
- the iteration index range ``[0, NpyIter_GetIterSize(iter))`` and
- split it up into tasks, for example using a TBB parallel_for loop.
- When a thread gets a task to execute, it then uses its copy of
- the iterator by calling :c:func:`NpyIter_ResetToIterIndexRange` and
- iterating over the full range.
-
- When using the iterator in multi-threaded code or in code not
- holding the Python GIL, care must be taken to only call functions
- which are safe in that context. :c:func:`NpyIter_Copy` cannot be safely
- called without the Python GIL, because it increments Python
- references. The ``Reset*`` and some other functions may be safely
- called by passing in the ``errmsg`` parameter as non-NULL, so that
- the functions will pass back errors through it instead of setting
- a Python exception.
-
- :c:func:`NpyIter_Deallocate` must be called for each copy.
-
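-    A minimal, hypothetical sketch of the range splitting described
-    above, assuming ``iter`` was created with the flags listed (the copy
-    itself must be made while holding the GIL):
-
-    .. code-block:: c
-
-        NpyIter *iter2 = NpyIter_Copy(iter);
-        npy_intp size = NpyIter_GetIterSize(iter);
-        char *errmsg = NULL;
-
-        if (iter2 == NULL) {
-            return NULL;
-        }
-        /* Thread 1 iterates the first half of the range... */
-        if (NpyIter_ResetToIterIndexRange(iter, 0, size / 2,
-                                          &errmsg) != NPY_SUCCEED) {
-            /* report errmsg; no GIL is needed for this call */
-        }
-        /* ...and thread 2 the second half. */
-        if (NpyIter_ResetToIterIndexRange(iter2, size / 2, size,
-                                          &errmsg) != NPY_SUCCEED) {
-            /* report errmsg; no GIL is needed for this call */
-        }
-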
-.. c:function:: int NpyIter_RemoveAxis(NpyIter* iter, int axis)
-
- Removes an axis from iteration. This requires that
- :c:data:`NPY_ITER_MULTI_INDEX` was set for iterator creation, and does
- not work if buffering is enabled or an index is being tracked. This
- function also resets the iterator to its initial state.
-
- This is useful for setting up an accumulation loop, for example.
- The iterator can first be created with all the dimensions, including
- the accumulation axis, so that the output gets created correctly.
- Then, the accumulation axis can be removed, and the calculation
- done in a nested fashion.
-
- **WARNING**: This function may change the internal memory layout of
- the iterator. Any cached functions or pointers from the iterator
- must be retrieved again! The iterator range will be reset as well.
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
-
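-    A minimal, hypothetical sketch of this accumulation setup; ``axis``
-    and the construction of ``iter`` (which must track a multi-index,
-    with no buffering and no index) are assumptions of the example.
-
-    .. code-block:: c
-
-        npy_intp accum_strides[NPY_MAXARGS];
-        npy_intp *strideptr;
-        int iop, nop = NpyIter_GetNOp(iter);
-
-        /* Copy the strides of the accumulation axis before removing it */
-        strideptr = NpyIter_GetAxisStrideArray(iter, axis);
-        if (strideptr == NULL) {
-            return NULL;
-        }
-        for (iop = 0; iop < nop; ++iop) {
-            accum_strides[iop] = strideptr[iop];
-        }
-
-        if (NpyIter_RemoveAxis(iter, axis) != NPY_SUCCEED) {
-            return NULL;
-        }
-        /* Everything cached from the iterator must be fetched again */
-        NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
-        char **dataptr = NpyIter_GetDataPtrArray(iter);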
-
-.. c:function:: int NpyIter_RemoveMultiIndex(NpyIter* iter)
-
- If the iterator is tracking a multi-index, this strips support for them,
- and does further iterator optimizations that are possible if multi-indices
- are not needed. This function also resets the iterator to its initial
- state.
-
- **WARNING**: This function may change the internal memory layout of
- the iterator. Any cached functions or pointers from the iterator
- must be retrieved again!
-
- After calling this function, :c:func:`NpyIter_HasMultiIndex(iter)` will
- return false.
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
-
-.. c:function:: int NpyIter_EnableExternalLoop(NpyIter* iter)
-
- If :c:func:`NpyIter_RemoveMultiIndex` was called, you may want to enable the
- flag :c:data:`NPY_ITER_EXTERNAL_LOOP`. This flag is not permitted
- together with :c:data:`NPY_ITER_MULTI_INDEX`, so this function is provided
- to enable the feature after :c:func:`NpyIter_RemoveMultiIndex` is called.
- This function also resets the iterator to its initial state.
-
- **WARNING**: This function changes the internal logic of the iterator.
- Any cached functions or pointers from the iterator must be retrieved
- again!
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
-
-.. c:function:: int NpyIter_Deallocate(NpyIter* iter)
-
- Deallocates the iterator object and resolves any needed writebacks.
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
-
-.. c:function:: int NpyIter_Reset(NpyIter* iter, char** errmsg)
-
- Resets the iterator back to its initial state, at the beginning
- of the iteration range.
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. If errmsg is non-NULL,
- no Python exception is set when ``NPY_FAIL`` is returned.
- Instead, \*errmsg is set to an error message. When errmsg is
- non-NULL, the function may be safely called without holding
- the Python GIL.
-
-.. c:function:: int NpyIter_ResetToIterIndexRange( \
- NpyIter* iter, npy_intp istart, npy_intp iend, char** errmsg)
-
- Resets the iterator and restricts it to the ``iterindex`` range
- ``[istart, iend)``. See :c:func:`NpyIter_Copy` for an explanation of
- how to use this for multi-threaded iteration. This requires that
- the flag :c:data:`NPY_ITER_RANGED` was passed to the iterator constructor.
-
- If you want to reset both the ``iterindex`` range and the base
- pointers at the same time, you can do the following to avoid
- extra buffer copying (be sure to add the return code error checks
- when you copy this code).
-
- .. code-block:: c
-
- /* Set to a trivial empty range */
- NpyIter_ResetToIterIndexRange(iter, 0, 0);
- /* Set the base pointers */
- NpyIter_ResetBasePointers(iter, baseptrs);
- /* Set to the desired range */
- NpyIter_ResetToIterIndexRange(iter, istart, iend);
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. If errmsg is non-NULL,
- no Python exception is set when ``NPY_FAIL`` is returned.
- Instead, \*errmsg is set to an error message. When errmsg is
- non-NULL, the function may be safely called without holding
- the Python GIL.
-
-.. c:function:: int NpyIter_ResetBasePointers( \
- NpyIter *iter, char** baseptrs, char** errmsg)
-
- Resets the iterator back to its initial state, but using the values
- in ``baseptrs`` for the data instead of the pointers from the arrays
-    being iterated. This function is intended to be used, together with
- the ``op_axes`` parameter, by nested iteration code with two or more
- iterators.
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. If errmsg is non-NULL,
- no Python exception is set when ``NPY_FAIL`` is returned.
- Instead, \*errmsg is set to an error message. When errmsg is
- non-NULL, the function may be safely called without holding
- the Python GIL.
-
- *TODO*: Move the following into a special section on nested iterators.
-
- Creating iterators for nested iteration requires some care. All
- the iterator operands must match exactly, or the calls to
- :c:func:`NpyIter_ResetBasePointers` will be invalid. This means that
- automatic copies and output allocation should not be used haphazardly.
- It is possible to still use the automatic data conversion and casting
- features of the iterator by creating one of the iterators with
- all the conversion parameters enabled, then grabbing the allocated
- operands with the :c:func:`NpyIter_GetOperandArray` function and passing
- them into the constructors for the rest of the iterators.
-
- **WARNING**: When creating iterators for nested iteration,
- the code must not use a dimension more than once in the different
- iterators. If this is done, nested iteration will produce
- out-of-bounds pointers during iteration.
-
- **WARNING**: When creating iterators for nested iteration, buffering
- can only be applied to the innermost iterator. If a buffered iterator
- is used as the source for ``baseptrs``, it will point into a small buffer
- instead of the array and the inner iteration will be invalid.
-
- The pattern for using nested iterators is as follows.
-
- .. code-block:: c
-
-        NpyIter *iter1, *iter2;
- NpyIter_IterNextFunc *iternext1, *iternext2;
- char **dataptrs1;
-
- /*
- * With the exact same operands, no copies allowed, and
- * no axis in op_axes used both in iter1 and iter2.
- * Buffering may be enabled for iter2, but not for iter1.
- */
- iter1 = ...; iter2 = ...;
-
-        iternext1 = NpyIter_GetIterNext(iter1, NULL);
-        iternext2 = NpyIter_GetIterNext(iter2, NULL);
- dataptrs1 = NpyIter_GetDataPtrArray(iter1);
-
- do {
-            NpyIter_ResetBasePointers(iter2, dataptrs1, NULL);
- do {
- /* Use the iter2 values */
- } while (iternext2(iter2));
- } while (iternext1(iter1));
-
-.. c:function:: int NpyIter_GotoMultiIndex(NpyIter* iter, npy_intp const* multi_index)
-
- Adjusts the iterator to point to the ``ndim`` indices
- pointed to by ``multi_index``. Returns an error if a multi-index
- is not being tracked, the indices are out of bounds,
- or inner loop iteration is disabled.
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
-
-.. c:function:: int NpyIter_GotoIndex(NpyIter* iter, npy_intp index)
-
- Adjusts the iterator to point to the ``index`` specified.
- If the iterator was constructed with the flag
- :c:data:`NPY_ITER_C_INDEX`, ``index`` is the C-order index,
- and if the iterator was constructed with the flag
- :c:data:`NPY_ITER_F_INDEX`, ``index`` is the Fortran-order
- index. Returns an error if there is no index being tracked,
- the index is out of bounds, or inner loop iteration is disabled.
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
-
-.. c:function:: npy_intp NpyIter_GetIterSize(NpyIter* iter)
-
- Returns the number of elements being iterated. This is the product
- of all the dimensions in the shape. When a multi index is being tracked
- (and `NpyIter_RemoveAxis` may be called) the size may be ``-1`` to
- indicate an iterator is too large. Such an iterator is invalid, but
- may become valid after `NpyIter_RemoveAxis` is called. It is not
- necessary to check for this case.
-
-.. c:function:: npy_intp NpyIter_GetIterIndex(NpyIter* iter)
-
- Gets the ``iterindex`` of the iterator, which is an index matching
- the iteration order of the iterator.
-
-.. c:function:: void NpyIter_GetIterIndexRange( \
- NpyIter* iter, npy_intp* istart, npy_intp* iend)
-
- Gets the ``iterindex`` sub-range that is being iterated. If
- :c:data:`NPY_ITER_RANGED` was not specified, this always returns the
-    range ``[0, NpyIter_GetIterSize(iter))``.
-
-.. c:function:: int NpyIter_GotoIterIndex(NpyIter* iter, npy_intp iterindex)
-
- Adjusts the iterator to point to the ``iterindex`` specified.
- The IterIndex is an index matching the iteration order of the iterator.
- Returns an error if the ``iterindex`` is out of bounds,
- buffering is enabled, or inner loop iteration is disabled.
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
-
-.. c:function:: npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* iter)
-
- Returns 1 if the flag :c:data:`NPY_ITER_DELAY_BUFALLOC` was passed
- to the iterator constructor, and no call to one of the Reset
- functions has been done yet, 0 otherwise.
-
-.. c:function:: npy_bool NpyIter_HasExternalLoop(NpyIter* iter)
-
- Returns 1 if the caller needs to handle the inner-most 1-dimensional
- loop, or 0 if the iterator handles all looping. This is controlled
- by the constructor flag :c:data:`NPY_ITER_EXTERNAL_LOOP` or
- :c:func:`NpyIter_EnableExternalLoop`.
-
-.. c:function:: npy_bool NpyIter_HasMultiIndex(NpyIter* iter)
-
- Returns 1 if the iterator was created with the
- :c:data:`NPY_ITER_MULTI_INDEX` flag, 0 otherwise.
-
-.. c:function:: npy_bool NpyIter_HasIndex(NpyIter* iter)
-
- Returns 1 if the iterator was created with the
- :c:data:`NPY_ITER_C_INDEX` or :c:data:`NPY_ITER_F_INDEX`
- flag, 0 otherwise.
-
-.. c:function:: npy_bool NpyIter_RequiresBuffering(NpyIter* iter)
-
- Returns 1 if the iterator requires buffering, which occurs
- when an operand needs conversion or alignment and so cannot
- be used directly.
-
-.. c:function:: npy_bool NpyIter_IsBuffered(NpyIter* iter)
-
- Returns 1 if the iterator was created with the
- :c:data:`NPY_ITER_BUFFERED` flag, 0 otherwise.
-
-.. c:function:: npy_bool NpyIter_IsGrowInner(NpyIter* iter)
-
- Returns 1 if the iterator was created with the
- :c:data:`NPY_ITER_GROWINNER` flag, 0 otherwise.
-
-.. c:function:: npy_intp NpyIter_GetBufferSize(NpyIter* iter)
-
- If the iterator is buffered, returns the size of the buffer
- being used, otherwise returns 0.
-
-.. c:function:: int NpyIter_GetNDim(NpyIter* iter)
-
- Returns the number of dimensions being iterated. If a multi-index
- was not requested in the iterator constructor, this value
- may be smaller than the number of dimensions in the original
- objects.
-
-.. c:function:: int NpyIter_GetNOp(NpyIter* iter)
-
- Returns the number of operands in the iterator.
-
-.. c:function:: npy_intp* NpyIter_GetAxisStrideArray(NpyIter* iter, int axis)
-
- Gets the array of strides for the specified axis. Requires that
- the iterator be tracking a multi-index, and that buffering not
- be enabled.
-
- This may be used when you want to match up operand axes in
- some fashion, then remove them with :c:func:`NpyIter_RemoveAxis` to
- handle their processing manually. By calling this function
- before removing the axes, you can get the strides for the
- manual processing.
-
- Returns ``NULL`` on error.
-
-.. c:function:: int NpyIter_GetShape(NpyIter* iter, npy_intp* outshape)
-
- Returns the broadcast shape of the iterator in ``outshape``.
- This can only be called on an iterator which is tracking a multi-index.
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
-
-.. c:function:: PyArray_Descr** NpyIter_GetDescrArray(NpyIter* iter)
-
- This gives back a pointer to the ``nop`` data type Descrs for
- the objects being iterated. The result points into ``iter``,
- so the caller does not gain any references to the Descrs.
-
-    This pointer may be cached before the iteration loop; calling
- ``iternext`` will not change it.
-
-.. c:function:: PyObject** NpyIter_GetOperandArray(NpyIter* iter)
-
- This gives back a pointer to the ``nop`` operand PyObjects
- that are being iterated. The result points into ``iter``,
- so the caller does not gain any references to the PyObjects.
-
-.. c:function:: PyObject* NpyIter_GetIterView(NpyIter* iter, npy_intp i)
-
- This gives back a reference to a new ndarray view, which is a view
- into the i-th object in the array :c:func:`NpyIter_GetOperandArray()`,
- whose dimensions and strides match the internal optimized
- iteration pattern. A C-order iteration of this view is equivalent
- to the iterator's iteration order.
-
- For example, if an iterator was created with a single array as its
- input, and it was possible to rearrange all its axes and then
- collapse it into a single strided iteration, this would return
- a view that is a one-dimensional array.
-
-.. c:function:: void NpyIter_GetReadFlags(NpyIter* iter, char* outreadflags)
-
- Fills ``nop`` flags. Sets ``outreadflags[i]`` to 1 if
- ``op[i]`` can be read from, and to 0 if not.
-
-.. c:function:: void NpyIter_GetWriteFlags(NpyIter* iter, char* outwriteflags)
-
- Fills ``nop`` flags. Sets ``outwriteflags[i]`` to 1 if
- ``op[i]`` can be written to, and to 0 if not.
-
-.. c:function:: int NpyIter_CreateCompatibleStrides( \
- NpyIter* iter, npy_intp itemsize, npy_intp* outstrides)
-
- Builds a set of strides which are the same as the strides of an
- output array created using the :c:data:`NPY_ITER_ALLOCATE` flag, where NULL
- was passed for op_axes. This is for data packed contiguously,
- but not necessarily in C or Fortran order. This should be used
- together with :c:func:`NpyIter_GetShape` and :c:func:`NpyIter_GetNDim`
- with the flag :c:data:`NPY_ITER_MULTI_INDEX` passed into the constructor.
-
- A use case for this function is to match the shape and layout of
- the iterator and tack on one or more dimensions. For example,
- in order to generate a vector per input value for a numerical gradient,
- you pass in ndim*itemsize for itemsize, then add another dimension to
- the end with size ndim and stride itemsize. To do the Hessian matrix,
- you do the same thing but add two dimensions, or take advantage of
- the symmetry and pack it into 1 dimension with a particular encoding.
-
- This function may only be called if the iterator is tracking a multi-index
- and if :c:data:`NPY_ITER_DONT_NEGATE_STRIDES` was used to prevent an axis
- from being iterated in reverse order.
-
- If an array is created with this method, simply adding 'itemsize'
- for each iteration will traverse the new array matching the
- iterator.
-
- Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
-
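-    A minimal, hypothetical sketch of the gradient use case above,
-    assuming ``double`` data and an iterator that tracks a multi-index
-    and was created with :c:data:`NPY_ITER_DONT_NEGATE_STRIDES`:
-
-    .. code-block:: c
-
-        int ndim = NpyIter_GetNDim(iter);
-        npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
-
-        if (NpyIter_GetShape(iter, shape) != NPY_SUCCEED ||
-                NpyIter_CreateCompatibleStrides(iter,
-                        ndim * sizeof(double), strides) != NPY_SUCCEED) {
-            return NULL;
-        }
-        /* Tack a gradient axis of length ndim onto the end */
-        shape[ndim] = ndim;
-        strides[ndim] = sizeof(double);
-        /* ...create the output from shape/strides; advancing by
-           ndim * sizeof(double) per iteration then matches the
-           iterator's traversal. */
-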
-.. c:function:: npy_bool NpyIter_IsFirstVisit(NpyIter* iter, int iop)
-
- .. versionadded:: 1.7
-
-    Checks whether the elements of the specified reduction operand
-    which the iterator points at are being seen for the first time.
-    The function returns a reasonable answer for reduction operands
-    and when buffering is disabled. The answer may be incorrect for
-    buffered non-reduction operands.
-
- This function is intended to be used in EXTERNAL_LOOP mode only,
- and will produce some wrong answers when that mode is not enabled.
-
- If this function returns true, the caller should also check the inner
- loop stride of the operand, because if that stride is 0, then only
- the first element of the innermost external loop is being visited
- for the first time.
-
- *WARNING*: For performance reasons, 'iop' is not bounds-checked,
- it is not confirmed that 'iop' is actually a reduction operand,
- and it is not confirmed that EXTERNAL_LOOP mode is enabled. These
- checks are the responsibility of the caller, and should be done
- outside of any inner loops.
-
-Functions For Iteration
------------------------
-
-.. c:function:: NpyIter_IterNextFunc* NpyIter_GetIterNext( \
- NpyIter* iter, char** errmsg)
-
- Returns a function pointer for iteration. A specialized version
- of the function pointer may be calculated by this function
- instead of being stored in the iterator structure. Thus, to
- get good performance, it is required that the function pointer
- be saved in a variable rather than retrieved for each loop iteration.
-
- Returns NULL if there is an error. If errmsg is non-NULL,
- no Python exception is set when ``NPY_FAIL`` is returned.
- Instead, \*errmsg is set to an error message. When errmsg is
- non-NULL, the function may be safely called without holding
- the Python GIL.
-
- The typical looping construct is as follows.
-
- .. code-block:: c
-
- NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
- char** dataptr = NpyIter_GetDataPtrArray(iter);
-
- do {
- /* use the addresses dataptr[0], ... dataptr[nop-1] */
- } while(iternext(iter));
-
- When :c:data:`NPY_ITER_EXTERNAL_LOOP` is specified, the typical
- inner loop construct is as follows.
-
- .. code-block:: c
-
- NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
- char** dataptr = NpyIter_GetDataPtrArray(iter);
- npy_intp* stride = NpyIter_GetInnerStrideArray(iter);
- npy_intp* size_ptr = NpyIter_GetInnerLoopSizePtr(iter), size;
- npy_intp iop, nop = NpyIter_GetNOp(iter);
-
- do {
- size = *size_ptr;
- while (size--) {
- /* use the addresses dataptr[0], ... dataptr[nop-1] */
- for (iop = 0; iop < nop; ++iop) {
- dataptr[iop] += stride[iop];
- }
- }
-        } while (iternext(iter));
-
- Observe that we are using the dataptr array inside the iterator, not
- copying the values to a local temporary. This is possible because
- when ``iternext()`` is called, these pointers will be overwritten
- with fresh values, not incrementally updated.
-
- If a compile-time fixed buffer is being used (both flags
- :c:data:`NPY_ITER_BUFFERED` and :c:data:`NPY_ITER_EXTERNAL_LOOP`), the
- inner size may be used as a signal as well. The size is guaranteed
- to become zero when ``iternext()`` returns false, enabling the
- following loop construct. Note that if you use this construct,
- you should not pass :c:data:`NPY_ITER_GROWINNER` as a flag, because it
- will cause larger sizes under some circumstances.
-
- .. code-block:: c
-
- /* The constructor should have buffersize passed as this value */
- #define FIXED_BUFFER_SIZE 1024
-
- NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
- char **dataptr = NpyIter_GetDataPtrArray(iter);
- npy_intp *stride = NpyIter_GetInnerStrideArray(iter);
- npy_intp *size_ptr = NpyIter_GetInnerLoopSizePtr(iter), size;
- npy_intp i, iop, nop = NpyIter_GetNOp(iter);
-
- /* One loop with a fixed inner size */
- size = *size_ptr;
- while (size == FIXED_BUFFER_SIZE) {
- /*
- * This loop could be manually unrolled by a factor
- * which divides into FIXED_BUFFER_SIZE
- */
- for (i = 0; i < FIXED_BUFFER_SIZE; ++i) {
- /* use the addresses dataptr[0], ... dataptr[nop-1] */
- for (iop = 0; iop < nop; ++iop) {
- dataptr[iop] += stride[iop];
- }
- }
-            iternext(iter);
- size = *size_ptr;
- }
-
- /* Finish-up loop with variable inner size */
- if (size > 0) do {
- size = *size_ptr;
- while (size--) {
- /* use the addresses dataptr[0], ... dataptr[nop-1] */
- for (iop = 0; iop < nop; ++iop) {
- dataptr[iop] += stride[iop];
- }
- }
-        } while (iternext(iter));
-
-.. c:function:: NpyIter_GetMultiIndexFunc *NpyIter_GetGetMultiIndex( \
- NpyIter* iter, char** errmsg)
-
- Returns a function pointer for getting the current multi-index
- of the iterator. Returns NULL if the iterator is not tracking
- a multi-index. It is recommended that this function
- pointer be cached in a local variable before the iteration
- loop.
-
- Returns NULL if there is an error. If errmsg is non-NULL,
- no Python exception is set when ``NPY_FAIL`` is returned.
- Instead, \*errmsg is set to an error message. When errmsg is
- non-NULL, the function may be safely called without holding
- the Python GIL.
-
-.. c:function:: char** NpyIter_GetDataPtrArray(NpyIter* iter)
-
- This gives back a pointer to the ``nop`` data pointers. If
- :c:data:`NPY_ITER_EXTERNAL_LOOP` was not specified, each data
- pointer points to the current data item of the iterator. If
- no inner iteration was specified, it points to the first data
- item of the inner loop.
-
-    This pointer may be cached before the iteration loop; calling
- ``iternext`` will not change it. This function may be safely
- called without holding the Python GIL.
-
-.. c:function:: char** NpyIter_GetInitialDataPtrArray(NpyIter* iter)
-
- Gets the array of data pointers directly into the arrays (never
- into the buffers), corresponding to iteration index 0.
-
- These pointers are different from the pointers accepted by
- ``NpyIter_ResetBasePointers``, because the direction along
- some axes may have been reversed.
-
- This function may be safely called without holding the Python GIL.
-
-.. c:function:: npy_intp* NpyIter_GetIndexPtr(NpyIter* iter)
-
- This gives back a pointer to the index being tracked, or NULL
- if no index is being tracked. It is only useable if one of
- the flags :c:data:`NPY_ITER_C_INDEX` or :c:data:`NPY_ITER_F_INDEX`
- were specified during construction.
-
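-    For example, a minimal sketch of using the tracked index, assuming
-    the iterator was created with :c:data:`NPY_ITER_C_INDEX`:
-
-    .. code-block:: c
-
-        NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
-        char **dataptr = NpyIter_GetDataPtrArray(iter);
-        npy_intp *indexptr = NpyIter_GetIndexPtr(iter);
-
-        do {
-            /* *indexptr is the C-order flat index of *dataptr[0] */
-        } while (iternext(iter));
-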
-When the flag :c:data:`NPY_ITER_EXTERNAL_LOOP` is used, the code
-needs to know the parameters for doing the inner loop. These
-functions provide that information.
-
-.. c:function:: npy_intp* NpyIter_GetInnerStrideArray(NpyIter* iter)
-
- Returns a pointer to an array of the ``nop`` strides,
- one for each iterated object, to be used by the inner loop.
-
-    This pointer may be cached before the iteration loop; calling
- ``iternext`` will not change it. This function may be safely
- called without holding the Python GIL.
-
- **WARNING**: While the pointer may be cached, its values may
- change if the iterator is buffered.
-
-.. c:function:: npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* iter)
-
- Returns a pointer to the number of iterations the
- inner loop should execute.
-
-    This address may be cached before the iteration loop; calling
- ``iternext`` will not change it. The value itself may change during
- iteration, in particular if buffering is enabled. This function
- may be safely called without holding the Python GIL.
-
-.. c:function:: void NpyIter_GetInnerFixedStrideArray( \
- NpyIter* iter, npy_intp* out_strides)
-
- Gets an array of strides which are fixed, or will not change during
- the entire iteration. For strides that may change, the value
- NPY_MAX_INTP is placed in the stride.
-
-    Once the iterator is prepared for iteration (after a reset if
-    :c:data:`NPY_ITER_DELAY_BUFALLOC` was used), call this to get the strides
- which may be used to select a fast inner loop function. For example,
- if the stride is 0, that means the inner loop can always load its
- value into a variable once, then use the variable throughout the loop,
- or if the stride equals the itemsize, a contiguous version for that
- operand may be used.
-
- This function may be safely called without holding the Python GIL.
-
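-    For example, a minimal sketch of selecting a specialized inner loop
-    from the fixed strides; ``itemsize`` and the loop bodies are
-    hypothetical:
-
-    .. code-block:: c
-
-        npy_intp fixed_strides[NPY_MAXARGS];
-        NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
-
-        if (fixed_strides[0] == 0) {
-            /* operand 0 is constant across each inner loop:
-               load it once per loop */
-        }
-        else if (fixed_strides[0] == itemsize) {
-            /* operand 0 is contiguous: use a vectorized inner loop */
-        }
-        else {
-            /* fall back to a generic strided inner loop */
-        }
-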
-.. index::
- pair: iterator; C-API
-
-Converting from Previous NumPy Iterators
-----------------------------------------
-
-The old iterator API includes functions like PyArrayIter_Check,
-PyArray_Iter* and PyArray_ITER_*. The multi-iterator API includes
-PyArray_MultiIter*, PyArray_Broadcast, and PyArray_RemoveSmallest. The
-new iterator design replaces all of this functionality with a single object
-and associated API. One goal of the new API is that all uses of the
-existing iterator should be replaceable with the new iterator without
-significant effort. In 1.6, the major exception to this is the neighborhood
-iterator, which does not have corresponding features in this iterator.
-
-Here is a conversion table for which functions to use with the new iterator:
-
-===================================== ===================================================
-*Iterator Functions*
-:c:func:`PyArray_IterNew` :c:func:`NpyIter_New`
-:c:func:`PyArray_IterAllButAxis` :c:func:`NpyIter_New` + ``axes`` parameter **or**
- Iterator flag :c:data:`NPY_ITER_EXTERNAL_LOOP`
-:c:func:`PyArray_BroadcastToShape` **NOT SUPPORTED** (Use the support for
- multiple operands instead.)
-:c:func:`PyArrayIter_Check` Will need to add this in Python exposure
-:c:func:`PyArray_ITER_RESET` :c:func:`NpyIter_Reset`
-:c:func:`PyArray_ITER_NEXT` Function pointer from :c:func:`NpyIter_GetIterNext`
-:c:func:`PyArray_ITER_DATA` :c:func:`NpyIter_GetDataPtrArray`
-:c:func:`PyArray_ITER_GOTO` :c:func:`NpyIter_GotoMultiIndex`
-:c:func:`PyArray_ITER_GOTO1D` :c:func:`NpyIter_GotoIndex` or
- :c:func:`NpyIter_GotoIterIndex`
-:c:func:`PyArray_ITER_NOTDONE` Return value of ``iternext`` function pointer
-*Multi-iterator Functions*
-:c:func:`PyArray_MultiIterNew` :c:func:`NpyIter_MultiNew`
-:c:func:`PyArray_MultiIter_RESET` :c:func:`NpyIter_Reset`
-:c:func:`PyArray_MultiIter_NEXT` Function pointer from :c:func:`NpyIter_GetIterNext`
-:c:func:`PyArray_MultiIter_DATA` :c:func:`NpyIter_GetDataPtrArray`
-:c:func:`PyArray_MultiIter_NEXTi` **NOT SUPPORTED** (always lock-step iteration)
-:c:func:`PyArray_MultiIter_GOTO` :c:func:`NpyIter_GotoMultiIndex`
-:c:func:`PyArray_MultiIter_GOTO1D` :c:func:`NpyIter_GotoIndex` or
- :c:func:`NpyIter_GotoIterIndex`
-:c:func:`PyArray_MultiIter_NOTDONE` Return value of ``iternext`` function pointer
-:c:func:`PyArray_Broadcast` Handled by :c:func:`NpyIter_MultiNew`
-:c:func:`PyArray_RemoveSmallest` Iterator flag :c:data:`NPY_ITER_EXTERNAL_LOOP`
-*Other Functions*
-:c:func:`PyArray_ConvertToCommonType` Iterator flag :c:data:`NPY_ITER_COMMON_DTYPE`
-===================================== ===================================================
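-
-For example, a hedged sketch of a single-operand, read-only loop written
-against the new API (error handling abbreviated; the processing step is a
-placeholder):
-
-.. code-block:: c
-
- NpyIter *iter = NpyIter_New(arr, NPY_ITER_READONLY, NPY_KEEPORDER,
- NPY_NO_CASTING, NULL);
- if (iter != NULL) {
- NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
- char **dataptr = NpyIter_GetDataPtrArray(iter);
- do {
- /* *dataptr points at the current element */
- } while (iternext(iter));
- NpyIter_Deallocate(iter);
- }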
+++ /dev/null
-.. _c-api:
-
-###########
-NumPy C-API
-###########
-
-.. sectionauthor:: Travis E. Oliphant
-
-| Beware of the man who won't be bothered with details.
-| --- *William Feather, Sr.*
-
-| The truth is out there.
-| --- *Chris Carter, The X Files*
-
-
-NumPy provides a C-API to enable users to extend the system and get
-access to the array object for use in other routines. The best way to
-truly understand the C-API is to read the source code. If you are
-unfamiliar with (C) source code, however, this can be a daunting
-experience at first. Be assured that the task becomes easier with
-practice, and you may be surprised at how simple the C-code can be to
-understand. Even if you don't think you can write C-code from scratch,
-it is much easier to understand and modify already-written source code
-than to create it *de novo*.
-
-Python extensions are especially straightforward to understand because
-they all have a very similar structure. Admittedly, NumPy is not a
-trivial extension to Python, and may take a little more snooping to
-grasp. This is especially true because of the code-generation
-techniques, which simplify maintenance of very similar code, but can
-make the code a little less readable to beginners. Still, with a
-little persistence, the code can be opened to your understanding. It
-is my hope, that this guide to the C-API can assist in the process of
-becoming familiar with the compiled-level work that can be done with
-NumPy in order to squeeze that last bit of necessary speed out of your
-code.
-
-.. currentmodule:: numpy-c-api
-
-.. toctree::
- :maxdepth: 2
-
- c-api.types-and-structures
- c-api.config
- c-api.dtype
- c-api.array
- c-api.iterator
- c-api.ufunc
- c-api.generalized-ufuncs
- c-api.coremath
- c-api.deprecations
+++ /dev/null
-*****************************
-Python Types and C-Structures
-*****************************
-
-.. sectionauthor:: Travis E. Oliphant
-
-Several new types are defined in the C-code. Most of these are
-accessible from Python, but a few are not exposed due to their limited
-use. Every new Python type has an associated :c:type:`PyObject *<PyObject>` with an
-internal structure that includes a pointer to a "method table" that
-defines how the new object behaves in Python. When you receive a
-Python object into C code, you always get a pointer to a
-:c:type:`PyObject` structure. Because a :c:type:`PyObject` structure is
-very generic and defines only :c:macro:`PyObject_HEAD`, by itself it
-is not very interesting. However, different objects contain more
-details after the :c:macro:`PyObject_HEAD` (but you have to cast to the
-correct type to access them --- or use accessor functions or macros).
-
-
-New Python Types Defined
-========================
-
-Python types are the functional equivalent in C of classes in Python.
-By constructing a new Python type you make available a new object for
-Python. The ndarray object is an example of a new type defined in C.
-New types are defined in C by two basic steps:
-
-1. creating a C-structure (usually named :c:type:`Py{Name}Object`) that is
- binary-compatible with the :c:type:`PyObject` structure itself but holds
- the additional information needed for that particular object;
-
-2. populating the :c:type:`PyTypeObject` table (pointed to by the ob_type
- member of the :c:type:`PyObject` structure) with pointers to functions
- that implement the desired behavior for the type.
-
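-Putting these two steps together, a minimal hypothetical type could look
-like the sketch below (the names are placeholders, not NumPy API):
-
-.. code-block:: c
-
- /* step 1: a C-structure binary-compatible with PyObject */
- typedef struct {
- PyObject_HEAD
- int extra; /* additional per-instance information */
- } PyMyObject;
-
- /* step 2: the type table, whose slots implement the behavior */
- static PyTypeObject PyMy_Type = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "mymodule.MyObject", /* tp_name */
- sizeof(PyMyObject), /* tp_basicsize */
- /* ... remaining slots ... */
- };
-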
-Instead of special method names which define behavior for Python
-classes, there are "function tables" which point to functions that
-implement the desired results. Since Python 2.2, the PyTypeObject
-itself has become dynamic, which allows C types to be "sub-typed" from
-other C-types in C and sub-classed in Python. The child types inherit
-the attributes and methods from their parent(s).
-
-There are two major new types: the ndarray ( :c:data:`PyArray_Type` )
-and the ufunc ( :c:data:`PyUFunc_Type` ). Additional types play a
-supportive role: the :c:data:`PyArrayIter_Type`, the
-:c:data:`PyArrayMultiIter_Type`, and the :c:data:`PyArrayDescr_Type`
-. The :c:data:`PyArrayIter_Type` is the type for a flat iterator for an
-ndarray (the object that is returned when getting the flat
-attribute). The :c:data:`PyArrayMultiIter_Type` is the type of the
-object returned when calling ``broadcast()``. It handles iteration
-and broadcasting over a collection of nested sequences. Also, the
-:c:data:`PyArrayDescr_Type` is the data-type-descriptor type whose
-instances describe the data. Finally, there are 21 new scalar-array
-types which are new Python scalars corresponding to each of the
-fundamental data types available for arrays. An additional 10 other
-types are placeholders that allow the array scalars to fit into a
-hierarchy of actual Python types.
-
-
-PyArray_Type and PyArrayObject
-------------------------------
-
-.. c:var:: PyArray_Type
-
- The Python type of the ndarray is :c:data:`PyArray_Type`. In C, every
- ndarray is a pointer to a :c:type:`PyArrayObject` structure. The ob_type
- member of this structure contains a pointer to the :c:data:`PyArray_Type`
- typeobject.
-
-.. c:type:: PyArrayObject
-
- The :c:type:`PyArrayObject` C-structure contains all of the required
- information for an array. All instances of an ndarray (and its
- subclasses) will have this structure. For future compatibility,
- these structure members should normally be accessed using the
- provided macros. If you need a shorter name, then you can make use
- of :c:type:`NPY_AO` (deprecated) which is defined to be equivalent to
- :c:type:`PyArrayObject`.
-
- .. code-block:: c
-
- typedef struct PyArrayObject {
- PyObject_HEAD
- char *data;
- int nd;
- npy_intp *dimensions;
- npy_intp *strides;
- PyObject *base;
- PyArray_Descr *descr;
- int flags;
- PyObject *weakreflist;
- } PyArrayObject;
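-
- For example, a hedged sketch reading these members through the
- accessor macros instead of directly:
-
- .. code-block:: c
-
- void inspect(PyArrayObject *arr)
- {
- char *data = PyArray_BYTES(arr); /* arr->data */
- int ndim = PyArray_NDIM(arr); /* arr->nd */
- npy_intp *shape = PyArray_DIMS(arr); /* arr->dimensions */
- npy_intp *strides = PyArray_STRIDES(arr); /* arr->strides */
- /* ... */
- }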
-
-.. c:macro:: PyArrayObject.PyObject_HEAD
-
- This is needed by all Python objects. It consists of (at least)
- a reference count member ( ``ob_refcnt`` ) and a pointer to the
- typeobject ( ``ob_type`` ). (Other elements may also be present
- if Python was compiled with special options; see
- Include/object.h in the Python source tree for more
- information). The ob_type member points to a Python type
- object.
-
-.. c:member:: char *PyArrayObject.data
-
- A pointer to the first element of the array. This pointer can
- (and normally should) be recast to the data type of the array.
-
-.. c:member:: int PyArrayObject.nd
-
- An integer providing the number of dimensions for this
- array. When nd is 0, the array is sometimes called a rank-0
- array. Such arrays have undefined dimensions and strides and
- cannot be accessed. :c:data:`NPY_MAXDIMS` is the largest number of
- dimensions for any array.
-
-.. c:member:: npy_intp *PyArrayObject.dimensions
-
- An array of integers providing the shape in each dimension as
- long as nd :math:`\geq` 1. The integer is always large enough
- to hold a pointer on the platform, so the dimension size is
- only limited by memory.
-
-.. c:member:: npy_intp *PyArrayObject.strides
-
- An array of integers providing for each dimension the number of
- bytes that must be skipped to get to the next element in that
- dimension.
-
-.. c:member:: PyObject *PyArrayObject.base
-
- This member is used to hold a pointer to another Python object that
- is related to this array. There are two use cases:
-
- - If this array does not own its own memory, then base points to the
- Python object that owns it (perhaps another array object)
- - If this array has the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or
- :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set, then this array is a working
- copy of a "misbehaved" array.
-
- When ``PyArray_ResolveWritebackIfCopy`` is called, the array pointed to
- by base will be updated with the contents of this array.
-
-.. c:member:: PyArray_Descr *PyArrayObject.descr
-
- A pointer to a data-type descriptor object (see below). The
- data-type descriptor object is an instance of a new built-in
- type which allows a generic description of memory. There is a
- descriptor structure for each data type supported. This
- descriptor structure contains useful information about the type
- as well as a pointer to a table of function pointers to
- implement specific functionality.
-
-.. c:member:: int PyArrayObject.flags
-
- Flags indicating how the memory pointed to by data is to be
- interpreted. Possible flags are :c:data:`NPY_ARRAY_C_CONTIGUOUS`,
- :c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_OWNDATA`,
- :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`,
- :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, and :c:data:`NPY_ARRAY_UPDATEIFCOPY`.
-
-.. c:member:: PyObject *PyArrayObject.weakreflist
-
- This member allows array objects to have weak references (using the
- weakref module).
-
-
-PyArrayDescr_Type and PyArray_Descr
------------------------------------
-
-.. c:var:: PyArrayDescr_Type
-
- The :c:data:`PyArrayDescr_Type` is the built-in type of the
- data-type-descriptor objects used to describe how the bytes comprising
- the array are to be interpreted. There are 21 statically-defined
- :c:type:`PyArray_Descr` objects for the built-in data-types. While these
- participate in reference counting, their reference count should never
- reach zero. There is also a dynamic table of user-defined
- :c:type:`PyArray_Descr` objects that is also maintained. Once a
- data-type-descriptor object is "registered" it should never be
- deallocated either. The function :c:func:`PyArray_DescrFromType` can
- be used to retrieve a :c:type:`PyArray_Descr` object from an enumerated
- type-number (either built-in or user-defined).
-
-.. c:type:: PyArray_Descr
-
- The :c:type:`PyArray_Descr` structure lies at the heart of the
- :c:data:`PyArrayDescr_Type`. While it is described here for
- completeness, it should be considered internal to NumPy and manipulated via
- ``PyArrayDescr_*`` or ``PyDataType*`` functions and macros. The size of this
- structure is subject to change across versions of NumPy. To ensure
- compatibility:
-
- - Never declare a non-pointer instance of the struct
- - Never perform pointer arithmetic
- - Never use ``sizeof(PyArray_Descr)``
-
- It has the following structure:
-
- .. code-block:: c
-
- typedef struct {
- PyObject_HEAD
- PyTypeObject *typeobj;
- char kind;
- char type;
- char byteorder;
- char flags;
- int type_num;
- int elsize;
- int alignment;
- PyArray_ArrayDescr *subarray;
- PyObject *fields;
- PyObject *names;
- PyArray_ArrFuncs *f;
- PyObject *metadata;
- NpyAuxData *c_metadata;
- npy_hash_t hash;
- } PyArray_Descr;
-
-.. c:member:: PyTypeObject *PyArray_Descr.typeobj
-
- Pointer to a typeobject that is the corresponding Python type for
- the elements of this array. For the builtin types, this points to
- the corresponding array scalar. For user-defined types, this
- should point to a user-defined typeobject. This typeobject can
- either inherit from array scalars or not. If it does not inherit
- from array scalars, then the :c:data:`NPY_USE_GETITEM` and
- :c:data:`NPY_USE_SETITEM` flags should be set in the ``flags`` member.
-
-.. c:member:: char PyArray_Descr.kind
-
- A character code indicating the kind of array (using the array
- interface typestring notation). A 'b' represents Boolean, an 'i'
- represents signed integer, a 'u' represents unsigned integer, 'f'
- represents floating point, 'c' represents complex floating point, 'S'
- represents 8-bit zero-terminated bytes, 'U' represents 32-bit/character
- unicode string, and 'V' represents arbitrary.
-
-.. c:member:: char PyArray_Descr.type
-
- A traditional character code indicating the data type.
-
-.. c:member:: char PyArray_Descr.byteorder
-
- A character indicating the byte-order: '>' (big-endian), '<'
- (little-endian), '=' (native), '\|' (irrelevant, ignore). All
- builtin data-types have byteorder '='.
-
-.. c:member:: char PyArray_Descr.flags
-
- A data-type bit-flag that determines if the data-type exhibits
- object-array-like behavior. Each bit in this member is a flag named
- as follows:
-
- .. c:var:: NPY_ITEM_REFCOUNT
-
- Indicates that items of this data-type must be reference
- counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ).
-
- .. c:var:: NPY_ITEM_HASOBJECT
-
- Same as :c:data:`NPY_ITEM_REFCOUNT`.
-
- .. c:var:: NPY_LIST_PICKLE
-
- Indicates arrays of this data-type must be converted to a list
- before pickling.
-
- .. c:var:: NPY_ITEM_IS_POINTER
-
- Indicates the item is a pointer to some other data-type
-
- .. c:var:: NPY_NEEDS_INIT
-
- Indicates memory for this data-type must be initialized (set
- to 0) on creation.
-
- .. c:var:: NPY_NEEDS_PYAPI
-
- Indicates this data-type requires the Python C-API during
- access (so don't give up the GIL if array access is going to
- be needed).
-
- .. c:var:: NPY_USE_GETITEM
-
- On array access use the ``f->getitem`` function pointer
- instead of the standard conversion to an array scalar. Must
- be used if you don't define an array scalar to go along with
- the data-type.
-
- .. c:var:: NPY_USE_SETITEM
-
- When creating a 0-d array from an array scalar use
- ``f->setitem`` instead of the standard copy from an array
- scalar. Must be used if you don't define an array scalar to go
- along with the data-type.
-
- .. c:var:: NPY_FROM_FIELDS
-
- The bits that are inherited for the parent data-type if these
- bits are set in any field of the data-type. Currently (
- :c:data:`NPY_NEEDS_INIT` \| :c:data:`NPY_LIST_PICKLE` \|
- :c:data:`NPY_ITEM_REFCOUNT` \| :c:data:`NPY_NEEDS_PYAPI` ).
-
- .. c:var:: NPY_OBJECT_DTYPE_FLAGS
-
- Bits set for the object data-type: ( :c:data:`NPY_LIST_PICKLE`
- \| :c:data:`NPY_USE_GETITEM` \| :c:data:`NPY_ITEM_IS_POINTER` \|
- :c:data:`NPY_ITEM_REFCOUNT` \| :c:data:`NPY_NEEDS_INIT` \|
- :c:data:`NPY_NEEDS_PYAPI`).
-
- .. c:function:: PyDataType_FLAGCHK(PyArray_Descr *dtype, int flags)
-
- Return true if all the given flags are set for the data-type
- object.
-
- .. c:function:: PyDataType_REFCHK(PyArray_Descr *dtype)
-
- Equivalent to :c:func:`PyDataType_FLAGCHK` (*dtype*,
- :c:data:`NPY_ITEM_REFCOUNT`).
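-
- For example, these checks might gate raw element access (a hedged
- sketch):
-
- .. code-block:: c
-
- if (PyDataType_REFCHK(dtype)) {
- /* items hold references: INCREF/DECREF when copying them */
- }
- if (!PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI)) {
- /* safe to release the GIL around raw data access */
- }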
-
-.. c:member:: int PyArray_Descr.type_num
-
- A number that uniquely identifies the data type. For new data-types,
- this number is assigned when the data-type is registered.
-
-.. c:member:: int PyArray_Descr.elsize
-
- For data types that are always the same size (such as long), this
- holds the size of the data type. For flexible data types, where
- different arrays can have different element sizes, this should be
- 0.
-
-.. c:member:: int PyArray_Descr.alignment
-
- A number providing alignment information for this data type.
- Specifically, it shows how far from the start of a 2-element
- structure (whose first element is a ``char``) the compiler places
- an item of this type: ``offsetof(struct {char c; type v;}, v)``.
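-
- For example (plain C, not NumPy-specific):
-
- .. code-block:: c
-
- #include <stddef.h>
-
- struct dbl_align { char c; double v; };
- /* the alignment of double on this platform, typically 8 */
- size_t align = offsetof(struct dbl_align, v);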
-
-.. c:member:: PyArray_ArrayDescr *PyArray_Descr.subarray
-
- If this is non-``NULL``, then this data-type descriptor is a
- C-style contiguous array of another data-type descriptor. In
- other words, each element that this descriptor describes is
- actually an array of some other base descriptor. This is most
- useful as the data-type descriptor for a field in another
- data-type descriptor. The fields member should be ``NULL`` if this
- is non-``NULL`` (the fields member of the base descriptor can be
- non-``NULL``, however). The :c:type:`PyArray_ArrayDescr` structure is
- defined using
-
- .. code-block:: c
-
- typedef struct {
- PyArray_Descr *base;
- PyObject *shape;
- } PyArray_ArrayDescr;
-
- The elements of this structure are:
-
- .. c:member:: PyArray_Descr *PyArray_ArrayDescr.base
-
- The data-type-descriptor object of the base-type.
-
- .. c:member:: PyObject *PyArray_ArrayDescr.shape
-
- The shape (always C-style contiguous) of the sub-array as a Python
- tuple.
-
-
-.. c:member:: PyObject *PyArray_Descr.fields
-
- If this is non-NULL, then this data-type-descriptor has fields
- described by a Python dictionary whose keys are names (and also
- titles if given) and whose values are tuples that describe the
- fields. Recall that a data-type-descriptor always describes a
- fixed-length set of bytes. A field is a named sub-region of that
- total, fixed-length collection. A field is described by a tuple
- composed of another data-type-descriptor and a byte
- offset. Optionally, the tuple may contain a title which is
- normally a Python string. These tuples are placed in this
- dictionary keyed by name (and also title if given).
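-
- A hedged sketch of unpacking one field tuple (the field name ``"x"``
- is a placeholder):
-
- .. code-block:: c
-
- PyObject *tup = PyDict_GetItemString(descr->fields, "x"); /* borrowed */
- PyArray_Descr *fdescr;
- int offset;
- PyObject *title = NULL;
- if (tup != NULL &&
- PyArg_ParseTuple(tup, "Oi|O", &fdescr, &offset, &title)) {
- /* fdescr describes the field stored at byte offset `offset` */
- }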
-
-.. c:member:: PyObject *PyArray_Descr.names
-
- An ordered tuple of field names. It is NULL if no field is
- defined.
-
-.. c:member:: PyArray_ArrFuncs *PyArray_Descr.f
-
- A pointer to a structure containing functions that the type needs
- to implement internal features. These functions are not the same
- thing as the universal functions (ufuncs) described later. Their
- signatures can vary arbitrarily.
-
-.. c:member:: PyObject *PyArray_Descr.metadata
-
- Metadata about this dtype.
-
-.. c:member:: NpyAuxData *PyArray_Descr.c_metadata
-
- Metadata specific to the C implementation
- of the particular dtype. Added for NumPy 1.7.0.
-
-.. c:member:: npy_hash_t PyArray_Descr.hash
-
- Currently unused. Reserved for future use in caching
- hash values.
-
-.. c:type:: PyArray_ArrFuncs
-
- Functions implementing internal features. Not all of these
- function pointers must be defined for a given type. The required
- members are ``nonzero``, ``copyswap``, ``copyswapn``, ``setitem``,
- ``getitem``, and ``cast``. These are assumed to be non-``NULL``,
- and ``NULL`` entries will cause a program crash. The other
- functions may be ``NULL`` which will just mean reduced
- functionality for that data-type. (Also, the nonzero function will
- be filled in with a default function if it is ``NULL`` when you
- register a user-defined data-type).
-
- .. code-block:: c
-
- typedef struct {
- PyArray_VectorUnaryFunc *cast[NPY_NTYPES];
- PyArray_GetItemFunc *getitem;
- PyArray_SetItemFunc *setitem;
- PyArray_CopySwapNFunc *copyswapn;
- PyArray_CopySwapFunc *copyswap;
- PyArray_CompareFunc *compare;
- PyArray_ArgFunc *argmax;
- PyArray_DotFunc *dotfunc;
- PyArray_ScanFunc *scanfunc;
- PyArray_FromStrFunc *fromstr;
- PyArray_NonzeroFunc *nonzero;
- PyArray_FillFunc *fill;
- PyArray_FillWithScalarFunc *fillwithscalar;
- PyArray_SortFunc *sort[NPY_NSORTS];
- PyArray_ArgSortFunc *argsort[NPY_NSORTS];
- PyObject *castdict;
- PyArray_ScalarKindFunc *scalarkind;
- int **cancastscalarkindto;
- int *cancastto;
- PyArray_FastClipFunc *fastclip;
- PyArray_FastPutmaskFunc *fastputmask;
- PyArray_FastTakeFunc *fasttake;
- PyArray_ArgFunc *argmin;
- } PyArray_ArrFuncs;
-
- The concept of a behaved segment is used in the description of the
- function pointers. A behaved segment is one that is aligned and in
- native machine byte-order for the data-type. The ``nonzero``,
- ``copyswap``, ``copyswapn``, ``getitem``, and ``setitem``
- functions can (and must) deal with mis-behaved arrays. The other
- functions require behaved memory segments.
-
- .. c:member:: void cast( \
- void *from, void *to, npy_intp n, void *fromarr, void *toarr)
-
- An array of function pointers to cast from the current type to
- all of the other builtin types. Each function casts a
- contiguous, aligned, and notswapped buffer pointed at by
- *from* to a contiguous, aligned, and notswapped buffer pointed
- at by *to*. The number of items to cast is given by *n*, and
- the arguments *fromarr* and *toarr* are interpreted as
- PyArrayObjects for flexible arrays to get itemsize
- information.
-
- .. c:member:: PyObject *getitem(void *data, void *arr)
-
- A pointer to a function that returns a standard Python object
- from a single element of the array object *arr* pointed to by
- *data*. This function must be able to deal with "misbehaved"
- (misaligned and/or swapped) arrays correctly.
-
- .. c:member:: int setitem(PyObject *item, void *data, void *arr)
-
- A pointer to a function that sets the Python object *item*
- into the array, *arr*, at the position pointed to by *data*.
- This function deals with "misbehaved" arrays. If successful,
- a zero is returned, otherwise, a negative one is returned (and
- a Python error set).
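-
- For illustration, a hedged sketch of a round trip through the
- function table for element ``i`` of a 1-d array:
-
- .. code-block:: c
-
- PyArray_Descr *d = PyArray_DESCR(arr);
- char *itemptr = PyArray_BYTES(arr) + i * PyArray_STRIDE(arr, 0);
- PyObject *obj = d->f->getitem(itemptr, arr); /* new reference */
- if (obj != NULL && d->f->setitem(obj, itemptr, arr) < 0) {
- /* a Python error has been set */
- }
- Py_XDECREF(obj);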
-
- .. c:member:: void copyswapn( \
- void *dest, npy_intp dstride, void *src, npy_intp sstride, \
- npy_intp n, int swap, void *arr)
-
- .. c:member:: void copyswap(void *dest, void *src, int swap, void *arr)
-
- These members are both pointers to functions to copy data from
- *src* to *dest* and *swap* if indicated. The value of arr is
- only used for flexible ( :c:data:`NPY_STRING`, :c:data:`NPY_UNICODE`,
- and :c:data:`NPY_VOID` ) arrays (and is obtained from
- ``arr->descr->elsize`` ). The second function copies a single
- value, while the first loops over n values with the provided
- strides. These functions can deal with misbehaved *src*
- data. If *src* is NULL then no copy is performed. If *swap* is
- 0, then no byteswapping occurs. It is assumed that *dest* and
- *src* do not overlap. If they overlap, then use ``memmove``
- (...) first followed by ``copyswap(n)`` with NULL valued
- ``src``.
-
- .. c:member:: int compare(const void* d1, const void* d2, void* arr)
-
- A pointer to a function that compares two elements of the
- array, ``arr``, pointed to by ``d1`` and ``d2``. This
- function requires behaved (aligned and not swapped) arrays.
- The return value is 1 if ``*d1`` > ``*d2``, 0 if ``*d1`` ==
- ``*d2``, and -1 if ``*d1`` < ``*d2``. The array object ``arr`` is
- used to retrieve itemsize and field information for flexible arrays.
-
- .. c:member:: int argmax( \
- void* data, npy_intp n, npy_intp* max_ind, void* arr)
-
- A pointer to a function that retrieves the index of the
- largest of ``n`` elements in ``arr`` beginning at the element
- pointed to by ``data``. This function requires that the
- memory segment be contiguous and behaved. The return value is
- always 0. The index of the largest element is returned in
- ``max_ind``.
-
- .. c:member:: void dotfunc( \
- void* ip1, npy_intp is1, void* ip2, npy_intp is2, void* op, \
- npy_intp n, void* arr)
-
- A pointer to a function that multiplies two ``n`` -length
- sequences together, adds them, and places the result in
- element pointed to by ``op`` of ``arr``. The start of the two
- sequences are pointed to by ``ip1`` and ``ip2``. To get to
- the next element in each sequence requires a jump of ``is1``
- and ``is2`` *bytes*, respectively. This function requires
- behaved (though not necessarily contiguous) memory.
-
- .. c:member:: int scanfunc(FILE* fd, void* ip, void* arr)
-
- A pointer to a function that scans (scanf style) one element
- of the corresponding type from the file descriptor ``fd`` into
- the array memory pointed to by ``ip``. The array is assumed
- to be behaved.
- The last argument ``arr`` is the array to be scanned into.
- Returns the number of receiving arguments successfully assigned (which
- may be zero in case a matching failure occurred before the first
- receiving argument was assigned), or EOF if input failure occurs
- before the first receiving argument was assigned.
- This function should be called without holding the Python GIL, and
- has to grab it for error reporting.
-
- .. c:member:: int fromstr(char* str, void* ip, char** endptr, void* arr)
-
- A pointer to a function that converts the string pointed to by
- ``str`` to one element of the corresponding type and places it
- in the memory location pointed to by ``ip``. After the
- conversion is completed, ``*endptr`` points to the rest of the
- string. The last argument ``arr`` is the array into which ip
- points (needed for variable-size data- types). Returns 0 on
- success or -1 on failure. Requires a behaved array.
- This function should be called without holding the Python GIL, and
- has to grab it for error reporting.
-
- .. c:member:: npy_bool nonzero(void* data, void* arr)
-
- A pointer to a function that returns TRUE if the item of
- ``arr`` pointed to by ``data`` is nonzero. This function can
- deal with misbehaved arrays.
-
- .. c:member:: void fill(void* data, npy_intp length, void* arr)
-
- A pointer to a function that fills a contiguous array of given
- length with data. The first two elements of the array must
- already be filled- in. From these two values, a delta will be
- computed and the values from item 3 to the end will be
- computed by repeatedly adding this computed delta. The data
- buffer must be well-behaved.
-
- .. c:member:: void fillwithscalar( \
- void* buffer, npy_intp length, void* value, void* arr)
-
- A pointer to a function that fills a contiguous ``buffer`` of
- the given ``length`` with a single scalar ``value`` whose
- address is given. The final argument is the array which is
- needed to get the itemsize for variable-length arrays.
-
- .. c:member:: int sort(void* start, npy_intp length, void* arr)
-
- An array of function pointers to particular sorting
- algorithms. A particular sorting algorithm is obtained using a
- key (so far :c:data:`NPY_QUICKSORT`, :c:data:`NPY_HEAPSORT`,
- and :c:data:`NPY_MERGESORT` are defined). These sorts are done
- in-place assuming contiguous and aligned data.
-
- .. c:member:: int argsort( \
- void* start, npy_intp* result, npy_intp length, void *arr)
-
- An array of function pointers to sorting algorithms for this
- data type. The same sorting algorithms as for sort are
- available. The indices producing the sort are returned in
- ``result`` (which must be initialized with indices 0 to
- ``length-1`` inclusive).
-
- .. c:member:: PyObject *castdict
-
- Either ``NULL`` or a dictionary containing low-level casting
- functions for user-defined data-types. Each function is
- wrapped in a :c:type:`PyCObject *` and keyed by the data-type number.
-
- .. c:member:: NPY_SCALARKIND scalarkind(PyArrayObject* arr)
-
- A function to determine how scalars of this type should be
- interpreted. The argument is ``NULL`` or a 0-dimensional array
- containing the data (if that is needed to determine the kind
- of scalar). The return value must be of type
- :c:type:`NPY_SCALARKIND`.
-
- .. c:member:: int **cancastscalarkindto
-
- Either ``NULL`` or an array of :c:type:`NPY_NSCALARKINDS`
- pointers. These pointers should each be either ``NULL`` or a
- pointer to an array of integers (terminated by
- :c:data:`NPY_NOTYPE`) indicating data-types that a scalar of
- this data-type of the specified kind can be cast to safely
- (this usually means without losing precision).
-
- .. c:member:: int *cancastto
-
- Either ``NULL`` or an array of integers (terminated by
- :c:data:`NPY_NOTYPE`) indicating data-types that this data-type
- can be cast to safely (this usually means without losing
- precision).
-
- .. c:member:: void fastclip( \
- void *in, npy_intp n_in, void *min, void *max, void *out)
-
- A function that reads ``n_in`` items from ``in``, and writes to
- ``out`` the read value if it is within the limits pointed to by
- ``min`` and ``max``, or the corresponding limit if outside. The
- memory segments must be contiguous and behaved, and either
- ``min`` or ``max`` may be ``NULL``, but not both.
-
- .. c:member:: void fastputmask( \
- void *in, void *mask, npy_intp n_in, void *values, npy_intp nv)
-
- A function that takes a pointer ``in`` to an array of ``n_in``
- items, a pointer ``mask`` to an array of ``n_in`` boolean
- values, and a pointer ``values`` to an array of ``nv`` items.
- Items from ``values`` are copied into ``in`` wherever the value
- in ``mask`` is non-zero, tiling ``values`` as needed if
- ``nv < n_in``. All arrays must be contiguous and behaved.
-
- .. c:member:: void fasttake( \
- void *dest, void *src, npy_intp *indarray, npy_intp nindarray, \
- npy_intp n_outer, npy_intp m_middle, npy_intp nelem, \
- NPY_CLIPMODE clipmode)
-
- A function that takes a pointer ``src`` to a C contiguous,
- behaved segment, interpreted as a 3-dimensional array of shape
- ``(n_outer, nindarray, nelem)``, a pointer ``indarray`` to a
- contiguous, behaved segment of ``m_middle`` integer indices,
- and a pointer ``dest`` to a C contiguous, behaved segment,
- interpreted as a 3-dimensional array of shape
- ``(n_outer, m_middle, nelem)``. The indices in ``indarray`` are
- used to index ``src`` along the second dimension, and copy the
- corresponding chunks of ``nelem`` items into ``dest``.
- ``clipmode`` (which can take on the values :c:data:`NPY_RAISE`,
- :c:data:`NPY_WRAP` or :c:data:`NPY_CLIP`) determines how
- indices smaller than 0 or larger than ``nindarray`` will be
- handled.
-
- .. c:member:: int argmin( \
- void* data, npy_intp n, npy_intp* min_ind, void* arr)
-
- A pointer to a function that retrieves the index of the
- smallest of ``n`` elements in ``arr`` beginning at the element
- pointed to by ``data``. This function requires that the
- memory segment be contiguous and behaved. The return value is
- always 0. The index of the smallest element is returned in
- ``min_ind``.
-
-
-The :c:data:`PyArray_Type` typeobject implements many of the features of
-:c:type:`Python objects <PyTypeObject>` including the :c:member:`tp_as_number
-<PyTypeObject.tp_as_number>`, :c:member:`tp_as_sequence
-<PyTypeObject.tp_as_sequence>`, :c:member:`tp_as_mapping
-<PyTypeObject.tp_as_mapping>`, and :c:member:`tp_as_buffer
-<PyTypeObject.tp_as_buffer>` interfaces. The :c:type:`rich comparison
-<richcmpfunc>` is also used along with new-style attribute lookup for
-members (:c:member:`tp_members <PyTypeObject.tp_members>`) and properties
-(:c:member:`tp_getset <PyTypeObject.tp_getset>`).
-The :c:data:`PyArray_Type` can also be sub-typed.
-
-.. tip::
-
- The ``tp_as_number`` methods use a generic approach to call whatever
- function has been registered for handling the operation. When the
- ``_multiarray_umath`` module is imported, it sets the numeric operations
- for all arrays to the corresponding ufuncs. This choice can be changed
- with :c:func:`PyUFunc_ReplaceLoopBySignature`. The ``tp_str`` and
- ``tp_repr`` methods can also be altered using
- :c:func:`PyArray_SetStringFunction`.
-
-
-PyUFunc_Type and PyUFuncObject
-------------------------------
-
-.. c:var:: PyUFunc_Type
-
- The ufunc object is implemented by creation of the
- :c:data:`PyUFunc_Type`. It is a very simple type that implements only
- basic getattribute behavior, printing behavior, and has call
- behavior which allows these objects to act like functions. The
- basic idea behind the ufunc is to hold a reference to fast
- 1-dimensional (vector) loops for each data type that supports the
- operation. These one-dimensional loops all have the same signature
- and are the key to creating a new ufunc. They are called by the
- generic looping code as appropriate to implement the N-dimensional
- function. There are also some generic 1-d loops defined for
- floating and complexfloating arrays that allow you to define a
- ufunc using a single scalar function (*e.g.* atanh).
-
-
-.. c:type:: PyUFuncObject
-
- The core of the ufunc is the :c:type:`PyUFuncObject` which contains all
- the information needed to call the underlying C-code loops that
- perform the actual work. While it is described here for completeness, it
- should be considered internal to NumPy and manipulated via ``PyUFunc_*``
- functions. The size of this structure is subject to change across versions
- of NumPy. To ensure compatibility:
-
- - Never declare a non-pointer instance of the struct
- - Never perform pointer arithmetic
- - Never use ``sizeof(PyUFuncObject)``
-
- It has the following structure:
-
- .. code-block:: c
-
- typedef struct {
- PyObject_HEAD
- int nin;
- int nout;
- int nargs;
- int identity;
- PyUFuncGenericFunction *functions;
- void **data;
- int ntypes;
- int reserved1;
- const char *name;
- char *types;
- const char *doc;
- void *ptr;
- PyObject *obj;
- PyObject *userloops;
- int core_enabled;
- int core_num_dim_ix;
- int *core_num_dims;
- int *core_dim_ixs;
- int *core_offsets;
- char *core_signature;
- PyUFunc_TypeResolutionFunc *type_resolver;
- PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
- PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector;
- npy_uint32 *op_flags;
- npy_uint32 *iter_flags;
- /* new in API version 0x0000000D */
- npy_intp *core_dim_sizes;
- npy_intp *core_dim_flags;
-
- } PyUFuncObject;
-
- .. c:macro:: PyUFuncObject.PyObject_HEAD
-
- Required for all Python objects.
-
- .. c:member:: int PyUFuncObject.nin
-
- The number of input arguments.
-
- .. c:member:: int PyUFuncObject.nout
-
- The number of output arguments.
-
- .. c:member:: int PyUFuncObject.nargs
-
- The total number of arguments (*nin* + *nout*). This must be
- less than :c:data:`NPY_MAXARGS`.
-
- .. c:member:: int PyUFuncObject.identity
-
- Either :c:data:`PyUFunc_One`, :c:data:`PyUFunc_Zero`,
- :c:data:`PyUFunc_MinusOne`, or :c:data:`PyUFunc_None` to indicate
- the identity for this operation. It is only used for a
- reduce-like call on an empty array.
-
- .. c:member:: void PyUFuncObject.functions( \
- char** args, npy_intp* dims, npy_intp* steps, void* extradata)
-
- An array of function pointers --- one for each data type
- supported by the ufunc. This is the vector loop that is called
- to implement the underlying function *dims* [0] times. The
- first argument, *args*, is an array of *nargs* pointers to
- behaved memory. Pointers to the data for the input arguments
- are first, followed by the pointers to the data for the output
- arguments. How many bytes must be skipped to get to the next
- element in the sequence is specified by the corresponding entry
- in the *steps* array. The last argument allows the loop to
- receive extra information. This is commonly used so that a
- single, generic vector loop can be used for multiple
- functions. In this case, the actual scalar function to call is
- passed in as *extradata*. The size of this function pointer
- array is ntypes.
-
- .. c:member:: void **PyUFuncObject.data
-
- Extra data to be passed to the 1-d vector loops or ``NULL`` if
- no extra-data is needed. This C-array must be the same size
- (*i.e.* ntypes) as the functions array. Several C-API calls for
- UFuncs are
- just 1-d vector loops that make use of this extra data to
- receive a pointer to the actual function to call.
-
- .. c:member:: int PyUFuncObject.ntypes
-
- The number of supported data types for the ufunc. This number
- specifies how many different 1-d loops (of the builtin data
- types) are available.
-
- .. c:member:: int PyUFuncObject.reserved1
-
- Unused.
-
- .. c:member:: char *PyUFuncObject.name
-
- A string name for the ufunc. This is used dynamically to build
- the __doc\__ attribute of ufuncs.
-
- .. c:member:: char *PyUFuncObject.types
-
- An array of :math:`nargs \times ntypes` 8-bit type_numbers
- which contains the type signature for the function for each of
- the supported (builtin) data types. For each of the *ntypes*
- functions, the corresponding set of type numbers in this array
- shows how the *args* argument should be interpreted in the 1-d
- vector loop. These type numbers do not have to be the same type
- and mixed-type ufuncs are supported.
-
- .. c:member:: char *PyUFuncObject.doc
-
- Documentation for the ufunc. Should not contain the function
- signature as this is generated dynamically when __doc\__ is
- retrieved.
-
- .. c:member:: void *PyUFuncObject.ptr
-
- Any dynamically allocated memory. Currently, this is used for
- dynamic ufuncs created from a python function to store room for
- the types, data, and name members.
-
- .. c:member:: PyObject *PyUFuncObject.obj
-
- For ufuncs dynamically created from python functions, this member
- holds a reference to the underlying Python function.
-
- .. c:member:: PyObject *PyUFuncObject.userloops
-
- A dictionary of user-defined 1-d vector loops (stored as CObject
- ptrs) for user-defined types. A loop may be registered by the
- user for any user-defined type. It is retrieved by type number.
- User defined type numbers are always larger than
- :c:data:`NPY_USERDEF`.
-
- .. c:member:: int PyUFuncObject.core_enabled
-
- 0 for scalar ufuncs; 1 for generalized ufuncs
-
- .. c:member:: int PyUFuncObject.core_num_dim_ix
-
- Number of distinct core dimension names in the signature
-
- .. c:member:: int *PyUFuncObject.core_num_dims
-
- Number of core dimensions of each argument
-
- .. c:member:: int *PyUFuncObject.core_dim_ixs
-
- Dimension indices in a flattened form; indices of argument ``k`` are
- stored in ``core_dim_ixs[core_offsets[k] : core_offsets[k] +
- core_num_dims[k]]``
-
- .. c:member:: int *PyUFuncObject.core_offsets
-
- Position of 1st core dimension of each argument in ``core_dim_ixs``,
- equivalent to cumsum(``core_num_dims``)
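-
- As an illustration (not taken from the NumPy sources), for a
- generalized ufunc with signature ``(m,n),(n,p)->(m,p)`` these members
- would hold:
-
- .. code-block:: c
-
- /* distinct names m=0, n=1, p=2 => core_num_dim_ix == 3 */
- /* core_num_dims == {2, 2, 2} (two core dims per argument) */
- /* core_dim_ixs == {0,1, 1,2, 0,2} (m,n | n,p | m,p) */
- /* core_offsets == {0, 2, 4} */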
-
- .. c:member:: char *PyUFuncObject.core_signature
-
- Core signature string
-
- .. c:member:: PyUFunc_TypeResolutionFunc *PyUFuncObject.type_resolver
-
- A function which resolves the types and fills an array with the dtypes
- for the inputs and outputs
-
- .. c:member:: PyUFunc_LegacyInnerLoopSelectionFunc *PyUFuncObject.legacy_inner_loop_selector
-
- A function which returns an inner loop. The ``legacy`` in the name arises
- because for NumPy 1.6 a better variant had been planned. This variant
- has not yet come about.
-
- .. c:member:: void *PyUFuncObject.reserved2
-
- For a possible future loop selector with a different signature.
-
- .. c:member:: PyUFunc_MaskedInnerLoopSelectionFunc *PyUFuncObject.masked_inner_loop_selector
-
- Function which returns a masked inner loop for the ufunc
-
- .. c:member:: npy_uint32 *PyUFuncObject.op_flags
-
- Override the default operand flags for each ufunc operand.
-
- .. c:member:: npy_uint32 PyUFuncObject.iter_flags
-
- Override the default nditer flags for the ufunc.
-
- Added in API version 0x0000000D
-
- .. c:member:: npy_intp *PyUFuncObject.core_dim_sizes
-
- For each distinct core dimension, the possible
- :ref:`frozen <frozen>` size if :c:data:`UFUNC_CORE_DIM_SIZE_INFERRED` is 0
-
- .. c:member:: npy_uint32 *PyUFuncObject.core_dim_flags
-
- For each distinct core dimension, a set of ``UFUNC_CORE_DIM*`` flags
-
- - :c:data:`UFUNC_CORE_DIM_CAN_IGNORE` if the dim name ends in ``?``
- - :c:data:`UFUNC_CORE_DIM_SIZE_INFERRED` if the dim size will be
- determined from the operands and not from a :ref:`frozen <frozen>` signature
-
-PyArrayIter_Type and PyArrayIterObject
---------------------------------------
-
-.. c:var:: PyArrayIter_Type
-
- This is an iterator object that makes it easy to loop over an
- N-dimensional array. It is the object returned from the flat
- attribute of an ndarray. It is also used extensively throughout the
- implementation internals to loop over an N-dimensional array. The
- tp_as_mapping interface is implemented so that the iterator object
- can be indexed (using 1-d indexing), and a few methods are
- implemented through the tp_methods table. This object implements the
- next method and can be used anywhere an iterator can be used in
- Python.
-
-.. c:type:: PyArrayIterObject
-
- The C-structure corresponding to an object of :c:data:`PyArrayIter_Type` is
- the :c:type:`PyArrayIterObject`. The :c:type:`PyArrayIterObject` is used to
- keep track of a pointer into an N-dimensional array. It contains associated
- information used to quickly march through the array. The pointer can
- be adjusted in three basic ways: 1) advance to the "next" position in
- the array in a C-style contiguous fashion, 2) advance to an arbitrary
- N-dimensional coordinate in the array, and 3) advance to an arbitrary
- one-dimensional index into the array. The members of the
- :c:type:`PyArrayIterObject` structure are used in these
- calculations. Iterator objects keep their own dimension and strides
- information about an array. This can be adjusted as needed for
- "broadcasting," or to loop over only specific dimensions.
-
- .. code-block:: c
-
- typedef struct {
- PyObject_HEAD
- int nd_m1;
- npy_intp index;
- npy_intp size;
- npy_intp coordinates[NPY_MAXDIMS];
- npy_intp dims_m1[NPY_MAXDIMS];
- npy_intp strides[NPY_MAXDIMS];
- npy_intp backstrides[NPY_MAXDIMS];
- npy_intp factors[NPY_MAXDIMS];
- PyArrayObject *ao;
- char *dataptr;
- npy_bool contiguous;
- } PyArrayIterObject;
-
- .. c:member:: int PyArrayIterObject.nd_m1
-
- :math:`N-1` where :math:`N` is the number of dimensions in the
- underlying array.
-
- .. c:member:: npy_intp PyArrayIterObject.index
-
- The current 1-d index into the array.
-
- .. c:member:: npy_intp PyArrayIterObject.size
-
- The total size of the underlying array.
-
- .. c:member:: npy_intp *PyArrayIterObject.coordinates
-
- An :math:`N` -dimensional index into the array.
-
- .. c:member:: npy_intp *PyArrayIterObject.dims_m1
-
- The size of the array minus 1 in each dimension.
-
- .. c:member:: npy_intp *PyArrayIterObject.strides
-
- The strides of the array. How many bytes needed to jump to the next
- element in each dimension.
-
- .. c:member:: npy_intp *PyArrayIterObject.backstrides
-
- How many bytes needed to jump from the end of a dimension back
- to its beginning. Note that ``backstrides[k] == strides[k] *
- dims_m1[k]``, but it is stored here as an optimization.
-
- .. c:member:: npy_intp *PyArrayIterObject.factors
-
- This array is used in computing an N-d index from a 1-d index. It
- contains needed products of the dimensions.
-
- .. c:member:: PyArrayObject *PyArrayIterObject.ao
-
- A pointer to the underlying ndarray this iterator was created to
- represent.
-
- .. c:member:: char *PyArrayIterObject.dataptr
-
- This member points to an element in the ndarray indicated by the
- index.
-
- .. c:member:: npy_bool PyArrayIterObject.contiguous
-
- This flag is true if the underlying array is
- :c:data:`NPY_ARRAY_C_CONTIGUOUS`. It is used to simplify
- calculations when possible.
-
-
-How to use an array iterator on a C-level is explained more fully in
-later sections. Typically, you do not need to concern yourself with
-the internal structure of the iterator object, and merely interact
-with it through the use of the macros :c:func:`PyArray_ITER_NEXT` (it),
-:c:func:`PyArray_ITER_GOTO` (it, dest), or :c:func:`PyArray_ITER_GOTO1D`
-(it, index). All of these macros require the argument *it* to be a
-:c:type:`PyArrayIterObject *`.
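-
-For example, a hedged sketch of a flat-iteration loop over every element
-of an array ``arr``:
-
-.. code-block:: c
-
- PyArrayIterObject *it =
- (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr);
- if (it != NULL) {
- while (it->index < it->size) {
- /* it->dataptr points at the current element */
- PyArray_ITER_NEXT(it);
- }
- Py_DECREF(it);
- }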
-
-
-PyArrayMultiIter_Type and PyArrayMultiIterObject
-------------------------------------------------
-
-.. c:var:: PyArrayMultiIter_Type
-
- This type provides an iterator that encapsulates the concept of
- broadcasting. It allows :math:`N` arrays to be broadcast together
- so that the loop progresses in C-style contiguous fashion over the
- broadcasted array. The corresponding C-structure is the
- :c:type:`PyArrayMultiIterObject`, whose memory layout must begin any
- object *obj* passed to the :c:func:`PyArray_Broadcast` function.
- Broadcasting is performed by adjusting array iterators so
- that each iterator represents the broadcasted shape and size, but
- has its strides adjusted so that the correct element from the array
- is used at each iteration.
-
-
-.. c:type:: PyArrayMultiIterObject
-
- .. code-block:: c
-
- typedef struct {
- PyObject_HEAD
- int numiter;
- npy_intp size;
- npy_intp index;
- int nd;
- npy_intp dimensions[NPY_MAXDIMS];
- PyArrayIterObject *iters[NPY_MAXDIMS];
- } PyArrayMultiIterObject;
-
- .. c:macro:: PyArrayMultiIterObject.PyObject_HEAD
-
- Needed at the start of every Python object (holds reference count
- and type identification).
-
- .. c:member:: int PyArrayMultiIterObject.numiter
-
- The number of arrays that need to be broadcast to the same shape.
-
- .. c:member:: npy_intp PyArrayMultiIterObject.size
-
- The total broadcasted size.
-
- .. c:member:: npy_intp PyArrayMultiIterObject.index
-
- The current (1-d) index into the broadcasted result.
-
- .. c:member:: int PyArrayMultiIterObject.nd
-
- The number of dimensions in the broadcasted result.
-
- .. c:member:: npy_intp *PyArrayMultiIterObject.dimensions
-
- The shape of the broadcasted result (only ``nd`` slots are used).
-
- .. c:member:: PyArrayIterObject **PyArrayMultiIterObject.iters
-
- An array of iterator objects that holds the iterators for the
- arrays to be broadcast together. On return, the iterators are
- adjusted for broadcasting.
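-
- For example, a hedged sketch of broadcasting two arrays ``a`` and
- ``b`` and walking them in lock step:
-
- .. code-block:: c
-
- PyArrayMultiIterObject *mit =
- (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, a, b);
- if (mit != NULL) {
- while (mit->index < mit->size) {
- char *pa = PyArray_MultiIter_DATA(mit, 0);
- char *pb = PyArray_MultiIter_DATA(mit, 1);
- /* pa and pb point at matching broadcast elements */
- PyArray_MultiIter_NEXT(mit);
- }
- Py_DECREF(mit);
- }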
-
-PyArrayNeighborhoodIter_Type and PyArrayNeighborhoodIterObject
---------------------------------------------------------------
-
-.. c:var:: PyArrayNeighborhoodIter_Type
-
- This is an iterator object that makes it easy to loop over an
- N-dimensional neighborhood.
-
-.. c:type:: PyArrayNeighborhoodIterObject
-
- The C-structure corresponding to an object of
- :c:data:`PyArrayNeighborhoodIter_Type` is the
- :c:type:`PyArrayNeighborhoodIterObject`.
-
- .. code-block:: c
-
- typedef struct {
- PyObject_HEAD
- int nd_m1;
- npy_intp index, size;
- npy_intp coordinates[NPY_MAXDIMS];
- npy_intp dims_m1[NPY_MAXDIMS];
- npy_intp strides[NPY_MAXDIMS];
- npy_intp backstrides[NPY_MAXDIMS];
- npy_intp factors[NPY_MAXDIMS];
- PyArrayObject *ao;
- char *dataptr;
- npy_bool contiguous;
- npy_intp bounds[NPY_MAXDIMS][2];
- npy_intp limits[NPY_MAXDIMS][2];
- npy_intp limits_sizes[NPY_MAXDIMS];
- npy_iter_get_dataptr_t translate;
- npy_intp nd;
- npy_intp dimensions[NPY_MAXDIMS];
- PyArrayIterObject* _internal_iter;
- char* constant;
- int mode;
- } PyArrayNeighborhoodIterObject;
-
-PyArrayFlags_Type and PyArrayFlagsObject
-----------------------------------------
-
-.. c:var:: PyArrayFlags_Type
-
- When the flags attribute is retrieved from Python, a special
- builtin object of this type is constructed. This special type makes
- it easier to work with the different flags by accessing them as
- attributes or by accessing them as if the object were a dictionary
- with the flag names as entries.
-
-.. c:type:: PyArrayFlagsObject
-
- .. code-block:: c
-
- typedef struct PyArrayFlagsObject {
- PyObject_HEAD
- PyObject *arr;
- int flags;
- } PyArrayFlagsObject;
-
-
-ScalarArrayTypes
-----------------
-
-There is a Python type for each of the different built-in data types
-that can be present in the array. Most of these are simple wrappers
-around the corresponding data type in C. The C-names for these types
-are :c:data:`Py{TYPE}ArrType_Type` where ``{TYPE}`` can be
-
- **Bool**, **Byte**, **Short**, **Int**, **Long**, **LongLong**,
- **UByte**, **UShort**, **UInt**, **ULong**, **ULongLong**,
- **Half**, **Float**, **Double**, **LongDouble**, **CFloat**,
- **CDouble**, **CLongDouble**, **String**, **Unicode**, **Void**, and
- **Object**.
-
-These type names are part of the C-API and can therefore be created in
-extension C-code. There is also a :c:data:`PyIntpArrType_Type` and a
-:c:data:`PyUIntpArrType_Type` that are simple substitutes for one of the
-integer types that can hold a pointer on the platform. The structure
-of these scalar objects is not exposed to C-code. The function
-:c:func:`PyArray_ScalarAsCtype` can be used to extract the C-type
-value from the array scalar, and the function :c:func:`PyArray_Scalar`
-can be used to construct an array scalar from a C-value.
-
-
-Other C-Structures
-==================
-
-A few new C-structures were found to be useful in the development of
-NumPy. These C-structures are used in at least one C-API call and are
-therefore documented here. The main reason these structures were
-defined is to make it easy to use the Python ParseTuple C-API to
-convert from Python objects to a useful C-Object.
-
-
-PyArray_Dims
-------------
-
-.. c:type:: PyArray_Dims
-
- This structure is very useful when shape and/or strides information
- is supposed to be interpreted. The structure is:
-
- .. code-block:: c
-
- typedef struct {
- npy_intp *ptr;
- int len;
- } PyArray_Dims;
-
- The members of this structure are
-
- .. c:member:: npy_intp *PyArray_Dims.ptr
-
- A pointer to a list of (:c:type:`npy_intp`) integers which
- usually represent array shape or array strides.
-
- .. c:member:: int PyArray_Dims.len
-
- The length of the list of integers. It is assumed safe to
- access *ptr* [0] to *ptr* [len-1].
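-
- For example, this structure pairs naturally with
- :c:func:`PyArray_IntpConverter` in ``PyArg_ParseTuple`` (a hedged
- sketch):
-
- .. code-block:: c
-
- PyArray_Dims shape = {NULL, 0};
- if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, &shape)) {
- return NULL;
- }
- /* shape.ptr[0] ... shape.ptr[shape.len - 1] hold the integers */
- PyDimMem_FREE(shape.ptr);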
-
-
-PyArray_Chunk
--------------
-
-.. c:type:: PyArray_Chunk
-
- This is equivalent to the buffer object structure in Python up to
- the ptr member. On 32-bit platforms (*i.e.* if :c:data:`NPY_SIZEOF_INT`
- == :c:data:`NPY_SIZEOF_INTP`), the len member also matches an equivalent
- member of the buffer object. It is useful to represent a generic
- single-segment chunk of memory.
-
- .. code-block:: c
-
- typedef struct {
- PyObject_HEAD
- PyObject *base;
- void *ptr;
- npy_intp len;
- int flags;
- } PyArray_Chunk;
-
- The members are
-
- .. c:macro:: PyArray_Chunk.PyObject_HEAD
-
- Necessary for all Python objects. Included here so that the
- :c:type:`PyArray_Chunk` structure matches that of the buffer object
- (at least to the len member).
-
- .. c:member:: PyObject *PyArray_Chunk.base
-
- The Python object this chunk of memory comes from. Needed so that
- memory can be accounted for properly.
-
- .. c:member:: void *PyArray_Chunk.ptr
-
- A pointer to the start of the single-segment chunk of memory.
-
- .. c:member:: npy_intp PyArray_Chunk.len
-
- The length of the segment in bytes.
-
- .. c:member:: int PyArray_Chunk.flags
-
- Any data flags (*e.g.* :c:data:`NPY_ARRAY_WRITEABLE` ) that should
- be used to interpret the memory.
-
-
-PyArrayInterface
-----------------
-
-.. seealso:: :ref:`arrays.interface`
-
-.. c:type:: PyArrayInterface
-
- The :c:type:`PyArrayInterface` structure is defined so that NumPy and
- other extension modules can use the rapid array interface
- protocol. The :obj:`__array_struct__` method of an object that
- supports the rapid array interface protocol should return a
- :c:type:`PyCObject` that contains a pointer to a :c:type:`PyArrayInterface`
- structure with the relevant details of the array. After the new
- array is created, the attribute should be ``DECREF``'d which will
- free the :c:type:`PyArrayInterface` structure. Remember to ``INCREF`` the
- object (whose :obj:`__array_struct__` attribute was retrieved) and
- point the base member of the new :c:type:`PyArrayObject` to this same
- object. In this way the memory for the array will be managed
- correctly.
-
- .. code-block:: c
-
- typedef struct {
- int two;
- int nd;
- char typekind;
- int itemsize;
- int flags;
- npy_intp *shape;
- npy_intp *strides;
- void *data;
- PyObject *descr;
- } PyArrayInterface;
-
- .. c:member:: int PyArrayInterface.two
-
- the integer 2 as a sanity check.
-
- .. c:member:: int PyArrayInterface.nd
-
- the number of dimensions in the array.
-
- .. c:member:: char PyArrayInterface.typekind
-
- A character indicating what kind of array is present according to the
- typestring convention with 't' -> bitfield, 'b' -> Boolean, 'i' ->
- signed integer, 'u' -> unsigned integer, 'f' -> floating point, 'c' ->
- complex floating point, 'O' -> object, 'S' -> (byte-)string, 'U' ->
- unicode, 'V' -> void.
-
- .. c:member:: int PyArrayInterface.itemsize
-
- The number of bytes each item in the array requires.
-
- .. c:member:: int PyArrayInterface.flags
-
- Any of the bits :c:data:`NPY_ARRAY_C_CONTIGUOUS` (1),
- :c:data:`NPY_ARRAY_F_CONTIGUOUS` (2), :c:data:`NPY_ARRAY_ALIGNED` (0x100),
- :c:data:`NPY_ARRAY_NOTSWAPPED` (0x200), or :c:data:`NPY_ARRAY_WRITEABLE`
- (0x400) to indicate something about the data. The
- :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_C_CONTIGUOUS`, and
- :c:data:`NPY_ARRAY_F_CONTIGUOUS` flags can actually be determined from
- the other parameters. The flag :c:data:`NPY_ARR_HAS_DESCR`
- (0x800) can also be set to indicate to objects consuming the
- version 3 array interface that the descr member of the
- structure is present (it will be ignored by objects consuming
- version 2 of the array interface).
-
- .. c:member:: npy_intp *PyArrayInterface.shape
-
- An array containing the size of the array in each dimension.
-
- .. c:member:: npy_intp *PyArrayInterface.strides
-
- An array containing the number of bytes to jump to get to the next
- element in each dimension.
-
- .. c:member:: void *PyArrayInterface.data
-
- A pointer *to* the first element of the array.
-
- .. c:member:: PyObject *PyArrayInterface.descr
-
- A Python object describing the data-type in more detail (same
- as the *descr* key in :obj:`__array_interface__`). This can be
- ``NULL`` if *typekind* and *itemsize* provide enough
- information. This field is also ignored unless the
- :c:data:`NPY_ARR_HAS_DESCR` flag is set in *flags*.
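-
- A hedged consumer sketch, assuming the provider used a capsule
- created with a ``NULL`` name (as current NumPy does in place of the
- older :c:type:`PyCObject`):
-
- .. code-block:: c
-
- PyObject *cap = PyObject_GetAttrString(obj, "__array_struct__");
- if (cap != NULL) {
- PyArrayInterface *inter =
- (PyArrayInterface *)PyCapsule_GetPointer(cap, NULL);
- if (inter != NULL && inter->two == 2) {
- /* inter->nd, inter->shape, inter->strides and inter->data
- describe the array; keep `obj` alive as the base */
- }
- Py_DECREF(cap);
- }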
-
-
-Internally used structures
---------------------------
-
-Internally, the code uses some additional Python objects primarily for
-memory management. These types are not accessible directly from
-Python, and are not exposed to the C-API. They are included here only
-for completeness and assistance in understanding the code.
-
-
-.. c:type:: PyUFuncLoopObject
-
- A loose wrapper for a C-structure that contains the information
- needed for looping. This is useful if you are trying to understand
- the ufunc looping code. The :c:type:`PyUFuncLoopObject` is the associated
- C-structure. It is defined in the ``ufuncobject.h`` header.
-
-.. c:type:: PyUFuncReduceObject
-
- A loose wrapper for the C-structure that contains the information
- needed for reduce-like methods of ufuncs. This is useful if you are
- trying to understand the reduce, accumulate, and reduce-at
- code. The :c:type:`PyUFuncReduceObject` is the associated C-structure. It
- is defined in the ``ufuncobject.h`` header.
-
-.. c:type:: PyUFunc_Loop1d
-
- A simple linked-list of C-structures containing the information needed
- to define a 1-d loop for a ufunc for every defined signature of a
- user-defined data-type.
-
-.. c:var:: PyArrayMapIter_Type
-
- Advanced indexing is handled with this Python type. It is simply a
- loose wrapper around the C-structure containing the variables
- needed for advanced array indexing. The associated C-structure,
- :c:type:`PyArrayMapIterObject`, is useful if you are trying to
- understand the advanced-index mapping code. It is defined in the
- ``arrayobject.h`` header. This type is not exposed to Python and
- could be replaced with a C-structure. As a Python type it takes
- advantage of reference-counted memory management.
+++ /dev/null
-UFunc API
-=========
-
-.. sectionauthor:: Travis E. Oliphant
-
-.. index::
- pair: ufunc; C-API
-
-
-Constants
----------
-
-.. c:var:: UFUNC_ERR_{HANDLER}
-
- ``{HANDLER}`` can be **IGNORE**, **WARN**, **RAISE**, or **CALL**
-
-.. c:var:: UFUNC_{THING}_{ERR}
-
- ``{THING}`` can be **MASK**, **SHIFT**, or **FPE**, and ``{ERR}`` can
- be **DIVIDEBYZERO**, **OVERFLOW**, **UNDERFLOW**, and **INVALID**.
-
-.. c:var:: PyUFunc_{VALUE}
-
- ``{VALUE}`` can be **One** (1), **Zero** (0), or **None** (-1)
-
-
-Macros
-------
-
-.. c:macro:: NPY_LOOP_BEGIN_THREADS
-
- Used in universal function code to only release the Python GIL if
- loop->obj is not true (*i.e.* this is not an OBJECT array
- loop). Requires use of :c:macro:`NPY_BEGIN_THREADS_DEF` in variable
- declaration area.
-
-.. c:macro:: NPY_LOOP_END_THREADS
-
- Used in universal function code to re-acquire the Python GIL if it
- was released (because loop->obj was not true).
-
-.. c:function:: UFUNC_CHECK_ERROR(loop)
-
- A macro used internally to check for errors and goto fail if
- found. This macro requires a fail label in the current code
- block. The *loop* variable must have at least the members *obj*,
- *errormask*, and *errorobj*. If *loop* ->obj is nonzero, then
- :c:func:`PyErr_Occurred` () is called (meaning the GIL must be held). If
- *loop* ->obj is zero, then if *loop* ->errormask is nonzero,
- :c:func:`PyUFunc_checkfperr` is called with arguments *loop* ->errormask
- and *loop* ->errobj. If the result of this check of the IEEE
- floating point registers is true then the code redirects to the
- fail label which must be defined.
-
-.. c:function:: UFUNC_CHECK_STATUS(ret)
-
- Deprecated: use npy_clear_floatstatus from npy_math.h instead.
-
- A macro that expands to platform-dependent code. The *ret*
- variable can be any integer. The :c:data:`UFUNC_FPE_{ERR}` bits are
- set in *ret* according to the status of the corresponding error
- flags of the floating point processor.
-
-
-Functions
----------
-
-.. c:function:: PyObject* PyUFunc_FromFuncAndData( \
- PyUFuncGenericFunction* func, void** data, char* types, int ntypes, \
- int nin, int nout, int identity, char* name, char* doc, int unused)
-
- Create a new broadcasting universal function from required variables.
- Each ufunc builds around the notion of an element-by-element
- operation. Each ufunc object contains pointers to 1-d loops
- implementing the basic functionality for each supported type.
-
- .. note::
-
- The *func*, *data*, *types*, *name*, and *doc* arguments are not
- copied by :c:func:`PyUFunc_FromFuncAndData`. The caller must ensure
- that the memory used by these arrays is not freed as long as the
- ufunc object is alive.
-
- :param func:
- Must point to an array of length *ntypes* containing
- :c:type:`PyUFuncGenericFunction` items. These items are pointers to
- functions that actually implement the underlying
- (element-by-element) function :math:`N` times with the following
- signature:
-
- .. c:function:: void loopfunc(
- char** args, npy_intp* dimensions, npy_intp* steps, void* data)
-
- *args*
-
- An array of pointers to the actual data for the input and output
- arrays. The input arguments are given first followed by the output
- arguments.
-
- *dimensions*
-
- A pointer to the size of the dimension over which this function is
- looping.
-
- *steps*
-
- A pointer to the number of bytes to jump to get to the
- next element in this dimension for each of the input and
- output arguments.
-
- *data*
-
- Arbitrary data (extra arguments, function names, *etc.* )
- that can be stored with the ufunc and will be passed in
- when it is called.
-
- This is an example of a func specialized for addition of doubles
- returning doubles.
-
- .. code-block:: c
-
- static void
- double_add(char **args, npy_intp *dimensions, npy_intp *steps,
- void *extra)
- {
- npy_intp i;
- npy_intp is1 = steps[0], is2 = steps[1];
- npy_intp os = steps[2], n = dimensions[0];
- char *i1 = args[0], *i2 = args[1], *op = args[2];
- for (i = 0; i < n; i++) {
- *((double *)op) = *((double *)i1) +
- *((double *)i2);
- i1 += is1;
- i2 += is2;
- op += os;
- }
- }
-
- :param data:
- Should be ``NULL`` or a pointer to an array of size *ntypes*.
- This array may contain arbitrary extra data to be passed to
- the corresponding loop function in the func array.
-
- :param types:
- Length ``(nin + nout) * ntypes`` array of ``char`` encoding the
- `numpy.dtype.num` (built-in only) that the corresponding
- function in the ``func`` array accepts. For instance, for a comparison
- ufunc with three ``ntypes``, two ``nin`` and one ``nout``, where the
- first function accepts `numpy.int32` and the second
- `numpy.int64`, with both returning `numpy.bool_`, ``types`` would
- be ``(char[]) {5, 5, 0, 7, 7, 0}`` since ``NPY_INT32`` is 5,
- ``NPY_INT64`` is 7, and ``NPY_BOOL`` is 0.
-
- The bit-width names can also be used (e.g. :c:data:`NPY_INT32`,
- :c:data:`NPY_COMPLEX128` ) if desired.
-
- :ref:`ufuncs.casting` will be used at runtime to find the first
- ``func`` callable by the input/output provided.
-
- :param ntypes:
- How many different data-type-specific functions the ufunc has implemented.
-
- :param nin:
- The number of inputs to this operation.
-
- :param nout:
- The number of outputs.
-
- :param identity:
-
- Either :c:data:`PyUFunc_One`, :c:data:`PyUFunc_Zero`,
- :c:data:`PyUFunc_MinusOne`, or :c:data:`PyUFunc_None`.
- This specifies what should be returned when
- an empty array is passed to the reduce method of the ufunc.
- The special value :c:data:`PyUFunc_IdentityValue` may only be used with
- the :c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity` method, to
- allow an arbitrary python object to be used as the identity.
-
- :param name:
- The name for the ufunc as a ``NULL`` terminated string. Specifying
- a name of 'add' or 'multiply' enables a special behavior for
- integer-typed reductions when no dtype is given. If the input type is an
- integer (or boolean) data type smaller than the size of the `numpy.int_`
- data type, it will be internally upcast to the `numpy.int_` (or
- `numpy.uint`) data type.
-
- :param doc:
- Allows passing in a documentation string to be stored with the
- ufunc. The documentation string should not contain the name
- of the function or the calling signature as that will be
- dynamically determined from the object and available when
- accessing the **__doc__** attribute of the ufunc.
-
- :param unused:
- Unused and present for backwards compatibility of the C-API.
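-
- As a usage illustration (a minimal sketch, not part of the reference
- itself), the ``double_add`` loop from the example above could be
- registered in a module initialization function as follows; the names
- ``add_funcs``, ``add_types``, ``add_data`` and ``m`` are illustrative:
-
- .. code-block:: c
-
- static PyUFuncGenericFunction add_funcs[] = {&double_add};
- static char add_types[] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE};
- static void *add_data[] = {NULL};
-
- /* in the module initialization function, after import_array()
-    and import_ufunc() have succeeded: */
- PyObject *f = PyUFunc_FromFuncAndData(
-     add_funcs, add_data, add_types, 1,  /* ntypes */
-     2, 1,                               /* nin, nout */
-     PyUFunc_Zero,                       /* identity for reduce */
-     "double_add", "adds two float64 arrays", 0);
- if (f == NULL) {
-     return NULL;
- }
- PyModule_AddObject(m, "double_add", f);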
-
-.. c:function:: PyObject* PyUFunc_FromFuncAndDataAndSignature( \
- PyUFuncGenericFunction* func, void** data, char* types, int ntypes, \
- int nin, int nout, int identity, char* name, char* doc, int unused, char *signature)
-
- This function is very similar to PyUFunc_FromFuncAndData above, but has
- an extra *signature* argument, to define a
- :ref:`generalized universal function <c-api.generalized-ufuncs>`.
- Just as ufuncs are built around an element-by-element operation,
- gufuncs are built around subarray-by-subarray operations, with the
- :ref:`signature <details-of-signature>` defining the subarrays to operate on.
-
- :param signature:
- The signature for the new gufunc. Setting it to NULL is equivalent
- to calling PyUFunc_FromFuncAndData. A copy of the string is made,
- so the passed in buffer can be freed.
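-
- As a hedged sketch (assuming loop, data and type arrays set up as for
- :c:func:`PyUFunc_FromFuncAndData`; the ``inner_*`` names are
- illustrative), an inner-product-like gufunc over the last axis of its
- inputs could be declared with the signature ``"(i),(i)->()"``:
-
- .. code-block:: c
-
- PyObject *g = PyUFunc_FromFuncAndDataAndSignature(
-     inner_funcs, inner_data, inner_types, 1,
-     2, 1, PyUFunc_None,
-     "inner1d", "inner product over the last axis", 0,
-     "(i),(i)->()");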
-
-.. c:function:: PyObject* PyUFunc_FromFuncAndDataAndSignatureAndIdentity( \
- PyUFuncGenericFunction *func, void **data, char *types, int ntypes, \
- int nin, int nout, int identity, char *name, char *doc, int unused, char *signature, \
- PyObject *identity_value)
-
- This function is very similar to `PyUFunc_FromFuncAndDataAndSignature` above,
- but has an extra *identity_value* argument, to define an arbitrary identity
- for the ufunc when ``identity`` is passed as ``PyUFunc_IdentityValue``.
-
- :param identity_value:
- The identity for the new gufunc. Must be passed as ``NULL`` unless the
- ``identity`` argument is ``PyUFunc_IdentityValue``. Setting it to NULL
- is equivalent to calling PyUFunc_FromFuncAndDataAndSignature.
-
-
-.. c:function:: int PyUFunc_RegisterLoopForType( \
- PyUFuncObject* ufunc, int usertype, PyUFuncGenericFunction function, \
- int* arg_types, void* data)
-
- This function allows the user to register a 1-d loop with an
- already-created ufunc, to be used whenever the ufunc is called
- with any of its input arguments as the user-defined
- data-type. This is needed in order to make ufuncs work with
- custom data-types. The data-type must have been previously
- registered with the NumPy system. The loop is passed in as
- *function*. This loop can take arbitrary data which should be
- passed in as *data*. The data-types the loop requires are passed
- in as *arg_types* which must be a pointer to memory at least as
- large as ``ufunc->nargs``.
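-
- A minimal sketch of the calling pattern (the ``rational_*`` names are
- hypothetical; ``rational_num`` stands for the type number returned by
- :c:func:`PyArray_RegisterDataType` for the custom type):
-
- .. code-block:: c
-
- /* loop data-types: two inputs and one output, all the user type */
- static int rational_types[3] = {rational_num, rational_num, rational_num};
-
- if (PyUFunc_RegisterLoopForType((PyUFuncObject *)add_ufunc,
-                                 rational_num, rational_add_loop,
-                                 rational_types, NULL) < 0) {
-     return NULL;  /* registration failed */
- }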
-
-.. c:function:: int PyUFunc_RegisterLoopForDescr( \
- PyUFuncObject* ufunc, PyArray_Descr* userdtype, \
- PyUFuncGenericFunction function, PyArray_Descr** arg_dtypes, void* data)
-
- This function behaves like PyUFunc_RegisterLoopForType above, except
- that it allows the user to register a 1-d loop using PyArray_Descr
- objects instead of dtype type num values. This allows a 1-d loop to be
- registered for structured array data-types and custom data-types
- instead of scalar data-types.
-
-.. c:function:: int PyUFunc_ReplaceLoopBySignature( \
- PyUFuncObject* ufunc, PyUFuncGenericFunction newfunc, int* signature, \
- PyUFuncGenericFunction* oldfunc)
-
- Replace a 1-d loop matching the given *signature* in the
- already-created *ufunc* with the new 1-d loop *newfunc*. Return the
- old 1-d loop function in *oldfunc*. Return 0 on success and -1 on
- failure. This function works only with built-in types (use
- :c:func:`PyUFunc_RegisterLoopForType` for user-defined types). A
- signature is an array of data-type numbers indicating the inputs
- followed by the outputs assumed by the 1-d loop.
-
-.. c:function:: int PyUFunc_GenericFunction( \
- PyUFuncObject* self, PyObject* args, PyObject* kwds, PyArrayObject** mps)
-
- A generic ufunc call. The ufunc is passed in as *self*, the arguments
- to the ufunc as *args* and *kwds*. The *mps* argument is an array of
- :c:type:`PyArrayObject` pointers whose values are discarded and which
- receive the converted input arguments as well as the ufunc outputs
- when success is returned. The user is responsible for managing this
- array and receives a new reference for each array in *mps*. The total
- number of arrays in *mps* is given by *self* ->nin + *self* ->nout.
-
- Returns 0 on success, -1 on error.
-
-.. c:function:: int PyUFunc_checkfperr(int errmask, PyObject* errobj)
-
- A simple interface to the IEEE error-flag checking support. The
- *errmask* argument is a mask of :c:data:`UFUNC_MASK_{ERR}` bitmasks
- indicating which errors to check for (and how to check for
- them). The *errobj* must be a Python tuple with two elements: a
- string containing the name which will be used in any communication
- of error and either a callable Python object (call-back function)
- or :c:data:`Py_None`. The callable object will only be used if
- :c:data:`UFUNC_ERR_CALL` is set as the desired error checking
- method. This routine manages the GIL and is safe to call even
- after releasing the GIL. If an error in the IEEE-compatible
- hardware is detected, -1 is returned; otherwise, 0 is
- returned.
-
-.. c:function:: void PyUFunc_clearfperr()
-
- Clear the IEEE error flags.
-
-.. c:function:: void PyUFunc_GetPyValues( \
- char* name, int* bufsize, int* errmask, PyObject** errobj)
-
- Get the Python values used for ufunc processing from the
- thread-local storage area, unless the defaults have been set, in
- which case the name lookup is bypassed. The name is placed as a
- string in the first element of *\*errobj*. The second element is
- the looked-up function to call on error callback. The value of the
- looked-up buffer-size to use is passed into *bufsize*, and the
- value of the error mask is placed into *errmask*.
-
-
-Generic functions
------------------
-
-At the core of every ufunc is a collection of type-specific functions
-that define the basic functionality for each of the supported types.
-These functions must evaluate the underlying function :math:`N\geq1`
-times. Extra data may be passed in that may be used during the
-calculation. This feature allows some general functions to be used as
-these basic looping functions. The general function has all the code
-needed to point variables to the right place and set up a function
-call. The general function assumes that the actual function to call is
-passed in as the extra data and calls it with the correct values. All
-of these functions are suitable for placing directly in the array of
-functions stored in the functions member of the PyUFuncObject
-structure.
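-
-As a brief sketch of how these are used (the ``cos_*`` names are
-illustrative and ``<math.h>`` is assumed to be included), the C
-library's ``cos`` can be exposed as a double-to-double ufunc by using
-:c:func:`PyUFunc_d_d` as the loop and passing the actual function
-through the *data* array:
-
-.. code-block:: c
-
-   static PyUFuncGenericFunction cos_funcs[] = {&PyUFunc_d_d};
-   static char cos_types[] = {NPY_DOUBLE, NPY_DOUBLE};
-   /* the function to call is passed in as the extra data */
-   static void *cos_data[] = {(void *)cos};
-
-   /* then, in the module initialization function:
-      PyUFunc_FromFuncAndData(cos_funcs, cos_data, cos_types, 1,
-                              1, 1, PyUFunc_None, "cos", "cosine", 0);
-   */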
-
-.. c:function:: void PyUFunc_f_f_As_d_d( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_d_d( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_f_f( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_g_g( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_F_F_As_D_D( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_F_F( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_D_D( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_G_G( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_e_e( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_e_e_As_f_f( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_e_e_As_d_d( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
- Type specific, core 1-d functions for ufuncs where each
- calculation is obtained by calling a function taking one input
- argument and returning one output. This function is passed in
- ``func``. The letters correspond to dtype characters of the
- supported data types ( ``e`` - half, ``f`` - float, ``d`` - double,
- ``g`` - long double, ``F`` - cfloat, ``D`` - cdouble,
- ``G`` - clongdouble). The argument *func* must support the same
- signature. The ``_As_X_X`` variants assume ndarrays of one data type
- but cast the values to use an underlying function that takes a
- different data type. Thus, :c:func:`PyUFunc_f_f_As_d_d` uses
- ndarrays of data type :c:data:`NPY_FLOAT` but calls out to a
- C-function that takes double and returns double.
-
-.. c:function:: void PyUFunc_ff_f_As_dd_d( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_ff_f( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_dd_d( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_gg_g( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_FF_F_As_DD_D( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_DD_D( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_FF_F( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_GG_G( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_ee_e( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_ee_e_As_ff_f( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_ee_e_As_dd_d( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
- Type specific, core 1-d functions for ufuncs where each
- calculation is obtained by calling a function taking two input
- arguments and returning one output. The underlying function to
- call is passed in as *func*. The letters correspond to
- dtype characters of the specific data type supported by the
- general-purpose function. The argument ``func`` must support the
- corresponding signature. The ``_As_XX_X`` variants assume ndarrays
- of one data type but cast the values at each iteration of the loop
- to use the underlying function that takes a different data type.
-
-.. c:function:: void PyUFunc_O_O( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
-.. c:function:: void PyUFunc_OO_O( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
- One-input, one-output, and two-input, one-output core 1-d functions
- for the :c:data:`NPY_OBJECT` data type. These functions handle reference
- count issues and return early on error. The actual function to call is
- *func* and it must accept calls with the signature ``(PyObject*)
- (PyObject*)`` for :c:func:`PyUFunc_O_O` or ``(PyObject*)(PyObject *,
- PyObject *)`` for :c:func:`PyUFunc_OO_O`.
-
-.. c:function:: void PyUFunc_O_O_method( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
- This general purpose 1-d core function assumes that *func* is a string
- representing a method of the input object. For each
- iteration of the loop, the Python object is extracted from the array
- and its *func* method is called, with the result stored in the output array.
-
-.. c:function:: void PyUFunc_OO_O_method( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
- This general purpose 1-d core function assumes that *func* is a
- string representing a method of the input object that takes one
- argument. The first argument in *args* is the object whose method
- is called, the second argument in *args* is the argument passed to
- the method. The output of the method is stored in the third entry
- of *args*.
-
-.. c:function:: void PyUFunc_On_Om( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* func)
-
- This is the 1-d core function used by the dynamic ufuncs created
- by umath.frompyfunc(function, nin, nout). In this case *func* is a
- pointer to a :c:type:`PyUFunc_PyFuncData` structure which has definition
-
- .. c:type:: PyUFunc_PyFuncData
-
- .. code-block:: c
-
- typedef struct {
- int nin;
- int nout;
- PyObject *callable;
- } PyUFunc_PyFuncData;
-
- At each iteration of the loop, the *nin* input objects are extracted
- from their object arrays and placed into an argument tuple, the Python
- *callable* is called with the input arguments, and the nout
- outputs are placed into their object arrays.
-
-
-Importing the API
------------------
-
-.. c:var:: PY_UFUNC_UNIQUE_SYMBOL
-
-.. c:var:: NO_IMPORT_UFUNC
-
-.. c:function:: void import_ufunc(void)
-
- These are the constants and functions for accessing the ufunc
- C-API from extension modules in precisely the same way as the
- array C-API can be accessed. The ``import_ufunc`` () function must
- always be called (in the initialization subroutine of the
- extension module). If your extension module is in one file then
- that is all that is required. The other two constants are useful
- if your extension module makes use of multiple files. In that
- case, define :c:data:`PY_UFUNC_UNIQUE_SYMBOL` to something unique to
- your code and then in source files that do not contain the module
- initialization function but still need access to the UFUNC API,
- define :c:data:`PY_UFUNC_UNIQUE_SYMBOL` to the same name used previously
- and also define :c:data:`NO_IMPORT_UFUNC`.
-
- The C-API is actually an array of function pointers. This array is
- created (and pointed to by a global variable) by import_ufunc. The
- global variable is either statically defined or allowed to be seen
- by other files depending on the state of
- :c:data:`PY_UFUNC_UNIQUE_SYMBOL` and :c:data:`NO_IMPORT_UFUNC`.
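-
- For example, a sketch of the two-file pattern
- (``MyExt_UFUNC_API`` is an arbitrary name of your choosing):
-
- .. code-block:: c
-
- /* In the file containing the module initialization function: */
- #define PY_UFUNC_UNIQUE_SYMBOL MyExt_UFUNC_API
- #include "numpy/ufuncobject.h"
- /* ... and call import_ufunc() inside the init function ... */
-
- /* In every other file that needs the ufunc C-API: */
- #define PY_UFUNC_UNIQUE_SYMBOL MyExt_UFUNC_API
- #define NO_IMPORT_UFUNC
- #include "numpy/ufuncobject.h"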
-
-.. index::
- pair: ufunc; C-API
--- /dev/null
+Array API
+=========
+
+.. sectionauthor:: Travis E. Oliphant
+
+| The test of a first-rate intelligence is the ability to hold two
+| opposed ideas in the mind at the same time, and still retain the
+| ability to function.
+| --- *F. Scott Fitzgerald*
+
+| For a successful technology, reality must take precedence over public
+| relations, for Nature cannot be fooled.
+| --- *Richard P. Feynman*
+
+.. index::
+ pair: ndarray; C-API
+ pair: C-API; array
+
+
+Array structure and data access
+-------------------------------
+
+These macros access the :c:type:`PyArrayObject` structure members and are
+defined in ``ndarraytypes.h``. The input argument, *arr*, can be any
+:c:type:`PyObject *<PyObject>` that is directly interpretable as a
+:c:type:`PyArrayObject *` (any instance of the :c:data:`PyArray_Type`
+and its sub-types).
+
+.. c:function:: int PyArray_NDIM(PyArrayObject *arr)
+
+ The number of dimensions in the array.
+
+.. c:function:: int PyArray_FLAGS(PyArrayObject* arr)
+
+ Returns an integer representing the :ref:`array-flags<array-flags>`.
+
+.. c:function:: int PyArray_TYPE(PyArrayObject* arr)
+
+ Return the (builtin) typenumber for the elements of this array.
+
+.. c:function:: int PyArray_SETITEM( \
+ PyArrayObject* arr, void* itemptr, PyObject* obj)
+
+ Convert obj and place it in the ndarray, *arr*, at the place
+ pointed to by itemptr. Return -1 if an error occurs or 0 on
+ success.
+
+.. c:function:: void PyArray_ENABLEFLAGS(PyArrayObject* arr, int flags)
+
+ .. versionadded:: 1.7
+
+ Enables the specified array flags. This function does no validation,
+ and assumes that you know what you're doing.
+
+.. c:function:: void PyArray_CLEARFLAGS(PyArrayObject* arr, int flags)
+
+ .. versionadded:: 1.7
+
+ Clears the specified array flags. This function does no validation,
+ and assumes that you know what you're doing.
+
+.. c:function:: void *PyArray_DATA(PyArrayObject *arr)
+
+.. c:function:: char *PyArray_BYTES(PyArrayObject *arr)
+
+ These two macros are similar and obtain the pointer to the
+ data-buffer for the array. The first macro can (and should) be
+ assigned to a pointer of a particular type, where the second is for
+ generic byte processing. If you have not guaranteed a contiguous
+ and/or aligned array then be sure you understand how to access the
+ data in the array to avoid memory and/or alignment problems.
+
+.. c:function:: npy_intp *PyArray_DIMS(PyArrayObject *arr)
+
+ Returns a pointer to the dimensions/shape of the array. The
+ number of elements matches the number of dimensions
+ of the array. Can return ``NULL`` for 0-dimensional arrays.
+
+.. c:function:: npy_intp *PyArray_SHAPE(PyArrayObject *arr)
+
+ .. versionadded:: 1.7
+
+ A synonym for :c:func:`PyArray_DIMS`, named to be consistent with the
+ `shape <numpy.ndarray.shape>` usage within Python.
+
+.. c:function:: npy_intp *PyArray_STRIDES(PyArrayObject* arr)
+
+ Returns a pointer to the strides of the array. The
+ number of elements matches the number of dimensions
+ of the array.
+
+.. c:function:: npy_intp PyArray_DIM(PyArrayObject* arr, int n)
+
+ Return the shape in the *n* :math:`^{\textrm{th}}` dimension.
+
+.. c:function:: npy_intp PyArray_STRIDE(PyArrayObject* arr, int n)
+
+ Return the stride in the *n* :math:`^{\textrm{th}}` dimension.
+
+.. c:function:: npy_intp PyArray_ITEMSIZE(PyArrayObject* arr)
+
+ Return the itemsize for the elements of this array.
+
+ Note that, in the old API that was deprecated in version 1.7, this function
+ had the return type ``int``.
+
+.. c:function:: npy_intp PyArray_SIZE(PyArrayObject* arr)
+
+ Returns the total size (in number of elements) of the array.
+
+.. c:function:: npy_intp PyArray_Size(PyArrayObject* obj)
+
+ Returns 0 if *obj* is not a sub-class of ndarray. Otherwise,
+ returns the total number of elements in the array. Safer version
+ of :c:func:`PyArray_SIZE` (*obj*).
+
+.. c:function:: npy_intp PyArray_NBYTES(PyArrayObject* arr)
+
+ Returns the total number of bytes consumed by the array.
+
+.. c:function:: PyObject *PyArray_BASE(PyArrayObject* arr)
+
+ This returns the base object of the array. In most cases, this
+ means the object which owns the memory the array is pointing at.
+
+ If you are constructing an array using the C API, and specifying
+ your own memory, you should use the function :c:func:`PyArray_SetBaseObject`
+ to set the base to an object which owns the memory.
+
+ If the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or the
+ :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flags are set, it has a different
+ meaning, namely base is the array into which the current array will
+ be copied upon copy resolution. This overloading of the base property
+ for two functions is likely to change in a future version of NumPy.
+
+.. c:function:: PyArray_Descr *PyArray_DESCR(PyArrayObject* arr)
+
+ Returns a borrowed reference to the dtype property of the array.
+
+.. c:function:: PyArray_Descr *PyArray_DTYPE(PyArrayObject* arr)
+
+ .. versionadded:: 1.7
+
+ A synonym for PyArray_DESCR, named to be consistent with the
+ 'dtype' usage within Python.
+
+.. c:function:: PyObject *PyArray_GETITEM(PyArrayObject* arr, void* itemptr)
+
+ Get a Python object of a builtin type from the ndarray, *arr*,
+ at the location pointed to by itemptr. Return ``NULL`` on failure.
+
+ `numpy.ndarray.item` is identical to PyArray_GETITEM.
+
+
+Data access
+^^^^^^^^^^^
+
+These functions and macros provide easy access to elements of the
+ndarray from C. These work for all arrays. You may need to take care
+when accessing the data in the array, however, if it is not in machine
+byte-order, misaligned, or not writeable. In other words, be sure to
+respect the state of the flags unless you know what you are doing, or
+have previously guaranteed an array that is writeable, aligned, and in
+machine byte-order using :c:func:`PyArray_FromAny`. If you wish to handle all
+types of arrays, the copyswap function for each type is useful for
+handling misbehaved arrays. Some platforms (e.g. Solaris) do not like
+misaligned data and will crash if you de-reference a misaligned
+pointer. Other platforms (e.g. x86 Linux) will just work more slowly
+with misaligned data.
+
+.. c:function:: void* PyArray_GetPtr(PyArrayObject* aobj, npy_intp* ind)
+
+ Return a pointer to the data of the ndarray, *aobj*, at the
+ N-dimensional index given by the c-array, *ind*, (which must be
+ at least *aobj* ->nd in size). You may want to typecast the
+ returned pointer to the data type of the ndarray.
+
+.. c:function:: void* PyArray_GETPTR1(PyArrayObject* obj, npy_intp i)
+
+.. c:function:: void* PyArray_GETPTR2( \
+ PyArrayObject* obj, npy_intp i, npy_intp j)
+
+.. c:function:: void* PyArray_GETPTR3( \
+ PyArrayObject* obj, npy_intp i, npy_intp j, npy_intp k)
+
+.. c:function:: void* PyArray_GETPTR4( \
+ PyArrayObject* obj, npy_intp i, npy_intp j, npy_intp k, npy_intp l)
+
+ Quick, inline access to the element at the given coordinates in
+ the ndarray, *obj*, which must have respectively 1, 2, 3, or 4
+ dimensions (this is not checked). The corresponding *i*, *j*,
+ *k*, and *l* coordinates can be any integer but will be
+ interpreted as ``npy_intp``. You may want to typecast the
+ returned pointer to the data type of the ndarray.
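+
+ For example, a minimal sketch that sums the elements of a 2-d
+ :c:data:`NPY_DOUBLE` array (checking the type and dimensionality of
+ *arr* is assumed to have been done already):
+
+ .. code-block:: c
+
+ npy_intp i, j;
+ double sum = 0.0;
+ for (i = 0; i < PyArray_DIM(arr, 0); i++) {
+     for (j = 0; j < PyArray_DIM(arr, 1); j++) {
+         sum += *(double *)PyArray_GETPTR2(arr, i, j);
+     }
+ }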
+
+
+Creating arrays
+---------------
+
+
+From scratch
+^^^^^^^^^^^^
+
+.. c:function:: PyObject* PyArray_NewFromDescr( \
+ PyTypeObject* subtype, PyArray_Descr* descr, int nd, npy_intp const* dims, \
+ npy_intp const* strides, void* data, int flags, PyObject* obj)
+
+ This function steals a reference to *descr*. The easiest way to get one
+ is using :c:func:`PyArray_DescrFromType`.
+
+ This is the main array creation function. Most new arrays are
+ created with this flexible function.
+
+ The returned object is an object of Python-type *subtype*, which
+ must be a subtype of :c:data:`PyArray_Type`. The array has *nd*
+ dimensions, described by *dims*. The data-type descriptor of the
+ new array is *descr*.
+
+ If *subtype* is of an array subclass instead of the base
+ :c:data:`&PyArray_Type<PyArray_Type>`, then *obj* is the object to pass to
+ the :obj:`~numpy.class.__array_finalize__` method of the subclass.
+
+ If *data* is ``NULL``, then new uninitialized memory will be allocated and
+ *flags* can be non-zero to indicate a Fortran-style contiguous array. Use
+ :c:func:`PyArray_FILLWBYTE` to initialize the memory.
+
+ If *data* is not ``NULL``, then it is assumed to point to the memory
+ to be used for the array and the *flags* argument is used as the
+ new flags for the array (except the state of :c:data:`NPY_ARRAY_OWNDATA`,
+ :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` and :c:data:`NPY_ARRAY_UPDATEIFCOPY`
+ flags of the new array will be reset).
+
+ In addition, if *data* is non-NULL, then *strides* can
+ also be provided. If *strides* is ``NULL``, then the array strides
+ are computed as C-style contiguous (default) or Fortran-style
+ contiguous (*flags* is nonzero for ``NULL`` *data*, or *flags* &
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS` is nonzero for non-NULL *data*). Any
+ provided *dims* and *strides* are copied into newly allocated
+ dimension and strides arrays for the new array object.
+
+ :c:func:`PyArray_CheckStrides` can help verify non- ``NULL`` stride
+ information.
+
+ If ``data`` is provided, it must stay alive for the life of the array. One
+ way to manage this is through :c:func:`PyArray_SetBaseObject`.
+
+.. c:function:: PyObject* PyArray_NewLikeArray( \
+ PyArrayObject* prototype, NPY_ORDER order, PyArray_Descr* descr, \
+ int subok)
+
+ .. versionadded:: 1.6
+
+ This function steals a reference to *descr* if it is not NULL.
+
+ This array creation routine allows for the convenient creation of
+ a new array matching an existing array's shape and memory layout,
+ possibly changing the layout and/or data type.
+
+ When *order* is :c:data:`NPY_ANYORDER`, the result order is
+ :c:data:`NPY_FORTRANORDER` if *prototype* is a Fortran array,
+ :c:data:`NPY_CORDER` otherwise. When *order* is
+ :c:data:`NPY_KEEPORDER`, the result order matches that of *prototype*, even
+ when the axes of *prototype* aren't in C or Fortran order.
+
+ If *descr* is NULL, the data type of *prototype* is used.
+
+ If *subok* is 1, the newly created array will use the sub-type of
+ *prototype* to create the new array, otherwise it will create a
+ base-class array.
+
+.. c:function:: PyObject* PyArray_New( \
+ PyTypeObject* subtype, int nd, npy_intp const* dims, int type_num, \
+ npy_intp const* strides, void* data, int itemsize, int flags, \
+ PyObject* obj)
+
+ This is similar to :c:func:`PyArray_NewFromDescr` (...) except you
+ specify the data-type descriptor with *type_num* and *itemsize*,
+ where *type_num* corresponds to a builtin (or user-defined)
+ type. If the type always has the same number of bytes, then
+ itemsize is ignored. Otherwise, itemsize specifies the particular
+ size of this array.
+
+
+
+.. warning::
+
+ If data is passed to :c:func:`PyArray_NewFromDescr` or :c:func:`PyArray_New`,
+ this memory must not be deallocated until the new array is
+ deleted. If this data came from another Python object, this can
+ be accomplished using :c:func:`Py_INCREF` on that object and setting the
+ base member of the new array to point to that object. If strides
+ are passed in they must be consistent with the dimensions, the
+ itemsize, and the data of the array.
+
+.. c:function:: PyObject* PyArray_SimpleNew(int nd, npy_intp const* dims, int typenum)
+
+ Create a new uninitialized array of type, *typenum*, whose size in
+ each of *nd* dimensions is given by the integer array, *dims*. The
+ memory for the array is uninitialized (unless typenum is
+ :c:data:`NPY_OBJECT` in which case each element in the array is set to
+ NULL). The *typenum* argument allows specification of any of the
+ builtin data-types such as :c:data:`NPY_FLOAT` or :c:data:`NPY_LONG`.
+ The memory for the array can be set to zero if desired using
+ :c:func:`PyArray_FILLWBYTE` (return_object, 0). This function cannot be
+ used to create a flexible-type array (no itemsize given).
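+
+ For example, a short sketch creating a fresh 3-by-4 array of doubles
+ and zeroing its memory with :c:func:`PyArray_FILLWBYTE` (documented
+ below):
+
+ .. code-block:: c
+
+ npy_intp dims[2] = {3, 4};
+ PyObject *arr = PyArray_SimpleNew(2, dims, NPY_DOUBLE);
+ if (arr == NULL) {
+     return NULL;
+ }
+ PyArray_FILLWBYTE(arr, 0);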
+
+.. c:function:: PyObject* PyArray_SimpleNewFromData( \
+ int nd, npy_intp const* dims, int typenum, void* data)
+
+ Create an array wrapper around the memory pointed to by *data*. The
+ array flags will have a default that the data area is
+ well-behaved and C-style contiguous. The shape of the array is
+ given by the *dims* c-array of length *nd*. The data-type of the
+ array is indicated by *typenum*. If the data comes from another
+ reference-counted Python object, the reference count on this object
+ should be increased after the pointer is passed in, and the base member
+ of the returned ndarray should point to the Python object that owns
+ the data. This ensures that the provided memory is not
+ freed while the returned array is in existence. To free the memory as
+ soon as the ndarray is deallocated, set the OWNDATA flag on the
+ returned ndarray.
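+
+ A common way to arrange this is to tie the allocation to a
+ ``PyCapsule`` installed as the array base via
+ :c:func:`PyArray_SetBaseObject` (documented below); a minimal sketch,
+ assuming ``<stdlib.h>`` and an illustrative ``free_capsule`` helper:
+
+ .. code-block:: c
+
+ static void
+ free_capsule(PyObject *capsule)
+ {
+     free(PyCapsule_GetPointer(capsule, NULL));
+ }
+
+ /* ... */
+ npy_intp dims[1] = {n};
+ double *mem = malloc(n * sizeof(double));
+ PyObject *arr = PyArray_SimpleNewFromData(1, dims, NPY_DOUBLE, mem);
+ PyObject *base = PyCapsule_New(mem, NULL, free_capsule);
+ if (arr == NULL || base == NULL ||
+     PyArray_SetBaseObject((PyArrayObject *)arr, base) < 0) {
+     /* clean up and propagate the error */
+ }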
+
+.. c:function:: PyObject* PyArray_SimpleNewFromDescr( \
+ int nd, npy_intp const* dims, PyArray_Descr* descr)
+
+ This function steals a reference to *descr*.
+
+ Create a new array with the provided data-type descriptor, *descr*,
+ of the shape determined by *nd* and *dims*.
+
+.. c:function:: PyArray_FILLWBYTE(PyObject* obj, int val)
+
+ Fill the array pointed to by *obj* ---which must be a (subclass
+ of) ndarray---with the contents of *val* (evaluated as a byte).
+ This macro calls memset, so obj must be contiguous.
+
+.. c:function:: PyObject* PyArray_Zeros( \
+ int nd, npy_intp const* dims, PyArray_Descr* dtype, int fortran)
+
+ Construct a new *nd* -dimensional array with shape given by *dims*
+ and data type given by *dtype*. If *fortran* is non-zero, then a
+ Fortran-order array is created, otherwise a C-order array is
+ created. Fill the memory with zeros (or the 0 object if *dtype*
+ corresponds to :c:type:`NPY_OBJECT` ).
+
+.. c:function:: PyObject* PyArray_ZEROS( \
+ int nd, npy_intp const* dims, int type_num, int fortran)
+
+ Macro form of :c:func:`PyArray_Zeros` which takes a type-number instead
+ of a data-type object.
+
+.. c:function:: PyObject* PyArray_Empty( \
+ int nd, npy_intp const* dims, PyArray_Descr* dtype, int fortran)
+
+ Construct a new *nd* -dimensional array with shape given by *dims*
+ and data type given by *dtype*. If *fortran* is non-zero, then a
+ Fortran-order array is created, otherwise a C-order array is
+ created. The array is uninitialized unless the data type
+ corresponds to :c:type:`NPY_OBJECT` in which case the array is
+ filled with :c:data:`Py_None`.
+
+.. c:function:: PyObject* PyArray_EMPTY( \
+ int nd, npy_intp const* dims, int typenum, int fortran)
+
+ Macro form of :c:func:`PyArray_Empty` which takes a type-number,
+ *typenum*, instead of a data-type object.
+
+.. c:function:: PyObject* PyArray_Arange( \
+ double start, double stop, double step, int typenum)
+
+ Construct a new 1-dimensional array of data-type, *typenum*, that
+ ranges from *start* to *stop* (exclusive) in increments of *step*.
+ Equivalent to **arange** (*start*, *stop*, *step*, dtype).
+
+.. c:function:: PyObject* PyArray_ArangeObj( \
+ PyObject* start, PyObject* stop, PyObject* step, PyArray_Descr* descr)
+
+ Construct a new 1-dimensional array of data-type determined by
+ ``descr``, that ranges from ``start`` to ``stop`` (exclusive) in
+ increments of ``step``. Equivalent to ``arange(start, stop, step,
+ dtype)``.
+
+.. c:function:: int PyArray_SetBaseObject(PyArrayObject* arr, PyObject* obj)
+
+ .. versionadded:: 1.7
+
+ This function **steals a reference** to ``obj`` and sets it as the
+ base property of ``arr``.
+
+ If you construct an array by passing in your own memory buffer as
+ a parameter, you need to set the array's `base` property to ensure
+ the lifetime of the memory buffer is appropriate.
+
+ The return value is 0 on success, -1 on failure.
+
+ If the object provided is an array, this function traverses the
+ chain of `base` pointers so that each array points to the owner
+ of the memory directly. Once the base is set, it may not be changed
+ to another value.
+
+From other objects
+^^^^^^^^^^^^^^^^^^
+
+.. c:function:: PyObject* PyArray_FromAny( \
+ PyObject* op, PyArray_Descr* dtype, int min_depth, int max_depth, \
+ int requirements, PyObject* context)
+
+ This is the main function used to obtain an array from any nested
+ sequence, or object that exposes the array interface, *op*. The
+ parameters allow specification of the required *dtype*, the
+ minimum (*min_depth*) and maximum (*max_depth*) number of
+ dimensions acceptable, and other *requirements* for the array. This
+ function **steals a reference** to the dtype argument, which needs
+ to be a :c:type:`PyArray_Descr` structure
+ indicating the desired data-type (including required
+ byteorder). The *dtype* argument may be ``NULL``, indicating that any
+ data-type (and byteorder) is acceptable. Unless
+ :c:data:`NPY_ARRAY_FORCECAST` is present in *requirements*,
+ this call will generate an error if the data
+ type cannot be safely obtained from the object. If you want to use
+ ``NULL`` for the *dtype* and ensure the array is not swapped then
+ use :c:func:`PyArray_CheckFromAny`. A value of 0 for either of the
+ depth parameters causes the parameter to be ignored. Any of the
+ following array flags can be added (*e.g.* using \|) to get the
+ *requirements* argument. If your code can handle general (*e.g.*
+ strided, byte-swapped, or unaligned arrays) then *requirements*
+ may be 0. Also, if *op* is not already an array (or does not
+ expose the array interface), then a new array will be created (and
+ filled from *op* using the sequence protocol). The new array will
+ have :c:data:`NPY_ARRAY_DEFAULT` as its flags member. The *context* argument
+ is passed to the :obj:`~numpy.class.__array__` method of *op* and is only used if
+ the array is constructed that way. Almost always this
+ parameter is ``NULL``. A usage sketch follows the list of flag
+ constants below.
+
+ .. c:var:: NPY_ARRAY_C_CONTIGUOUS
+
+ Make sure the returned array is C-style contiguous
+
+ .. c:var:: NPY_ARRAY_F_CONTIGUOUS
+
+ Make sure the returned array is Fortran-style contiguous.
+
+ .. c:var:: NPY_ARRAY_ALIGNED
+
+ Make sure the returned array is aligned on proper boundaries for its
+ data type. An aligned array has the data pointer and every stride
+ factor as a multiple of the alignment factor for the data-type
+ descriptor.
+
+ .. c:var:: NPY_ARRAY_WRITEABLE
+
+ Make sure the returned array can be written to.
+
+ .. c:var:: NPY_ARRAY_ENSURECOPY
+
+ Make sure a copy is made of *op*. If this flag is not
+ present, data is not copied if it can be avoided.
+
+ .. c:var:: NPY_ARRAY_ENSUREARRAY
+
+ Make sure the result is a base-class ndarray. By
+ default, if *op* is an instance of a subclass of
+ ndarray, an instance of that same subclass is returned. If
+ this flag is set, an ndarray object will be returned instead.
+
+ .. c:var:: NPY_ARRAY_FORCECAST
+
+ Force a cast to the output type even if it cannot be done
+ safely. Without this flag, a data cast will occur only if it
+ can be done safely, otherwise an error is raised.
+
+ .. c:var:: NPY_ARRAY_WRITEBACKIFCOPY
+
+ If *op* is already an array, but does not satisfy the
+ requirements, then a copy is made (which will satisfy the
+ requirements). If this flag is present and a copy (of an object
+ that is already an array) must be made, then the corresponding
+ :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag is set in the returned
+ copy and *op* is made to be read-only. You must be sure to call
+ :c:func:`PyArray_ResolveWritebackIfCopy` to copy the contents
+ back into *op*, at which point the *op* array
+ will be made writeable again. If *op* is not writeable to begin
+ with, or if it is not already an array, then an error is raised.
+
+ .. c:var:: NPY_ARRAY_UPDATEIFCOPY
+
+ Deprecated. Use :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, which is similar.
+ This flag "automatically" copies the data back when the returned
+ array is deallocated, which is not supported in all python
+ implementations.
+
+ .. c:var:: NPY_ARRAY_BEHAVED
+
+ :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE`
+
+ .. c:var:: NPY_ARRAY_CARRAY
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_BEHAVED`
+
+ .. c:var:: NPY_ARRAY_CARRAY_RO
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
+
+ .. c:var:: NPY_ARRAY_FARRAY
+
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_BEHAVED`
+
+ .. c:var:: NPY_ARRAY_FARRAY_RO
+
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
+
+ .. c:var:: NPY_ARRAY_DEFAULT
+
+ :c:data:`NPY_ARRAY_CARRAY`
+
+ .. c:var:: NPY_ARRAY_IN_ARRAY
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
+
+ .. c:var:: NPY_ARRAY_IN_FARRAY
+
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
+
+ .. c:var:: NPY_OUT_ARRAY
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
+ :c:data:`NPY_ARRAY_ALIGNED`
+
+ .. c:var:: NPY_ARRAY_OUT_ARRAY
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` \|
+ :c:data:`NPY_ARRAY_WRITEABLE`
+
+ .. c:var:: NPY_ARRAY_OUT_FARRAY
+
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
+ :c:data:`NPY_ARRAY_ALIGNED`
+
+ .. c:var:: NPY_ARRAY_INOUT_ARRAY
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
+ :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \|
+ :c:data:`NPY_ARRAY_UPDATEIFCOPY`
+
+ .. c:var:: NPY_ARRAY_INOUT_FARRAY
+
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
+ :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \|
+ :c:data:`NPY_ARRAY_UPDATEIFCOPY`
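+
+ For instance, a sketch of a typical call that coerces an arbitrary
+ object to an aligned, C-contiguous :c:data:`NPY_DOUBLE` array for
+ read-only use (recall that the *dtype* reference is stolen):
+
+ .. code-block:: c
+
+ PyObject *arr = PyArray_FromAny(op, PyArray_DescrFromType(NPY_DOUBLE),
+                                 0, 0, NPY_ARRAY_IN_ARRAY, NULL);
+ if (arr == NULL) {
+     return NULL;
+ }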
+
+.. c:function:: int PyArray_GetArrayParamsFromObject( \
+ PyObject* op, PyArray_Descr* requested_dtype, npy_bool writeable, \
+ PyArray_Descr** out_dtype, int* out_ndim, npy_intp* out_dims, \
+ PyArrayObject** out_arr, PyObject* context)
+
+ .. versionadded:: 1.6
+
+ Retrieves the array parameters for viewing/converting an arbitrary
+ PyObject* to a NumPy array. This allows the "innate type and shape"
+ of a Python list-of-lists to be discovered without
+ actually converting to an array. PyArray_FromAny calls this function
+ to analyze its input.
+
+ In some cases, such as structured arrays and the :obj:`~numpy.class.__array__` interface,
+ a data type needs to be used to make sense of the object. When
+ this is needed, provide a Descr for 'requested_dtype', otherwise
+ provide NULL. This reference is not stolen. Also, if the requested
+ dtype doesn't modify the interpretation of the input, out_dtype will
+ still get the "innate" dtype of the object, not the dtype passed
+ in 'requested_dtype'.
+
+ If writing to the value in 'op' is desired, set the boolean
+ 'writeable' to 1. This raises an error when 'op' is a scalar, list
+ of lists, or other non-writeable 'op'. This differs from passing
+ :c:data:`NPY_ARRAY_WRITEABLE` to PyArray_FromAny, where the writeable array may
+ be a copy of the input.
+
+ On success (a return value of 0), either out_arr
+ is filled with a non-NULL PyArrayObject and
+ the rest of the parameters are untouched, or out_arr is
+ filled with NULL, and the rest of the parameters are filled.
+
+ Typical usage:
+
+ .. code-block:: c
+
+ PyArrayObject *arr = NULL;
+ PyArray_Descr *dtype = NULL;
+ int ndim = 0;
+ npy_intp dims[NPY_MAXDIMS];
+ int fortran = 0;  /* set to 1 for a Fortran-ordered result */
+
+ if (PyArray_GetArrayParamsFromObject(op, NULL, 1, &dtype,
+ &ndim, dims, &arr, NULL) < 0) {
+ return NULL;
+ }
+ if (arr == NULL) {
+ /*
+ ... validate/change dtype, validate flags, ndim, etc ...
+ Could make custom strides here too */
+ arr = PyArray_NewFromDescr(&PyArray_Type, dtype, ndim,
+ dims, NULL,
+ fortran ? NPY_ARRAY_F_CONTIGUOUS : 0,
+ NULL);
+ if (arr == NULL) {
+ return NULL;
+ }
+ if (PyArray_CopyObject(arr, op) < 0) {
+ Py_DECREF(arr);
+ return NULL;
+ }
+ }
+ else {
+ /*
+ ... in this case the other parameters weren't filled, just
+ validate and possibly copy arr itself ...
+ */
+ }
+ /*
+ ... use arr ...
+ */
+
+.. c:function:: PyObject* PyArray_CheckFromAny( \
+ PyObject* op, PyArray_Descr* dtype, int min_depth, int max_depth, \
+ int requirements, PyObject* context)
+
+ Nearly identical to :c:func:`PyArray_FromAny` (...) except
+ *requirements* can contain :c:data:`NPY_ARRAY_NOTSWAPPED` (over-riding the
+ specification in *dtype*) and :c:data:`NPY_ARRAY_ELEMENTSTRIDES` which
+ indicates that the array should be aligned in the sense that the
+ strides are multiples of the element size.
+
+ In versions 1.6 and earlier of NumPy, the following flags
+ did not have the _ARRAY_ macro namespace in them. That form
+ of the constant names is deprecated in 1.7.
+
+.. c:var:: NPY_ARRAY_NOTSWAPPED
+
+ Make sure the returned array has a data-type descriptor that is in
+ machine byte-order, over-riding any specification in the *dtype*
+ argument. Normally, the byte-order requirement is determined by
+ the *dtype* argument. If this flag is set and the dtype argument
+ does not indicate a machine byte-order descriptor (or is NULL and
+ the object is already an array with a data-type descriptor that is
+ not in machine byte-order), then a new data-type descriptor is
+ created and used with its byte-order field set to native.
+
+.. c:var:: NPY_ARRAY_BEHAVED_NS
+
+ :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \| :c:data:`NPY_ARRAY_NOTSWAPPED`
+
+.. c:var:: NPY_ARRAY_ELEMENTSTRIDES
+
+ Make sure the returned array has strides that are multiples of the
+ element size.
+
+.. c:function:: PyObject* PyArray_FromArray( \
+ PyArrayObject* op, PyArray_Descr* newtype, int requirements)
+
+ Special case of :c:func:`PyArray_FromAny` for when *op* is already an
+ array but it needs to be of a specific *newtype* (including
+ byte-order) or has certain *requirements*.
+
+.. c:function:: PyObject* PyArray_FromStructInterface(PyObject* op)
+
+ Returns an ndarray object from a Python object that exposes the
+ :obj:`__array_struct__` attribute and follows the array interface
+ protocol. If the object does not contain this attribute then a
+ borrowed reference to :c:data:`Py_NotImplemented` is returned.
+
+.. c:function:: PyObject* PyArray_FromInterface(PyObject* op)
+
+ Returns an ndarray object from a Python object that exposes the
+ :obj:`__array_interface__` attribute following the array interface
+ protocol. If the object does not contain this attribute then a
+ borrowed reference to :c:data:`Py_NotImplemented` is returned.
+
+.. c:function:: PyObject* PyArray_FromArrayAttr( \
+ PyObject* op, PyArray_Descr* dtype, PyObject* context)
+
+ Return an ndarray object from a Python object that exposes the
+ :obj:`~numpy.class.__array__` method. The :obj:`~numpy.class.__array__` method can take 0, 1, or 2
+ arguments ([dtype, context]) where *context* is used to pass
+ information about where the :obj:`~numpy.class.__array__` method is being called
+ from (currently only used in ufuncs).
+
+.. c:function:: PyObject* PyArray_ContiguousFromAny( \
+ PyObject* op, int typenum, int min_depth, int max_depth)
+
+ This function returns a (C-style) contiguous and well-behaved
+ array from any nested sequence or array-interface-exporting
+ object, *op*, of (non-flexible) type given by the enumerated
+ *typenum*, of minimum depth *min_depth*, and of maximum depth
+ *max_depth*. Equivalent to a call to :c:func:`PyArray_FromAny` with
+ requirements set to :c:data:`NPY_ARRAY_DEFAULT` and the type_num member of the
+ type argument set to *typenum*.
+
+.. c:function:: PyObject *PyArray_FromObject( \
+ PyObject *op, int typenum, int min_depth, int max_depth)
+
+ Return an aligned array in native byte order from any nested
+ sequence or array-interface exporting object, op, of a type given by
+ the enumerated typenum. The minimum number of dimensions the array can
+ have is given by min_depth while the maximum is max_depth. This is
+ equivalent to a call to :c:func:`PyArray_FromAny` with requirements set to
+ :c:data:`NPY_ARRAY_BEHAVED`.
+
+.. c:function:: PyObject* PyArray_EnsureArray(PyObject* op)
+
+ This function **steals a reference** to ``op`` and makes sure that
+ ``op`` is a base-class ndarray. It special cases array scalars,
+ but otherwise calls :c:func:`PyArray_FromAny` ( ``op``, NULL, 0, 0,
+ :c:data:`NPY_ARRAY_ENSUREARRAY`, NULL).
+
+.. c:function:: PyObject* PyArray_FromString( \
+ char* string, npy_intp slen, PyArray_Descr* dtype, npy_intp num, \
+ char* sep)
+
+ Construct a one-dimensional ndarray of a single type from a binary
+ or (ASCII) text ``string`` of length ``slen``. The data-type of
+ the array to-be-created is given by ``dtype``. If num is -1, then
+ **copy** the entire string and return an appropriately sized
+ array, otherwise, ``num`` is the number of items to **copy** from
+ the string. If ``sep`` is NULL (or ""), then interpret the string
+ as bytes of binary data, otherwise convert the sub-strings
+ separated by ``sep`` to items of data-type ``dtype``. Some
+ data-types may not be readable in text mode and an error will be
+ raised if that occurs. All errors return NULL.
+
+.. c:function:: PyObject* PyArray_FromFile( \
+ FILE* fp, PyArray_Descr* dtype, npy_intp num, char* sep)
+
+ Construct a one-dimensional ndarray of a single type from a binary
+ or text file. The open file pointer is ``fp``, the data-type of
+ the array to be created is given by ``dtype``. This must match
+ the data in the file. If ``num`` is -1, then read until the end of
+ the file and return an appropriately sized array, otherwise,
+ ``num`` is the number of items to read. If ``sep`` is NULL (or
+ ""), then read from the file in binary mode, otherwise read from
+ the file in text mode with ``sep`` providing the item
+ separator. Some array types cannot be read in text mode in which
+ case an error is raised.
+
+.. c:function:: PyObject* PyArray_FromBuffer( \
+ PyObject* buf, PyArray_Descr* dtype, npy_intp count, npy_intp offset)
+
+ Construct a one-dimensional ndarray of a single type from an
+ object, ``buf``, that exports the (single-segment) buffer protocol
+ (or has an attribute __buffer\__ that returns an object that
+ exports the buffer protocol). A writeable buffer will be tried
+ first followed by a read-only buffer. The :c:data:`NPY_ARRAY_WRITEABLE`
+ flag of the returned array will reflect which one was
+ successful. The data is assumed to start at ``offset`` bytes from
+ the start of the memory location for the object. The type of the
+ data in the buffer will be interpreted depending on the data-type
+ descriptor, ``dtype``. If ``count`` is negative then it will be
+ determined from the size of the buffer and the requested itemsize,
+ otherwise, ``count`` represents how many elements should be
+ converted from the buffer.
+
+.. c:function:: int PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src)
+
+ Copy from the source array, ``src``, into the destination array,
+ ``dest``, performing a data-type conversion if necessary. If an
+ error occurs return -1 (otherwise 0). The shape of ``src`` must be
+ broadcastable to the shape of ``dest``. The data areas of dest
+ and src must not overlap.
+
+.. c:function:: int PyArray_MoveInto(PyArrayObject* dest, PyArrayObject* src)
+
+ Move data from the source array, ``src``, into the destination
+ array, ``dest``, performing a data-type conversion if
+ necessary. If an error occurs return -1 (otherwise 0). The shape
+ of ``src`` must be broadcastable to the shape of ``dest``. The
+ data areas of dest and src may overlap.
+
+.. c:function:: PyArrayObject* PyArray_GETCONTIGUOUS(PyObject* op)
+
+ If ``op`` is already (C-style) contiguous and well-behaved then
+ just return a reference, otherwise return a (contiguous and
+ well-behaved) copy of the array. The parameter op must be a
+ (sub-class of an) ndarray and no checking for that is done.
+
+.. c:function:: PyObject* PyArray_FROM_O(PyObject* obj)
+
+ Convert ``obj`` to an ndarray. The argument can be any nested
+ sequence or object that exports the array interface. This is a
+ macro form of :c:func:`PyArray_FromAny` using ``NULL``, 0, 0, 0 for the
+ other arguments. Your code must be able to handle any data-type
+ descriptor and any combination of data-flags to use this macro.
+
+.. c:function:: PyObject* PyArray_FROM_OF(PyObject* obj, int requirements)
+
+ Similar to :c:func:`PyArray_FROM_O` except it can take an argument
+ of *requirements* indicating properties the resulting array must
+ have. Available requirements that can be enforced are
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS`, :c:data:`NPY_ARRAY_F_CONTIGUOUS`,
+ :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`,
+ :c:data:`NPY_ARRAY_NOTSWAPPED`, :c:data:`NPY_ARRAY_ENSURECOPY`,
+ :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, :c:data:`NPY_ARRAY_UPDATEIFCOPY`,
+ :c:data:`NPY_ARRAY_FORCECAST`, and
+ :c:data:`NPY_ARRAY_ENSUREARRAY`. The standard combinations of these
+ flags, such as :c:data:`NPY_ARRAY_IN_ARRAY`, can also be used.
+
+.. c:function:: PyObject* PyArray_FROM_OT(PyObject* obj, int typenum)
+
+ Similar to :c:func:`PyArray_FROM_O` except it can take an argument of
+ *typenum* specifying the type-number of the returned array.
+
+.. c:function:: PyObject* PyArray_FROM_OTF( \
+ PyObject* obj, int typenum, int requirements)
+
+ Combination of :c:func:`PyArray_FROM_OF` and :c:func:`PyArray_FROM_OT`
+ allowing both a *typenum* and a *requirements* argument to be provided.
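+
+ A one-line sketch of this common conversion idiom:
+
+ .. code-block:: c
+
+ /* any nested sequence or array-like -> behaved float64 array */
+ PyObject *arr = PyArray_FROM_OTF(obj, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY);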
+
+.. c:function:: PyObject* PyArray_FROMANY( \
+ PyObject* obj, int typenum, int min, int max, int requirements)
+
+ Similar to :c:func:`PyArray_FromAny` except the data-type is
+ specified using a typenumber. :c:func:`PyArray_DescrFromType`
+ (*typenum*) is passed directly to :c:func:`PyArray_FromAny`. This
+ macro also adds :c:data:`NPY_ARRAY_DEFAULT` to requirements if
+ :c:data:`NPY_ARRAY_ENSURECOPY` is passed in as requirements.
+
+.. c:function:: PyObject *PyArray_CheckAxis( \
+ PyObject* obj, int* axis, int requirements)
+
+ Encapsulate the functionality of functions and methods that take
+ the axis= keyword and work properly with None as the axis
+ argument. The input array is ``obj``, while ``*axis`` is a
+ converted integer (so that >=MAXDIMS is the None value), and
+ ``requirements`` gives the needed properties of ``obj``. The
+ output is a converted version of the input so that requirements
+ are met and if needed a flattening has occurred. On output
+ negative values of ``*axis`` are converted and the new value is
+ checked to ensure consistency with the shape of ``obj``.
+
+
+Dealing with types
+------------------
+
+
+General check of Python Type
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. c:function:: PyArray_Check(PyObject *op)
+
+ Evaluates true if *op* is a Python object whose type is a sub-type
+ of :c:data:`PyArray_Type`.
+
+.. c:function:: PyArray_CheckExact(PyObject *op)
+
+ Evaluates true if *op* is a Python object with type
+ :c:data:`PyArray_Type`.
+
+.. c:function:: PyArray_HasArrayInterface(PyObject *op, PyObject *out)
+
+ If ``op`` implements any part of the array interface, then ``out``
+ will contain a new reference to the newly created ndarray using
+ the interface or ``out`` will contain ``NULL`` if an error during
+ conversion occurs. Otherwise, out will contain a borrowed
+ reference to :c:data:`Py_NotImplemented` and no error condition is set.
+
+.. c:function:: PyArray_HasArrayInterfaceType(op, type, context, out)
+
+ If ``op`` implements any part of the array interface, then ``out``
+ will contain a new reference to the newly created ndarray using
+ the interface or ``out`` will contain ``NULL`` if an error during
+ conversion occurs. Otherwise, out will contain a borrowed
+ reference to Py_NotImplemented and no error condition is set.
+ This version allows setting of the type and context in the part of
+ the array interface that looks for the :obj:`~numpy.class.__array__` attribute.
+
+.. c:function:: PyArray_IsZeroDim(op)
+
+ Evaluates true if *op* is an instance of (a subclass of)
+ :c:data:`PyArray_Type` and has 0 dimensions.
+
+.. c:function:: PyArray_IsScalar(op, cls)
+
+ Evaluates true if *op* is an instance of :c:data:`Py{cls}ArrType_Type`.
+
+.. c:function:: PyArray_CheckScalar(op)
+
+ Evaluates true if *op* is either an array scalar (an instance of a
+ sub-type of :c:data:`PyGenericArr_Type` ), or an instance of (a
+ sub-class of) :c:data:`PyArray_Type` whose dimensionality is 0.
+
+.. c:function:: PyArray_IsPythonNumber(op)
+
+ Evaluates true if *op* is an instance of a builtin numeric type (int,
+ float, complex, long, bool)
+
+.. c:function:: PyArray_IsPythonScalar(op)
+
+ Evaluates true if *op* is a builtin Python scalar object (int,
+ float, complex, str, unicode, long, bool).
+
+.. c:function:: PyArray_IsAnyScalar(op)
+
+ Evaluates true if *op* is either a Python scalar object (see
+ :c:func:`PyArray_IsPythonScalar`) or an array scalar (an instance of a sub-
+ type of :c:data:`PyGenericArr_Type` ).
+
+.. c:function:: PyArray_CheckAnyScalar(op)
+
+ Evaluates true if *op* is a Python scalar object (see
+ :c:func:`PyArray_IsPythonScalar`), an array scalar (an instance of a
+ sub-type of :c:data:`PyGenericArr_Type`) or an instance of a sub-type of
+ :c:data:`PyArray_Type` whose dimensionality is 0.
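+
+For illustration, a sketch of a typical argument guard built from these
+macros:
+
+.. code-block:: c
+
+   if (PyArray_Check(obj)) {
+       /* an instance of (a subclass of) ndarray */
+   }
+   else if (PyArray_CheckScalar(obj)) {
+       /* an array scalar */
+   }
+   else if (PyArray_IsPythonScalar(obj)) {
+       /* a builtin Python scalar such as int or float */
+   }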
+
+
+Data-type checking
+^^^^^^^^^^^^^^^^^^
+
+For the typenum macros, the argument is an integer representing an
+enumerated array data type. For the array type checking macros the
+argument must be a :c:type:`PyObject *<PyObject>` that can be directly interpreted as a
+:c:type:`PyArrayObject *`.
+
+.. c:function:: PyTypeNum_ISUNSIGNED(int num)
+
+.. c:function:: PyDataType_ISUNSIGNED(PyArray_Descr *descr)
+
+.. c:function:: PyArray_ISUNSIGNED(PyArrayObject *obj)
+
+ Type represents an unsigned integer.
+
+.. c:function:: PyTypeNum_ISSIGNED(int num)
+
+.. c:function:: PyDataType_ISSIGNED(PyArray_Descr *descr)
+
+.. c:function:: PyArray_ISSIGNED(PyArrayObject *obj)
+
+ Type represents a signed integer.
+
+.. c:function:: PyTypeNum_ISINTEGER(int num)
+
+.. c:function:: PyDataType_ISINTEGER(PyArray_Descr* descr)
+
+.. c:function:: PyArray_ISINTEGER(PyArrayObject *obj)
+
+ Type represents any integer.
+
+.. c:function:: PyTypeNum_ISFLOAT(int num)
+
+.. c:function:: PyDataType_ISFLOAT(PyArray_Descr* descr)
+
+.. c:function:: PyArray_ISFLOAT(PyArrayObject *obj)
+
+ Type represents any floating point number.
+
+.. c:function:: PyTypeNum_ISCOMPLEX(int num)
+
+.. c:function:: PyDataType_ISCOMPLEX(PyArray_Descr* descr)
+
+.. c:function:: PyArray_ISCOMPLEX(PyArrayObject *obj)
+
+ Type represents any complex floating point number.
+
+.. c:function:: PyTypeNum_ISNUMBER(int num)
+
+.. c:function:: PyDataType_ISNUMBER(PyArray_Descr* descr)
+
+.. c:function:: PyArray_ISNUMBER(PyArrayObject *obj)
+
+ Type represents any integer, floating point, or complex floating point
+ number.
+
+.. c:function:: PyTypeNum_ISSTRING(int num)
+
+.. c:function:: PyDataType_ISSTRING(PyArray_Descr* descr)
+
+.. c:function:: PyArray_ISSTRING(PyArrayObject *obj)
+
+ Type represents a string data type.
+
+.. c:function:: PyTypeNum_ISPYTHON(int num)
+
+.. c:function:: PyDataType_ISPYTHON(PyArray_Descr* descr)
+
+.. c:function:: PyArray_ISPYTHON(PyArrayObject *obj)
+
+ Type represents an enumerated type corresponding to one of the
+ standard Python scalar types (bool, int, float, or complex).
+
+.. c:function:: PyTypeNum_ISFLEXIBLE(int num)
+
+.. c:function:: PyDataType_ISFLEXIBLE(PyArray_Descr* descr)
+
+.. c:function:: PyArray_ISFLEXIBLE(PyArrayObject *obj)
+
+ Type represents one of the flexible array types ( :c:data:`NPY_STRING`,
+ :c:data:`NPY_UNICODE`, or :c:data:`NPY_VOID` ).
+
+.. c:function:: PyDataType_ISUNSIZED(PyArray_Descr* descr)
+
+ Type has no size information attached, and can be resized. Should only be
+ called on flexible dtypes. Types that are attached to an array will always
+ be sized, hence the array form of this macro not existing.
+
+ .. versionchanged:: 1.18
+
+ For structured datatypes with no fields this function now returns False.
+
+.. c:function:: PyTypeNum_ISUSERDEF(int num)
+
+.. c:function:: PyDataType_ISUSERDEF(PyArray_Descr* descr)
+
+.. c:function:: PyArray_ISUSERDEF(PyArrayObject *obj)
+
+ Type represents a user-defined type.
+
+.. c:function:: PyTypeNum_ISEXTENDED(int num)
+
+.. c:function:: PyDataType_ISEXTENDED(PyArray_Descr* descr)
+
+.. c:function:: PyArray_ISEXTENDED(PyArrayObject *obj)
+
+ Type is either flexible or user-defined.
+
+.. c:function:: PyTypeNum_ISOBJECT(int num)
+
+.. c:function:: PyDataType_ISOBJECT(PyArray_Descr* descr)
+
+.. c:function:: PyArray_ISOBJECT(PyArrayObject *obj)
+
+ Type represents object data type.
+
+.. c:function:: PyTypeNum_ISBOOL(int num)
+
+.. c:function:: PyDataType_ISBOOL(PyArray_Descr* descr)
+
+.. c:function:: PyArray_ISBOOL(PyArrayObject *obj)
+
+ Type represents Boolean data type.
+
+.. c:function:: PyDataType_HASFIELDS(PyArray_Descr* descr)
+
+.. c:function:: PyArray_HASFIELDS(PyArrayObject *obj)
+
+ Type has fields associated with it.
+
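+The three macro families mirror one another. As a minimal sketch,
+assuming *arr* is a valid ``PyArrayObject *``, the same "is it a
+float?" question can be asked of the type number, the descriptor, or
+the array itself:
+
+.. code-block:: c
+
+   /* Sketch: the three macro families agree for any valid array. */
+   static int
+   is_float_three_ways(PyArrayObject *arr)
+   {
+       PyArray_Descr *descr = PyArray_DESCR(arr);
+       int num = descr->type_num;
+
+       return PyTypeNum_ISFLOAT(num)      /* from the type number */
+           && PyDataType_ISFLOAT(descr)   /* from the descriptor */
+           && PyArray_ISFLOAT(arr);       /* from the array object */
+   }
+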
+.. c:function:: PyArray_ISNOTSWAPPED(m)
+
+ Evaluates true if the data area of the ndarray *m* is in machine
+ byte-order according to the array's data-type descriptor.
+
+.. c:function:: PyArray_ISBYTESWAPPED(m)
+
+ Evaluates true if the data area of the ndarray *m* is **not** in
+ machine byte-order according to the array's data-type descriptor.
+
+.. c:function:: Bool PyArray_EquivTypes( \
+ PyArray_Descr* type1, PyArray_Descr* type2)
+
+ Return :c:data:`NPY_TRUE` if *type1* and *type2* actually represent
+ equivalent types for this platform (the fortran member of each
+ type is ignored). For example, on 32-bit platforms,
+ :c:data:`NPY_LONG` and :c:data:`NPY_INT` are equivalent. Otherwise
+ return :c:data:`NPY_FALSE`.
+
+.. c:function:: Bool PyArray_EquivArrTypes( \
+ PyArrayObject* a1, PyArrayObject * a2)
+
+ Return :c:data:`NPY_TRUE` if *a1* and *a2* are arrays with equivalent
+ types for this platform.
+
+.. c:function:: Bool PyArray_EquivTypenums(int typenum1, int typenum2)
+
+ Special case of :c:func:`PyArray_EquivTypes` (...) that does not accept
+ flexible data types but may be easier to call.
+
+.. c:function:: int PyArray_EquivByteorders({byteorder} b1, {byteorder} b2)
+
+ True if byteorder characters ( :c:data:`NPY_LITTLE`,
+ :c:data:`NPY_BIG`, :c:data:`NPY_NATIVE`, :c:data:`NPY_IGNORE` ) are
+ either equal or equivalent as to their specification of a native
+ byte order. Thus, on a little-endian machine :c:data:`NPY_LITTLE`
+ and :c:data:`NPY_NATIVE` are equivalent where they are not
+ equivalent on a big-endian machine.
+
+
+Converting data types
+^^^^^^^^^^^^^^^^^^^^^
+
+.. c:function:: PyObject* PyArray_Cast(PyArrayObject* arr, int typenum)
+
+ Mainly for backwards compatibility to the Numeric C-API and for
+ simple casts to non-flexible types. Return a new array object with
+ the elements of *arr* cast to the data-type *typenum* which must
+ be one of the enumerated types and not a flexible type.
+
+.. c:function:: PyObject* PyArray_CastToType( \
+ PyArrayObject* arr, PyArray_Descr* type, int fortran)
+
+ Return a new array of the *type* specified, casting the elements
+ of *arr* as appropriate. The fortran argument specifies the
+ ordering of the output array.
+
+.. c:function:: int PyArray_CastTo(PyArrayObject* out, PyArrayObject* in)
+
+ As of 1.6, this function simply calls :c:func:`PyArray_CopyInto`,
+ which handles the casting.
+
+ Cast the elements of the array *in* into the array *out*. The
+ output array should be writeable, have an integer-multiple of the
+ number of elements in the input array (more than one copy can be
+ placed in out), and have a data type that is one of the builtin
+ types. Returns 0 on success and -1 if an error occurs.
+
+.. c:function:: PyArray_VectorUnaryFunc* PyArray_GetCastFunc( \
+ PyArray_Descr* from, int totype)
+
+ Return the low-level casting function to cast from the given
+ descriptor to the builtin type number. If no casting function
+ exists return ``NULL`` and set an error. Using this function
+ instead of direct access to *from* ->f->cast will allow support of
+ any user-defined casting functions added to a descriptor's casting
+ dictionary.
+
+.. c:function:: int PyArray_CanCastSafely(int fromtype, int totype)
+
+ Returns non-zero if an array of data type *fromtype* can be cast
+ to an array of data type *totype* without losing information. An
+ exception is that 64-bit integers are allowed to be cast to 64-bit
+ floating point values even though this can lose precision on large
+ integers so as not to proliferate the use of long doubles without
+ explicit requests. Flexible array types are not checked according
+ to their lengths with this function.
+
+.. c:function:: int PyArray_CanCastTo( \
+ PyArray_Descr* fromtype, PyArray_Descr* totype)
+
+ :c:func:`PyArray_CanCastTypeTo` supersedes this function in
+ NumPy 1.6 and later.
+
+ Equivalent to ``PyArray_CanCastTypeTo(fromtype, totype, NPY_SAFE_CASTING)``.
+
+.. c:function:: int PyArray_CanCastTypeTo( \
+ PyArray_Descr* fromtype, PyArray_Descr* totype, NPY_CASTING casting)
+
+ .. versionadded:: 1.6
+
+ Returns non-zero if an array of data type *fromtype* (which can
+ include flexible types) can be cast safely to an array of data
+ type *totype* (which can include flexible types) according to
+ the casting rule *casting*. For simple types with :c:data:`NPY_SAFE_CASTING`,
+ this is basically a wrapper around :c:func:`PyArray_CanCastSafely`, but
+ for flexible types such as strings or unicode, it produces results
+ taking into account their sizes. Integer and float types can only be cast
+ to a string or unicode type using :c:data:`NPY_SAFE_CASTING` if the string
+ or unicode type is big enough to hold the max value of the integer/float
+ type being cast from.
+
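+As a usage sketch, a conversion helper might verify castability before
+converting; this assumes (as NumPy descriptor-consuming functions
+typically do) that :c:func:`PyArray_CastToType` steals the reference to
+the descriptor passed in:
+
+.. code-block:: c
+
+   /* Sketch: cast arr to float64 only if the cast is safe. */
+   static PyObject *
+   cast_to_double(PyArrayObject *arr)
+   {
+       PyArray_Descr *to = PyArray_DescrFromType(NPY_FLOAT64);
+       if (to == NULL) {
+           return NULL;
+       }
+       if (!PyArray_CanCastTypeTo(PyArray_DESCR(arr), to, NPY_SAFE_CASTING)) {
+           Py_DECREF(to);
+           PyErr_SetString(PyExc_TypeError, "cannot cast safely to float64");
+           return NULL;
+       }
+       /* assumption: PyArray_CastToType consumes our reference to `to` */
+       return PyArray_CastToType(arr, to, 0);
+   }
+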
+.. c:function:: int PyArray_CanCastArrayTo( \
+ PyArrayObject* arr, PyArray_Descr* totype, NPY_CASTING casting)
+
+ .. versionadded:: 1.6
+
+ Returns non-zero if *arr* can be cast to *totype* according
+ to the casting rule given in *casting*. If *arr* is an array
+ scalar, its value is taken into account, and non-zero is also
+ returned when the value will not overflow or be truncated to
+ an integer when converting to a smaller type.
+
+ This is almost the same as the result of
+ PyArray_CanCastTypeTo(PyArray_MinScalarType(arr), totype, casting),
+ but it also handles a special case arising because the set
+ of uint values is not a subset of the int values for types with the
+ same number of bits.
+
+.. c:function:: PyArray_Descr* PyArray_MinScalarType(PyArrayObject* arr)
+
+ .. versionadded:: 1.6
+
+ If *arr* is an array, returns its data type descriptor, but if
+ *arr* is an array scalar (has 0 dimensions), it finds the data type
+ of smallest size to which the value may be converted
+ without overflow or truncation to an integer.
+
+ This function will not demote complex to float or anything to
+ boolean, but will demote a signed integer to an unsigned integer
+ when the scalar value is positive.
+
+.. c:function:: PyArray_Descr* PyArray_PromoteTypes( \
+ PyArray_Descr* type1, PyArray_Descr* type2)
+
+ .. versionadded:: 1.6
+
+ Finds the data type of smallest size and kind to which *type1* and
+ *type2* may be safely converted. This function is symmetric and
+ associative. A string or unicode result will be the proper size for
+ storing the max value of the input types converted to a string or unicode.
+
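+A short sketch of promotion in C (error checks omitted); under these
+rules, combining ``int32`` with ``float32`` promotes to ``float64``,
+matching ``np.promote_types('i4', 'f4')``:
+
+.. code-block:: c
+
+   /* Sketch: promote int32 and float32 -> float64. */
+   PyArray_Descr *a = PyArray_DescrFromType(NPY_INT32);
+   PyArray_Descr *b = PyArray_DescrFromType(NPY_FLOAT32);
+   PyArray_Descr *res = PyArray_PromoteTypes(a, b);  /* new reference */
+
+   /* here res->type_num == NPY_FLOAT64 */
+   Py_DECREF(a);
+   Py_DECREF(b);
+   Py_XDECREF(res);
+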
+.. c:function:: PyArray_Descr* PyArray_ResultType( \
+ npy_intp narrs, PyArrayObject**arrs, npy_intp ndtypes, \
+ PyArray_Descr**dtypes)
+
+ .. versionadded:: 1.6
+
+ This applies type promotion to all the inputs,
+ using the NumPy rules for combining scalars and arrays, to
+ determine the output type of a set of operands. This is the
+ same result type that ufuncs produce. The specific algorithm
+ used is as follows.
+
+ Categories are determined by first checking which of boolean,
+ integer (int/uint), or floating point (float/complex) the maximum
+ kind of all the arrays and the scalars are.
+
+ If there are only scalars or the maximum category of the scalars
+ is higher than the maximum category of the arrays,
+ the data types are combined with :c:func:`PyArray_PromoteTypes`
+ to produce the return value.
+
+ Otherwise, PyArray_MinScalarType is called on each array, and
+ the resulting data types are all combined with
+ :c:func:`PyArray_PromoteTypes` to produce the return value.
+
+ The set of int values is not a subset of the uint values for types
+ with the same number of bits, something not reflected in
+ :c:func:`PyArray_MinScalarType`, but handled as a special case in
+ PyArray_ResultType.
+
+.. c:function:: int PyArray_ObjectType(PyObject* op, int mintype)
+
+ This function is superseded by :c:func:`PyArray_MinScalarType` and/or
+ :c:func:`PyArray_ResultType`.
+
+ This function is useful for determining a common type that two or
+ more arrays can be converted to. It only works for non-flexible
+ array types as no itemsize information is passed. The *mintype*
+ argument represents the minimum type acceptable, and *op*
+ represents the object that will be converted to an array. The
+ return value is the enumerated typenumber that represents the
+ data-type that *op* should have.
+
+.. c:function:: void PyArray_ArrayType( \
+ PyObject* op, PyArray_Descr* mintype, PyArray_Descr* outtype)
+
+ This function is superseded by :c:func:`PyArray_ResultType`.
+
+ This function works similarly to :c:func:`PyArray_ObjectType` (...)
+ except it handles flexible arrays. The *mintype* argument can have
+ an itemsize member and the *outtype* argument will have an
+ itemsize member at least as big but perhaps bigger depending on
+ the object *op*.
+
+.. c:function:: PyArrayObject** PyArray_ConvertToCommonType( \
+ PyObject* op, int* n)
+
+ The functionality this provides is largely superseded by the iterator
+ :c:type:`NpyIter` introduced in 1.6, with flag
+ :c:data:`NPY_ITER_COMMON_DTYPE` or with the same dtype parameter for
+ all operands.
+
+ Convert a sequence of Python objects contained in *op* to an array
+ of ndarrays each having the same data type. The type is selected
+ based on the typenumber (larger type number is chosen over a
+ smaller one) ignoring objects that are only scalars. The length of
+ the sequence is returned in *n*, and an *n* -length array of
+ :c:type:`PyArrayObject` pointers is the return value (or ``NULL`` if an
+ error occurs). The returned array must be freed by the caller of
+ this routine (using :c:func:`PyDataMem_FREE` ) and all the array objects
+ in it ``DECREF`` 'd or a memory-leak will occur. The example
+ template-code below shows a typical usage:
+
+ .. code-block:: c
+
+       PyArrayObject **mps;
+       int i, n;
+
+       mps = PyArray_ConvertToCommonType(obj, &n);
+       if (mps == NULL) {
+           return NULL;
+       }
+       /* ... use the n equivalently-typed arrays in mps ... */
+       for (i = 0; i < n; i++) {
+           Py_DECREF(mps[i]);
+       }
+       PyDataMem_FREE(mps);
+
+.. c:function:: char* PyArray_Zero(PyArrayObject* arr)
+
+ A pointer to newly created memory of size *arr* ->itemsize that
+ holds the representation of 0 for that type. The returned pointer,
+ *ret*, **must be freed** using :c:func:`PyDataMem_FREE` (ret) when it is
+ not needed anymore.
+
+.. c:function:: char* PyArray_One(PyArrayObject* arr)
+
+ A pointer to newly created memory of size *arr* ->itemsize that
+ holds the representation of 1 for that type. The returned pointer,
+ *ret*, **must be freed** using :c:func:`PyDataMem_FREE` (ret) when it
+ is not needed anymore.
+
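+As a usage sketch, the returned buffer permits a byte-wise comparison
+of an element against zero (``arr`` is assumed to be a valid
+``PyArrayObject *``, and ``<string.h>`` provides ``memcmp``):
+
+.. code-block:: c
+
+   /* Sketch: byte-wise test of the first element against zero. */
+   char *zero = PyArray_Zero(arr);
+   if (zero == NULL) {
+       return NULL;
+   }
+   int first_is_zero = (memcmp(PyArray_DATA(arr), zero,
+                               PyArray_ITEMSIZE(arr)) == 0);
+   PyDataMem_FREE(zero);  /* required, or the buffer leaks */
+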
+.. c:function:: int PyArray_ValidType(int typenum)
+
+ Returns :c:data:`NPY_TRUE` if *typenum* represents a valid type-number
+ (builtin or user-defined or character code). Otherwise, this
+ function returns :c:data:`NPY_FALSE`.
+
+
+New data types
+^^^^^^^^^^^^^^
+
+.. c:function:: void PyArray_InitArrFuncs(PyArray_ArrFuncs* f)
+
+ Initialize all function pointers and members to ``NULL``.
+
+.. c:function:: int PyArray_RegisterDataType(PyArray_Descr* dtype)
+
+ Register a data-type as a new user-defined data type for
+ arrays. The type must have most of its entries filled in. This is
+ not always checked and errors can produce segfaults. In
+ particular, the typeobj member of the ``dtype`` structure must be
+ filled with a Python type that has a fixed element size that
+ corresponds to the elsize member of *dtype*. Also the ``f``
+ member must have the required functions: nonzero, copyswap,
+ copyswapn, getitem, setitem, and cast (some of the cast functions
+ may be ``NULL`` if no support is desired). To avoid confusion, you
+ should choose a unique character typecode but this is not enforced
+ and not relied on internally.
+
+ A user-defined type number is returned that uniquely identifies
+ the type. A pointer to the new structure can then be obtained from
+ :c:func:`PyArray_DescrFromType` using the returned type number. A -1 is
+ returned if an error occurs. If this *dtype* has already been
+ registered (checked only by the address of the pointer), then
+ return the previously-assigned type-number.
+
+.. c:function:: int PyArray_RegisterCastFunc( \
+ PyArray_Descr* descr, int totype, PyArray_VectorUnaryFunc* castfunc)
+
+ Register a low-level casting function, *castfunc*, to convert
+ from the data-type, *descr*, to the given data-type number,
+ *totype*. Any old casting function is over-written. A ``0`` is
+ returned on success or a ``-1`` on failure.
+
+.. c:function:: int PyArray_RegisterCanCast( \
+ PyArray_Descr* descr, int totype, NPY_SCALARKIND scalar)
+
+ Register the data-type number, *totype*, as castable from
+ data-type object, *descr*, of the given *scalar* kind. Use
+ *scalar* = :c:data:`NPY_NOSCALAR` to register that an array of data-type
+ *descr* can be cast safely to a data-type whose type_number is
+ *totype*.
+
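+Putting the three registration calls together, a hedged sketch (the
+``quad_descr`` descriptor and ``quad_to_double`` cast function are
+hypothetical):
+
+.. code-block:: c
+
+   /* Hypothetical cast function with the PyArray_VectorUnaryFunc shape. */
+   static void
+   quad_to_double(void *from, void *to, npy_intp n,
+                  void *fromarr, void *toarr)
+   {
+       /* ... convert n elements from `from` into `to` ... */
+   }
+
+   static int
+   register_quad(PyArray_Descr *quad_descr)
+   {
+       int typenum = PyArray_RegisterDataType(quad_descr);
+       if (typenum < 0) {
+           return -1;
+       }
+       if (PyArray_RegisterCastFunc(quad_descr, NPY_DOUBLE,
+                                    quad_to_double) < 0) {
+           return -1;
+       }
+       /* declare the array-to-array cast to double as safe */
+       return PyArray_RegisterCanCast(quad_descr, NPY_DOUBLE, NPY_NOSCALAR);
+   }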
+
+Special functions for NPY_OBJECT
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. c:function:: int PyArray_INCREF(PyArrayObject* op)
+
+ Used for an array, *op*, that contains any Python objects. It
+ increments the reference count of every object in the array
+ according to the data-type of *op*. A -1 is returned if an error
+ occurs, otherwise 0 is returned.
+
+.. c:function:: void PyArray_Item_INCREF(char* ptr, PyArray_Descr* dtype)
+
+ A function to INCREF all the objects at the location *ptr*
+ according to the data-type *dtype*. If *ptr* is the start of a
+ structured type with an object at any offset, then this will (recursively)
+ increment the reference count of all object-like items in the
+ structured type.
+
+.. c:function:: int PyArray_XDECREF(PyArrayObject* op)
+
+ Used for an array, *op*, that contains any Python objects. It
+ decrements the reference count of every object in the array
+ according to the data-type of *op*. Normal return value is 0. A
+ -1 is returned if an error occurs.
+
+.. c:function:: void PyArray_Item_XDECREF(char* ptr, PyArray_Descr* dtype)
+
+ A function to XDECREF all the object-like items at the location
+ *ptr* as recorded in the data-type, *dtype*. This works
+ recursively so that if ``dtype`` itself has fields with data-types
+ that contain object-like items, all the object-like fields will be
+ XDECREF ``'d``.
+
+.. c:function:: void PyArray_FillObjectArray(PyArrayObject* arr, PyObject* obj)
+
+ Fill a newly created array with a single value obj at all
+ locations in the structure with object data-types. No checking is
+ performed but *arr* must be of data-type :c:type:`NPY_OBJECT` and be
+ single-segment and uninitialized (no previous objects in
+ position). Use :c:func:`PyArray_XDECREF` (*arr*) if you need to
+ decrement all the items in the object array prior to calling this
+ function.
+
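+A brief sketch of creating and filling an object array (the length
+``n`` is arbitrary; error handling abbreviated):
+
+.. code-block:: c
+
+   /* Sketch: build a 1-d NPY_OBJECT array filled with Py_None. */
+   npy_intp n = 10;
+   PyArrayObject *arr = (PyArrayObject *)PyArray_SimpleNew(1, &n, NPY_OBJECT);
+   if (arr == NULL) {
+       return NULL;
+   }
+   /* The new array is zero-filled, i.e. holds no previous objects,
+      so PyArray_FillObjectArray may be used directly. */
+   PyArray_FillObjectArray(arr, Py_None);
+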
+.. c:function:: int PyArray_SetUpdateIfCopyBase(PyArrayObject* arr, PyArrayObject* base)
+
+ Precondition: ``arr`` is a copy of ``base`` (though possibly with different
+ strides, ordering, etc.) Set the UPDATEIFCOPY flag and ``arr->base`` so
+ that when ``arr`` is destructed, it will copy any changes back to ``base``.
+ DEPRECATED, use :c:func:`PyArray_SetWritebackIfCopyBase`.
+
+ Returns 0 for success, -1 for failure.
+
+.. c:function:: int PyArray_SetWritebackIfCopyBase(PyArrayObject* arr, PyArrayObject* base)
+
+ Precondition: ``arr`` is a copy of ``base`` (though possibly with different
+ strides, ordering, etc.) Sets the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag
+ and ``arr->base``, and set ``base`` to READONLY. Call
+ :c:func:`PyArray_ResolveWritebackIfCopy` before calling
+ ``Py_DECREF`` in order to copy any changes back to ``base`` and
+ reset the READONLY flag.
+
+ Returns 0 for success, -1 for failure.
+
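+A sketch of the full writeback life cycle, assuming ``misbehaved`` is a
+writeable but badly-behaved (e.g. non-contiguous) input array and that
+the flags below were requested from :c:func:`PyArray_FromAny`:
+
+.. code-block:: c
+
+   /* Sketch: work on a well-behaved copy, then write changes back. */
+   PyArrayObject *work = (PyArrayObject *)PyArray_FromAny(
+           (PyObject *)misbehaved, NULL, 0, 0,
+           NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY, NULL);
+   if (work == NULL) {
+       return NULL;
+   }
+   /* ... modify the data of work in place ... */
+   PyArray_ResolveWritebackIfCopy(work);  /* copies changes back to base */
+   Py_DECREF(work);
+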
+.. _array-flags:
+
+Array flags
+-----------
+
+The ``flags`` attribute of the ``PyArrayObject`` structure contains
+important information about the memory used by the array (pointed to
+by the data member). This flag information must be kept accurate or
+strange results and even segfaults may result.
+
+There are 6 (binary) flags that describe the memory area used by the
+data buffer. These constants are defined in ``arrayobject.h`` and
+determine the bit-position of the flag. Python exposes a nice
+attribute-based interface as well as a dictionary-like interface for
+getting (and, if appropriate, setting) these flags.
+
+Memory areas of all kinds can be pointed to by an ndarray, necessitating
+these flags. If you get an arbitrary ``PyArrayObject`` in C-code, you
+need to be aware of the flags that are set. If you need to guarantee
+a certain kind of array (like :c:data:`NPY_ARRAY_C_CONTIGUOUS` and
+:c:data:`NPY_ARRAY_BEHAVED`), then pass these requirements into the
+PyArray_FromAny function.
+
+
+Basic Array Flags
+^^^^^^^^^^^^^^^^^
+
+An ndarray can have a data segment that is not a simple contiguous
+chunk of well-behaved memory you can manipulate. It may not be aligned
+with word boundaries (very important on some platforms). It might have
+its data in a different byte-order than the machine recognizes. It
+might not be writeable. It might be in Fortran-contiguous order. The
+array flags are used to indicate what can be said about data
+associated with an array.
+
+In versions 1.6 and earlier of NumPy, the following flags
+did not have the _ARRAY_ macro namespace in them. That form
+of the constant names is deprecated in 1.7.
+
+.. c:var:: NPY_ARRAY_C_CONTIGUOUS
+
+ The data area is in C-style contiguous order (last index varies the
+ fastest).
+
+.. c:var:: NPY_ARRAY_F_CONTIGUOUS
+
+ The data area is in Fortran-style contiguous order (first index varies
+ the fastest).
+
+.. note::
+
+ Arrays can be both C-style and Fortran-style contiguous simultaneously.
+ This is clear for 1-dimensional arrays, but can also be true for higher
+ dimensional arrays.
+
+ Even for contiguous arrays a stride for a given dimension
+ ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
+ or the array has no elements.
+ It does *not* generally hold that ``self.strides[-1] == self.itemsize``
+ for C-style contiguous arrays or that ``self.strides[0] == self.itemsize``
+ for Fortran-style contiguous arrays. The correct way to access the
+ ``itemsize`` of an array from the C API is ``PyArray_ITEMSIZE(arr)``.
+
+ .. seealso:: :ref:`Internal memory layout of an ndarray <arrays.ndarray>`
+
+.. c:var:: NPY_ARRAY_OWNDATA
+
+ The data area is owned by this array.
+
+.. c:var:: NPY_ARRAY_ALIGNED
+
+ The data area and all array elements are aligned appropriately.
+
+.. c:var:: NPY_ARRAY_WRITEABLE
+
+ The data area can be written to.
+
+ Notice that the above 3 flags are defined so that a new,
+ well-behaved array has these flags defined as true.
+
+.. c:var:: NPY_ARRAY_WRITEBACKIFCOPY
+
+ The data area represents a (well-behaved) copy whose information
+ should be transferred back to the original when
+ :c:func:`PyArray_ResolveWritebackIfCopy` is called.
+
+ This is a special flag that is set if this array represents a copy
+ made because a user required certain flags in
+ :c:func:`PyArray_FromAny` and a copy had to be made of some other
+ array (and the user asked for this flag to be set in such a
+ situation). The base attribute then points to the "misbehaved"
+ array (which is set read_only). :c:func:`PyArray_ResolveWritebackIfCopy`
+ will copy its contents back to the "misbehaved"
+ array (casting if necessary) and will reset the "misbehaved" array
+ to :c:data:`NPY_ARRAY_WRITEABLE`. If the "misbehaved" array was not
+ :c:data:`NPY_ARRAY_WRITEABLE` to begin with then :c:func:`PyArray_FromAny`
+ would have returned an error because :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`
+ would not have been possible.
+
+.. c:var:: NPY_ARRAY_UPDATEIFCOPY
+
+ A deprecated version of :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` which
+ depends upon ``dealloc`` to trigger the writeback. For backwards
+ compatibility, :c:func:`PyArray_ResolveWritebackIfCopy` is called at
+ ``dealloc`` but relying
+ on that behavior is deprecated and not supported in PyPy.
+
+:c:func:`PyArray_UpdateFlags` (obj, flags) will update the ``obj->flags``
+for ``flags`` which can be any of :c:data:`NPY_ARRAY_C_CONTIGUOUS`,
+:c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_ALIGNED`, or
+:c:data:`NPY_ARRAY_WRITEABLE`.
+
+
+Combinations of array flags
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. c:var:: NPY_ARRAY_BEHAVED
+
+ :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE`
+
+.. c:var:: NPY_ARRAY_CARRAY
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_BEHAVED`
+
+.. c:var:: NPY_ARRAY_CARRAY_RO
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
+
+.. c:var:: NPY_ARRAY_FARRAY
+
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_BEHAVED`
+
+.. c:var:: NPY_ARRAY_FARRAY_RO
+
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
+
+.. c:var:: NPY_ARRAY_DEFAULT
+
+ :c:data:`NPY_ARRAY_CARRAY`
+
+.. c:var:: NPY_ARRAY_UPDATE_ALL
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
+
+
+Flag-like constants
+^^^^^^^^^^^^^^^^^^^
+
+These constants are used in :c:func:`PyArray_FromAny` (and its macro forms) to
+specify desired properties of the new array.
+
+.. c:var:: NPY_ARRAY_FORCECAST
+
+ Cast to the desired type, even if it can't be done without losing
+ information.
+
+.. c:var:: NPY_ARRAY_ENSURECOPY
+
+ Make sure the resulting array is a copy of the original.
+
+.. c:var:: NPY_ARRAY_ENSUREARRAY
+
+ Make sure the resulting object is an actual ndarray, and not a sub-class.
+
+.. c:var:: NPY_ARRAY_NOTSWAPPED
+
+ Only used in :c:func:`PyArray_CheckFromAny` to over-ride the byteorder
+ of the data-type object passed in.
+
+.. c:var:: NPY_ARRAY_BEHAVED_NS
+
+ :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \| :c:data:`NPY_ARRAY_NOTSWAPPED`
+
+
+Flag checking
+^^^^^^^^^^^^^
+
+For all of these macros *arr* must be an instance of a (subclass of)
+:c:data:`PyArray_Type`.
+
+.. c:function:: PyArray_CHKFLAGS(PyObject *arr, flags)
+
+ The first parameter, arr, must be an ndarray or subclass. The
+ parameter, *flags*, should be an integer consisting of bitwise
+ combinations of the possible flags an array can have:
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS`, :c:data:`NPY_ARRAY_F_CONTIGUOUS`,
+ :c:data:`NPY_ARRAY_OWNDATA`, :c:data:`NPY_ARRAY_ALIGNED`,
+ :c:data:`NPY_ARRAY_WRITEABLE`, :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`,
+ :c:data:`NPY_ARRAY_UPDATEIFCOPY`.
+
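+For instance, a minimal guard that an input is safe for a simple C loop
+might combine the flags like this (``arr`` assumed to be a
+``PyArrayObject *``):
+
+.. code-block:: c
+
+   /* Sketch: require an aligned, writeable, C-contiguous input. */
+   if (!PyArray_CHKFLAGS(arr, NPY_ARRAY_CARRAY)) {
+       PyErr_SetString(PyExc_ValueError,
+                       "expected an aligned, writeable, C-contiguous array");
+       return NULL;
+   }
+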
+.. c:function:: PyArray_IS_C_CONTIGUOUS(PyObject *arr)
+
+ Evaluates true if *arr* is C-style contiguous.
+
+.. c:function:: PyArray_IS_F_CONTIGUOUS(PyObject *arr)
+
+ Evaluates true if *arr* is Fortran-style contiguous.
+
+.. c:function:: PyArray_ISFORTRAN(PyObject *arr)
+
+ Evaluates true if *arr* is Fortran-style contiguous and *not*
+ C-style contiguous. :c:func:`PyArray_IS_F_CONTIGUOUS`
+ is the correct way to test for Fortran-style contiguity.
+
+.. c:function:: PyArray_ISWRITEABLE(PyObject *arr)
+
+ Evaluates true if the data area of *arr* can be written to.
+
+.. c:function:: PyArray_ISALIGNED(PyObject *arr)
+
+ Evaluates true if the data area of *arr* is properly aligned on
+ the machine.
+
+.. c:function:: PyArray_ISBEHAVED(PyObject *arr)
+
+ Evaluates true if the data area of *arr* is aligned and writeable
+ and in machine byte-order according to its descriptor.
+
+.. c:function:: PyArray_ISBEHAVED_RO(PyObject *arr)
+
+ Evaluates true if the data area of *arr* is aligned and in machine
+ byte-order.
+
+.. c:function:: PyArray_ISCARRAY(PyObject *arr)
+
+ Evaluates true if the data area of *arr* is C-style contiguous,
+ and :c:func:`PyArray_ISBEHAVED` (*arr*) is true.
+
+.. c:function:: PyArray_ISFARRAY(PyObject *arr)
+
+ Evaluates true if the data area of *arr* is Fortran-style
+ contiguous and :c:func:`PyArray_ISBEHAVED` (*arr*) is true.
+
+.. c:function:: PyArray_ISCARRAY_RO(PyObject *arr)
+
+ Evaluates true if the data area of *arr* is C-style contiguous,
+ aligned, and in machine byte-order.
+
+.. c:function:: PyArray_ISFARRAY_RO(PyObject *arr)
+
+ Evaluates true if the data area of *arr* is Fortran-style
+ contiguous, aligned, and in machine byte-order.
+
+.. c:function:: PyArray_ISONESEGMENT(PyObject *arr)
+
+ Evaluates true if the data area of *arr* consists of a single
+ (C-style or Fortran-style) contiguous segment.
+
+.. c:function:: void PyArray_UpdateFlags(PyArrayObject* arr, int flagmask)
+
+ The :c:data:`NPY_ARRAY_C_CONTIGUOUS`, :c:data:`NPY_ARRAY_ALIGNED`, and
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS` array flags can be "calculated" from the
+ array object itself. This routine updates one or more of these
+ flags of *arr* as specified in *flagmask* by performing the
+ required calculation.
+
+
+.. warning::
+
+ It is important to keep the flags updated (using
+ :c:func:`PyArray_UpdateFlags` can help) whenever a manipulation with an
+ array is performed that might cause them to change. Later
+ calculations in NumPy that rely on the state of these flags do not
+ repeat the calculation to update them.
+
+
+Array method alternative API
+----------------------------
+
+
+Conversion
+^^^^^^^^^^
+
+.. c:function:: PyObject* PyArray_GetField( \
+ PyArrayObject* self, PyArray_Descr* dtype, int offset)
+
+ Equivalent to :meth:`ndarray.getfield<numpy.ndarray.getfield>`
+ (*self*, *dtype*, *offset*). This function `steals a reference
+ <https://docs.python.org/3/c-api/intro.html#reference-count-details>`_
+ to `PyArray_Descr` and returns a new array of the given `dtype` using
+ the data in the current array at a specified `offset` in bytes. The
+ `offset` plus the itemsize of the new array type must be less than
+ ``self->descr->elsize`` or an error is raised. The same shape and strides
+ as the original array are used. Therefore, this function has the
+ effect of returning a field from a structured array. But, it can also
+ be used to select specific bytes or groups of bytes from any array
+ type.
+
+.. c:function:: int PyArray_SetField( \
+ PyArrayObject* self, PyArray_Descr* dtype, int offset, PyObject* val)
+
+ Equivalent to :meth:`ndarray.setfield<numpy.ndarray.setfield>` (*self*, *val*, *dtype*, *offset*
+ ). Set the field starting at *offset* in bytes and of the given
+ *dtype* to *val*. The *offset* plus *dtype* ->elsize must be less
+ than *self* ->descr->elsize or an error is raised. Otherwise, the
+ *val* argument is converted to an array and copied into the field
+ pointed to. If necessary, the elements of *val* are repeated to
+ fill the destination array, but the number of elements in the
+ destination must be an integer multiple of the number of elements
+ in *val*.
+
+.. c:function:: PyObject* PyArray_Byteswap(PyArrayObject* self, Bool inplace)
+
+ Equivalent to :meth:`ndarray.byteswap<numpy.ndarray.byteswap>` (*self*, *inplace*). Return an array
+ whose data area is byteswapped. If *inplace* is non-zero, then do
+ the byteswap inplace and return a reference to self. Otherwise,
+ create a byteswapped copy and leave self unchanged.
+
+.. c:function:: PyObject* PyArray_NewCopy(PyArrayObject* old, NPY_ORDER order)
+
+ Equivalent to :meth:`ndarray.copy<numpy.ndarray.copy>` (*self*, *fortran*). Make a copy of the
+ *old* array. The returned array is always aligned and writeable
+ with data interpreted the same as the old array. If *order* is
+ :c:data:`NPY_CORDER`, then a C-style contiguous array is returned. If
+ *order* is :c:data:`NPY_FORTRANORDER`, then a Fortran-style contiguous
+ array is returned. If *order is* :c:data:`NPY_ANYORDER`, then the array
+ returned is Fortran-style contiguous only if the old one is;
+ otherwise, it is C-style contiguous.
+
+.. c:function:: PyObject* PyArray_ToList(PyArrayObject* self)
+
+ Equivalent to :meth:`ndarray.tolist<numpy.ndarray.tolist>` (*self*). Return a nested Python list
+ from *self*.
+
+.. c:function:: PyObject* PyArray_ToString(PyArrayObject* self, NPY_ORDER order)
+
+ Equivalent to :meth:`ndarray.tobytes<numpy.ndarray.tobytes>` (*self*, *order*). Return the bytes
+ of this array in a Python string.
+
+.. c:function:: PyObject* PyArray_ToFile( \
+ PyArrayObject* self, FILE* fp, char* sep, char* format)
+
+ Write the contents of *self* to the file pointer *fp* in C-style
+ contiguous fashion. Write the data as binary bytes if *sep* is the
+ string ""or ``NULL``. Otherwise, write the contents of *self* as
+ text using the *sep* string as the item separator. Each item will
+ be printed to the file. If the *format* string is not ``NULL`` or
+ "", then it is a Python print statement format string showing how
+ the items are to be written.
+
+.. c:function:: int PyArray_Dump(PyObject* self, PyObject* file, int protocol)
+
+ Pickle the object in *self* to the given *file* (either a string
+ or a Python file object). If *file* is a Python string it is
+ considered to be the name of a file which is then opened in binary
+ mode. The given *protocol* is used (if *protocol* is negative, the
+ highest available protocol is used). This is a simple wrapper around
+ ``pickle.dump`` (*self*, *file*, *protocol*).
+
+.. c:function:: PyObject* PyArray_Dumps(PyObject* self, int protocol)
+
+ Pickle the object in *self* to a Python string and return it. Use
+ the Pickle *protocol* provided (or the highest available if
+ *protocol* is negative).
+
+.. c:function:: int PyArray_FillWithScalar(PyArrayObject* arr, PyObject* obj)
+
+ Fill the array, *arr*, with the given scalar object, *obj*. The
+ object is first converted to the data type of *arr*, and then
+ copied into every location. A -1 is returned if an error occurs,
+ otherwise 0 is returned.
+
+.. c:function:: PyObject* PyArray_View( \
+ PyArrayObject* self, PyArray_Descr* dtype, PyTypeObject *ptype)
+
+ Equivalent to :meth:`ndarray.view<numpy.ndarray.view>` (*self*, *dtype*). Return a new
+ view of the array *self* as possibly a different data-type, *dtype*,
+ and different array subclass *ptype*.
+
+ If *dtype* is ``NULL``, then the returned array will have the same
+ data type as *self*. The new data-type must be consistent with the
+ size of *self*. Either the itemsizes must be identical, or *self* must
+ be single-segment and the total number of bytes must be the same.
+ In the latter case the dimensions of the returned array will be
+ altered in the last (or first for Fortran-style contiguous arrays)
+ dimension. The data area of the returned array and self is exactly
+ the same.
+
+
+Shape Manipulation
+^^^^^^^^^^^^^^^^^^
+
+.. c:function:: PyObject* PyArray_Newshape( \
+ PyArrayObject* self, PyArray_Dims* newshape, NPY_ORDER order)
+
+ Result will be a new array (pointing to the same memory location
+ as *self* if possible), but having a shape given by *newshape*.
+ If the new shape is not compatible with the strides of *self*,
+ then a copy of the array with the new specified shape will be
+ returned.
+
+.. c:function:: PyObject* PyArray_Reshape(PyArrayObject* self, PyObject* shape)
+
+ Equivalent to :meth:`ndarray.reshape<numpy.ndarray.reshape>` (*self*, *shape*) where *shape* is a
+ sequence. Converts *shape* to a :c:type:`PyArray_Dims` structure and
+ calls :c:func:`PyArray_Newshape` internally.
+ Provided for backward compatibility -- not recommended.
+
+.. c:function:: PyObject* PyArray_Squeeze(PyArrayObject* self)
+
+ Equivalent to :meth:`ndarray.squeeze<numpy.ndarray.squeeze>` (*self*). Return a new view of *self*
+ with all of the dimensions of length 1 removed from the shape.
+
+.. warning::
+
+ matrix objects are always 2-dimensional. Therefore,
+ :c:func:`PyArray_Squeeze` has no effect on arrays of matrix sub-class.
+
+.. c:function:: PyObject* PyArray_SwapAxes(PyArrayObject* self, int a1, int a2)
+
+ Equivalent to :meth:`ndarray.swapaxes<numpy.ndarray.swapaxes>` (*self*, *a1*, *a2*). The returned
+ array is a new view of the data in *self* with the given axes,
+ *a1* and *a2*, swapped.
+
+.. c:function:: PyObject* PyArray_Resize( \
+ PyArrayObject* self, PyArray_Dims* newshape, int refcheck, \
+ NPY_ORDER fortran)
+
+ Equivalent to :meth:`ndarray.resize<numpy.ndarray.resize>` (*self*,
+ *newshape*, refcheck = *refcheck*, order = fortran). This function only
+ works on single-segment arrays. It changes the shape of *self* in place
+ and will reallocate the memory for *self* if *newshape* has a
+ different total number of elements than the old shape. If
+ reallocation is necessary, then *self* must own its data, have
+ ``self->base == NULL``, have ``self->weakrefs == NULL``, and
+ (unless refcheck is 0) not be referenced by any other array.
+ The fortran argument can be :c:data:`NPY_ANYORDER`, :c:data:`NPY_CORDER`,
+ or :c:data:`NPY_FORTRANORDER`. It currently has no effect. Eventually
+ it could be used to determine how the resize operation should view
+ the data when constructing a differently-dimensioned array.
+ Returns None on success and NULL on error.
+
+.. c:function:: PyObject* PyArray_Transpose( \
+ PyArrayObject* self, PyArray_Dims* permute)
+
+ Equivalent to :meth:`ndarray.transpose<numpy.ndarray.transpose>` (*self*, *permute*). Permute the
+ axes of the ndarray object *self* according to the data structure
+ *permute* and return the result. If *permute* is ``NULL``, then
+ the resulting array has its axes reversed. For example, if *self*
+ has shape :math:`10\times20\times30`, and *permute* ``.ptr`` is
+ (0,2,1) the shape of the result is :math:`10\times30\times20.` If
+ *permute* is ``NULL``, the shape of the result is
+ :math:`30\times20\times10.`
+
+.. c:function:: PyObject* PyArray_Flatten(PyArrayObject* self, NPY_ORDER order)
+
+ Equivalent to :meth:`ndarray.flatten<numpy.ndarray.flatten>` (*self*, *order*). Return a 1-d copy
+ of the array. If *order* is :c:data:`NPY_FORTRANORDER` the elements are
+ scanned out in Fortran order (first-dimension varies the
+ fastest). If *order* is :c:data:`NPY_CORDER`, the elements of ``self``
+ are scanned in C-order (last dimension varies the fastest). If
+ *order* :c:data:`NPY_ANYORDER`, then the result of
+ :c:func:`PyArray_ISFORTRAN` (*self*) is used to determine which order
+ to flatten.
+
+.. c:function:: PyObject* PyArray_Ravel(PyArrayObject* self, NPY_ORDER order)
+
+ Equivalent to *self*.ravel(*order*). Same basic functionality
+ as :c:func:`PyArray_Flatten` (*self*, *order*) except if *order* is 0
+ and *self* is C-style contiguous, the shape is altered but no copy
+ is performed.
+
+
+Item selection and manipulation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. c:function:: PyObject* PyArray_TakeFrom( \
+ PyArrayObject* self, PyObject* indices, int axis, PyArrayObject* ret, \
+ NPY_CLIPMODE clipmode)
+
+ Equivalent to :meth:`ndarray.take<numpy.ndarray.take>` (*self*, *indices*, *axis*, *ret*,
+ *clipmode*) except *axis* =None in Python is obtained by setting
+ *axis* = :c:data:`NPY_MAXDIMS` in C. Extract the items from self
+ indicated by the integer-valued *indices* along the given *axis.*
+ The clipmode argument can be :c:data:`NPY_RAISE`, :c:data:`NPY_WRAP`, or
+ :c:data:`NPY_CLIP` to indicate what to do with out-of-bound indices. The
+ *ret* argument can specify an output array rather than having one
+ created internally.
+
+.. c:function:: PyObject* PyArray_PutTo( \
+ PyArrayObject* self, PyObject* values, PyObject* indices, \
+ NPY_CLIPMODE clipmode)
+
+ Equivalent to *self*.put(*values*, *indices*, *clipmode*
+ ). Put *values* into *self* at the corresponding (flattened)
+ *indices*. If *values* is too small it will be repeated as
+ necessary.
+
+.. c:function:: PyObject* PyArray_PutMask( \
+ PyArrayObject* self, PyObject* values, PyObject* mask)
+
+ Place the *values* in *self* wherever corresponding positions
+ (using a flattened context) in *mask* are true. The *mask* and
+ *self* arrays must have the same total number of elements. If
+ *values* is too small, it will be repeated as necessary.
+
+.. c:function:: PyObject* PyArray_Repeat( \
+ PyArrayObject* self, PyObject* op, int axis)
+
+ Equivalent to :meth:`ndarray.repeat<numpy.ndarray.repeat>` (*self*, *op*, *axis*). Copy the
+ elements of *self*, *op* times along the given *axis*. Either
+ *op* is a scalar integer or a sequence of length *self*
+ ->dimensions[ *axis* ] indicating how many times to repeat each
+ item along the axis.
+
+.. c:function:: PyObject* PyArray_Choose( \
+ PyArrayObject* self, PyObject* op, PyArrayObject* ret, \
+ NPY_CLIPMODE clipmode)
+
+ Equivalent to :meth:`ndarray.choose<numpy.ndarray.choose>` (*self*, *op*, *ret*, *clipmode*).
+ Create a new array by selecting elements from the sequence of
+ arrays in *op* based on the integer values in *self*. The arrays
+ must all be broadcastable to the same shape and the entries in
+ *self* should be between 0 and len(*op*). The output is placed
+ in *ret* unless it is ``NULL`` in which case a new output is
+ created. The *clipmode* argument determines behavior for when
+ entries in *self* are not between 0 and len(*op*).
+
+ .. c:var:: NPY_RAISE
+
+ raise a ValueError;
+
+ .. c:var:: NPY_WRAP
+
+ wrap values < 0 by adding len(*op*) and values >=len(*op*)
+ by subtracting len(*op*) until they are in range;
+
+ .. c:var:: NPY_CLIP
+
+ all values are clipped to the region [0, len(*op*) ).
+
+
+.. c:function:: PyObject* PyArray_Sort(PyArrayObject* self, int axis, NPY_SORTKIND kind)
+
+ Equivalent to :meth:`ndarray.sort<numpy.ndarray.sort>` (*self*, *axis*, *kind*).
+ Return an array with the items of *self* sorted along *axis*. The array
+ is sorted using the algorithm denoted by *kind*, an enumerated value
+ selecting the sorting algorithm to use.
+
+.. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis)
+
+ Equivalent to :meth:`ndarray.argsort<numpy.ndarray.argsort>` (*self*, *axis*).
+ Return an array of indices such that selection of these indices
+ along the given ``axis`` would return a sorted version of *self*. If *self* ->descr
+ is a data-type with fields defined, then self->descr->names is used
+ to determine the sort order. A comparison where the first field is equal
+ will use the second field and so on. To alter the sort order of a
+ structured array, create a new data-type with a different order of names
+ and construct a view of the array with that new data-type.
+
+.. c:function:: PyObject* PyArray_LexSort(PyObject* sort_keys, int axis)
+
+ Given a sequence of arrays (*sort_keys*) of the same shape,
+ return an array of indices (similar to :c:func:`PyArray_ArgSort` (...))
+ that would sort the arrays lexicographically. A lexicographic sort
+ specifies that when two keys are found to be equal, the order is
+ based on comparison of subsequent keys. A merge sort (which leaves
+ equal entries unmoved) is required to be defined for the
+ types. The sort is accomplished by sorting the indices first using
+ the first *sort_key* and then using the second *sort_key* and so
+ forth. This is equivalent to the lexsort(*sort_keys*, *axis*)
+ Python command. Because of the way the merge-sort works, be sure
+ to understand the order the *sort_keys* must be in (reversed from
+ the order you would use when comparing two elements).
+
+ If these arrays are all collected in a structured array, then
+ :c:func:`PyArray_Sort` (...) can also be used to sort the array
+ directly.
+
+.. c:function:: PyObject* PyArray_SearchSorted( \
+ PyArrayObject* self, PyObject* values, NPY_SEARCHSIDE side, \
+ PyObject* perm)
+
+ Equivalent to :meth:`ndarray.searchsorted<numpy.ndarray.searchsorted>` (*self*, *values*, *side*,
+ *perm*). Assuming *self* is a 1-d array in ascending order, then the
+ output is an array of indices the same shape as *values* such that, if
+ the elements in *values* were inserted before the indices, the order of
+ *self* would be preserved. No checking is done on whether or not self is
+ in ascending order.
+
+ The *side* argument indicates whether the index returned should be that of
+ the first suitable location (if :c:data:`NPY_SEARCHLEFT`) or of the last
+ (if :c:data:`NPY_SEARCHRIGHT`).
+
+ The *perm* argument, if not ``NULL``, must be a 1D array of integer
+ indices the same length as *self* that sorts it into ascending order.
+ This is typically the result of a call to :c:func:`PyArray_ArgSort` (...).
+ Binary search is used to find the required insertion points.
+
+.. c:function:: int PyArray_Partition( \
+ PyArrayObject *self, PyArrayObject * ktharray, int axis, \
+ NPY_SELECTKIND which)
+
+ Equivalent to :meth:`ndarray.partition<numpy.ndarray.partition>` (*self*, *ktharray*, *axis*,
+ *kind*). Partitions the array so that the values of the elements indexed
+ by *ktharray* are in the positions they would occupy if the array were
+ fully sorted, with all smaller elements placed before the kth element and
+ all equal or greater elements placed after it. The ordering of all
+ elements within the partitions is undefined.
+ If *self*->descr is a data-type with fields defined, then
+ self->descr->names is used to determine the sort order. A comparison where
+ the first field is equal will use the second field and so on. To alter the
+ sort order of a structured array, create a new data-type with a different
+ order of names and construct a view of the array with that new data-type.
+ Returns zero on success and -1 on failure.
+
+.. c:function:: PyObject* PyArray_ArgPartition( \
+ PyArrayObject *op, PyArrayObject * ktharray, int axis, \
+ NPY_SELECTKIND which)
+
+ Equivalent to :meth:`ndarray.argpartition<numpy.ndarray.argpartition>` (*self*, *ktharray*, *axis*,
+ *kind*). Return an array of indices such that selection of these indices
+ along the given ``axis`` would return a partitioned version of *self*.
+
+.. c:function:: PyObject* PyArray_Diagonal( \
+ PyArrayObject* self, int offset, int axis1, int axis2)
+
+ Equivalent to :meth:`ndarray.diagonal<numpy.ndarray.diagonal>` (*self*, *offset*, *axis1*, *axis2*
+ ). Return the *offset* diagonals of the 2-d arrays defined by
+ *axis1* and *axis2*.
+
+.. c:function:: npy_intp PyArray_CountNonzero(PyArrayObject* self)
+
+ .. versionadded:: 1.6
+
+ Counts the number of non-zero elements in the array object *self*.
+
+.. c:function:: PyObject* PyArray_Nonzero(PyArrayObject* self)
+
+ Equivalent to :meth:`ndarray.nonzero<numpy.ndarray.nonzero>` (*self*). Returns a tuple of index
+ arrays that select elements of *self* that are nonzero. If
+ ``nd = PyArray_NDIM(self)`` equals 1, then a single index array is
+ returned. The index arrays have data type :c:data:`NPY_INTP`. If a
+ tuple is returned (nd :math:`\neq` 1), then its length is nd.
+
+.. c:function:: PyObject* PyArray_Compress( \
+ PyArrayObject* self, PyObject* condition, int axis, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.compress<numpy.ndarray.compress>` (*self*, *condition*, *axis*
+ ). Return the elements along *axis* corresponding to elements of
+ *condition* that are true.
+
+
+Calculation
+^^^^^^^^^^^
+
+.. tip::
+
+ Pass in :c:data:`NPY_MAXDIMS` for axis in order to achieve the same
+ effect that is obtained by passing in ``axis=None`` in Python
+ (treating the array as a 1-d array).
+
+
+.. note::
+
+ The out argument specifies where to place the result. If out is
+ NULL, then the output array is created, otherwise the output is
+ placed in out which must be the correct size and type. A new
+ reference to the output array is always returned even when out
+ is not NULL. The caller of the routine has the responsibility
+ to ``Py_DECREF`` out if not NULL or a memory-leak will occur.
+
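+A short sketch combining the two conventions above (``NPY_MAXDIMS``
+standing in for ``axis=None``, and the caller owning the returned
+reference), with ``arr`` assumed to be a valid array:
+
+.. code-block:: c
+
+   /* Sketch: total = arr.sum(), reducing over all axes. */
+   PyObject *total = PyArray_Sum(arr, NPY_MAXDIMS, NPY_NOTYPE, NULL);
+   if (total == NULL) {
+       return NULL;
+   }
+   /* ... use total ... */
+   Py_DECREF(total);  /* the caller owns the returned reference */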
+
+.. c:function:: PyObject* PyArray_ArgMax( \
+ PyArrayObject* self, int axis, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.argmax<numpy.ndarray.argmax>` (*self*, *axis*). Return the index of
+ the largest element of *self* along *axis*.
+
+.. c:function:: PyObject* PyArray_ArgMin( \
+ PyArrayObject* self, int axis, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.argmin<numpy.ndarray.argmin>` (*self*, *axis*). Return the index of
+ the smallest element of *self* along *axis*.
+
+.. c:function:: PyObject* PyArray_Max( \
+ PyArrayObject* self, int axis, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.max<numpy.ndarray.max>` (*self*, *axis*). Returns the largest
+ element of *self* along the given *axis*. When the result is a single
+ element, returns a numpy scalar instead of an ndarray.
+
+.. c:function:: PyObject* PyArray_Min( \
+ PyArrayObject* self, int axis, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.min<numpy.ndarray.min>` (*self*, *axis*). Return the smallest
+ element of *self* along the given *axis*. When the result is a single
+ element, returns a numpy scalar instead of an ndarray.
+
+
+.. c:function:: PyObject* PyArray_Ptp( \
+ PyArrayObject* self, int axis, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.ptp<numpy.ndarray.ptp>` (*self*, *axis*). Return the difference
+ between the largest element of *self* along *axis* and the
+ smallest element of *self* along *axis*. When the result is a single
+ element, returns a numpy scalar instead of an ndarray.
+
+
+
+
+.. note::
+
+ The rtype argument specifies the data-type the reduction should
+ take place over. This is important if the data-type of the array
+ is not "large" enough to handle the output. By default, all
+ integer data-types are made at least as large as :c:data:`NPY_LONG`
+ for the "add" and "multiply" ufuncs (which form the basis for
+ mean, sum, cumsum, prod, and cumprod functions).
+
+.. c:function:: PyObject* PyArray_Mean( \
+ PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.mean<numpy.ndarray.mean>` (*self*, *axis*, *rtype*). Returns the
+ mean of the elements along the given *axis*, using the enumerated
+ type *rtype* as the data type to sum in. Default sum behavior is
+ obtained using :c:data:`NPY_NOTYPE` for *rtype*.
+
+.. c:function:: PyObject* PyArray_Trace( \
+ PyArrayObject* self, int offset, int axis1, int axis2, int rtype, \
+ PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.trace<numpy.ndarray.trace>` (*self*, *offset*, *axis1*, *axis2*,
+ *rtype*). Return the sum (using *rtype* as the data type of
+ summation) over the *offset* diagonal elements of the 2-d arrays
+ defined by *axis1* and *axis2* variables. A positive offset
+ chooses diagonals above the main diagonal. A negative offset
+ selects diagonals below the main diagonal.
+
+.. c:function:: PyObject* PyArray_Clip( \
+ PyArrayObject* self, PyObject* min, PyObject* max)
+
+ Equivalent to :meth:`ndarray.clip<numpy.ndarray.clip>` (*self*, *min*, *max*). Clip an array,
+ *self*, so that values larger than *max* are fixed to *max* and
+ values less than *min* are fixed to *min*.
+
+.. c:function:: PyObject* PyArray_Conjugate(PyArrayObject* self)
+
+ Equivalent to :meth:`ndarray.conjugate<numpy.ndarray.conjugate>` (*self*).
+ Return the complex conjugate of *self*. If *self* is not of
+ complex data type, then return *self* with a reference.
+
+.. c:function:: PyObject* PyArray_Round( \
+ PyArrayObject* self, int decimals, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.round<numpy.ndarray.round>` (*self*, *decimals*, *out*). Returns
+ the array with elements rounded to the nearest decimal place. The
+ decimal place is defined as the :math:`10^{-\textrm{decimals}}`
+ digit so that negative *decimals* cause rounding to the nearest 10's,
+ 100's, etc. If *out* is ``NULL``, then the output array is created;
+ otherwise the output is placed in *out*, which must be of the correct
+ size and type.
+
+.. c:function:: PyObject* PyArray_Std( \
+ PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.std<numpy.ndarray.std>` (*self*, *axis*, *rtype*). Return the
+ standard deviation using data along *axis* converted to data type
+ *rtype*.
+
+.. c:function:: PyObject* PyArray_Sum( \
+ PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.sum<numpy.ndarray.sum>` (*self*, *axis*, *rtype*). Return 1-d
+ vector sums of elements in *self* along *axis*. Perform the sum
+ after converting data to data type *rtype*.
+
+.. c:function:: PyObject* PyArray_CumSum( \
+ PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.cumsum<numpy.ndarray.cumsum>` (*self*, *axis*, *rtype*). Return
+ cumulative 1-d sums of elements in *self* along *axis*. Perform
+ the sum after converting data to data type *rtype*.
+
+.. c:function:: PyObject* PyArray_Prod( \
+ PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.prod<numpy.ndarray.prod>` (*self*, *axis*, *rtype*). Return 1-d
+ products of elements in *self* along *axis*. Perform the product
+ after converting data to data type *rtype*.
+
+.. c:function:: PyObject* PyArray_CumProd( \
+ PyArrayObject* self, int axis, int rtype, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.cumprod<numpy.ndarray.cumprod>` (*self*, *axis*, *rtype*). Return
+ 1-d cumulative products of elements in ``self`` along ``axis``.
+ Perform the product after converting data to data type ``rtype``.
+
+.. c:function:: PyObject* PyArray_All( \
+ PyArrayObject* self, int axis, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.all<numpy.ndarray.all>` (*self*, *axis*). Return an array with
+ True elements for every 1-d sub-array of ``self`` defined by
+ ``axis`` in which all the elements are True.
+
+.. c:function:: PyObject* PyArray_Any( \
+ PyArrayObject* self, int axis, PyArrayObject* out)
+
+ Equivalent to :meth:`ndarray.any<numpy.ndarray.any>` (*self*, *axis*). Return an array with
+ True elements for every 1-d sub-array of *self* defined by *axis*
+ in which any of the elements are True.
+
+Functions
+---------
+
+
+Array Functions
+^^^^^^^^^^^^^^^
+
+.. c:function:: int PyArray_AsCArray( \
+ PyObject** op, void* ptr, npy_intp* dims, int nd, int typenum, \
+ int itemsize)
+
+ Sometimes it is useful to access a multidimensional array as a
+ C-style multi-dimensional array so that algorithms can be
+ implemented using C's a[i][j][k] syntax. This routine returns a
+ pointer, *ptr*, that simulates this kind of C-style array, for
+ 1-, 2-, and 3-d ndarrays.
+
+ :param op:
+
+ The address to any Python object. This Python object will be replaced
+ with an equivalent well-behaved, C-style contiguous, ndarray of the
+ given data type specified by the last two arguments. Be sure that
+ stealing a reference in this way to the input object is justified.
+
+ :param ptr:
+
+ The address to a (ctype* for 1-d, ctype** for 2-d or ctype*** for 3-d)
+ variable where ctype is the equivalent C-type for the data type. On
+ return, *ptr* will be addressable as a 1-d, 2-d, or 3-d array.
+
+ :param dims:
+
+ An output array that contains the shape of the array object. This
+ array gives boundaries on any looping that will take place.
+
+ :param nd:
+
+ The dimensionality of the array (1, 2, or 3).
+
+ :param typenum:
+
+ The expected data type of the array.
+
+ :param itemsize:
+
+ This argument is only needed when *typenum* represents a
+ flexible array. Otherwise it should be 0.
+
+.. note::
+
+ The simulation of a C-style array is not complete for 2-d and 3-d
+ arrays. For example, the simulated arrays of pointers cannot be passed
+ to subroutines expecting specific, statically-defined 2-d and 3-d
+ arrays. To pass to functions requiring those kind of inputs, you must
+ statically define the required array and copy data.
+
+.. c:function:: int PyArray_Free(PyObject* op, void* ptr)
+
+ Must be called with the same objects and memory locations returned
+ from :c:func:`PyArray_AsCArray` (...). This function cleans up memory
+ that otherwise would get leaked.
+
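+A usage sketch following the signature documented above (error handling
+abbreviated; note that :c:func:`PyArray_AsCArray` consumes the
+reference stored in ``op``, so an owned reference is passed in):
+
+.. code-block:: c
+
+   /* Sketch: sum a 2-d NPY_DOUBLE array via C's a[i][j] syntax. */
+   double **a;
+   npy_intp dims[2];
+   npy_intp i, j;
+   double total = 0.0;
+   PyObject *op = input;  /* `input` is some array-like object */
+
+   Py_INCREF(op);  /* AsCArray takes over this reference */
+   if (PyArray_AsCArray(&op, (void *)&a, dims, 2, NPY_DOUBLE, 0) < 0) {
+       return NULL;
+   }
+   for (i = 0; i < dims[0]; i++) {
+       for (j = 0; j < dims[1]; j++) {
+           total += a[i][j];
+       }
+   }
+   PyArray_Free(op, (void *)a);  /* release the simulated C array */
+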
+.. c:function:: PyObject* PyArray_Concatenate(PyObject* obj, int axis)
+
+ Join the sequence of objects in *obj* together along *axis* into a
+ single array. If the dimensions or types are not compatible an
+ error is raised.
+
+.. c:function:: PyObject* PyArray_InnerProduct(PyObject* obj1, PyObject* obj2)
+
+ Compute a product-sum over the last dimensions of *obj1* and
+ *obj2*. Neither array is conjugated.
+
+.. c:function:: PyObject* PyArray_MatrixProduct(PyObject* obj1, PyObject* obj2)
+
+ Compute a product-sum over the last dimension of *obj1* and the
+ second-to-last dimension of *obj2*. For 2-d arrays this is a
+ matrix-product. Neither array is conjugated.
+
+.. c:function:: PyObject* PyArray_MatrixProduct2( \
+ PyObject* obj1, PyObject* obj2, PyArrayObject* out)
+
+ .. versionadded:: 1.6
+
+ Same as PyArray_MatrixProduct, but store the result in *out*. The
+ output array must have the correct shape, type, and be
+ C-contiguous, or an exception is raised.
+
+.. c:function:: PyObject* PyArray_EinsteinSum( \
+ char* subscripts, npy_intp nop, PyArrayObject** op_in, \
+ PyArray_Descr* dtype, NPY_ORDER order, NPY_CASTING casting, \
+ PyArrayObject* out)
+
+ .. versionadded:: 1.6
+
+ Applies the Einstein summation convention to the array operands
+ provided, returning a new array or placing the result in *out*.
+ The string in *subscripts* is a comma separated list of index
+ letters. The number of operands is in *nop*, and *op_in* is an
+ array containing those operands. The data type of the output can
+ be forced with *dtype*, the output order can be forced with *order*
+ (:c:data:`NPY_KEEPORDER` is recommended), and when *dtype* is specified,
+ *casting* indicates how permissive the data conversion should be.
+
+ See the :func:`~numpy.einsum` function for more details.
+
+.. c:function:: PyObject* PyArray_CopyAndTranspose(PyObject* op)
+
+ A specialized copy and transpose function that works only for 2-d
+ arrays. The returned array is a transposed copy of *op*.
+
+.. c:function:: PyObject* PyArray_Correlate( \
+ PyObject* op1, PyObject* op2, int mode)
+
+ Compute the 1-d correlation of the 1-d arrays *op1* and *op2*.
+ The correlation is computed at each output point by multiplying
+ *op1* by a shifted version of *op2* and summing the result. As a
+ result of the shift, needed values outside of the defined range of
+ *op1* and *op2* are interpreted as zero. The mode determines how
+ many shifts to return: 0 - return only shifts that did not need to
+ assume zero values; 1 - return an object that is the same size as
+ *op1*; 2 - return all possible shifts (any overlap at all is
+ accepted).
+
+ .. rubric:: Notes
+
+ This does not compute the usual correlation: if op2 is larger than op1, the
+ arguments are swapped, and the conjugate is never taken for complex arrays.
+ See PyArray_Correlate2 for the usual signal processing correlation.
+
+.. c:function:: PyObject* PyArray_Correlate2( \
+ PyObject* op1, PyObject* op2, int mode)
+
+ Updated version of PyArray_Correlate, which uses the usual definition of
+ correlation for 1d arrays. The correlation is computed at each output point
+ by multiplying *op1* by a shifted version of *op2* and summing the result.
+ As a result of the shift, needed values outside of the defined range of
+ *op1* and *op2* are interpreted as zero. The mode determines how many
+ shifts to return: 0 - return only shifts that did not need to assume
+ zero values; 1 - return an object that is the same size as *op1*; 2 -
+ return all
+ possible shifts (any overlap at all is accepted).
+
+ .. rubric:: Notes
+
+ Compute z as follows::
+
+ z[k] = sum_n op1[n] * conj(op2[n+k])
+
+.. c:function:: PyObject* PyArray_Where( \
+ PyObject* condition, PyObject* x, PyObject* y)
+
+ If both ``x`` and ``y`` are ``NULL``, then return
+ :c:func:`PyArray_Nonzero` (*condition*). Otherwise, both *x* and *y*
+ must be given and the object returned is shaped like *condition*
+ and has elements of *x* and *y* where *condition* is respectively
+ True or False.
+
+
+Other functions
+^^^^^^^^^^^^^^^
+
+.. c:function:: Bool PyArray_CheckStrides( \
+ int elsize, int nd, npy_intp numbytes, npy_intp const* dims, \
+ npy_intp const* newstrides)
+
+ Determine if *newstrides* is a strides array consistent with the
+ memory of an *nd* -dimensional array with shape ``dims`` and
+ element-size, *elsize*. The *newstrides* array is checked to see
+ if jumping by the provided number of bytes in each direction will
+ ever mean jumping more than *numbytes* which is the assumed size
+ of the available memory segment. If *numbytes* is 0, then an
+ equivalent *numbytes* is computed assuming *nd*, *dims*, and
+ *elsize* refer to a single-segment array. Return :c:data:`NPY_TRUE` if
+ *newstrides* is acceptable, otherwise return :c:data:`NPY_FALSE`.
+
+.. c:function:: npy_intp PyArray_MultiplyList(npy_intp const* seq, int n)
+
+.. c:function:: int PyArray_MultiplyIntList(int const* seq, int n)
+
+ Both of these routines multiply together the *n* integers in the array,
+ *seq*, and return the result. No overflow checking is performed.
+
+.. c:function:: int PyArray_CompareLists(npy_intp const* l1, npy_intp const* l2, int n)
+
+ Given two *n* -length arrays of integers, *l1* and *l2*, return
+ 1 if the lists are identical; otherwise, return 0.
+
+
+Auxiliary Data With Object Semantics
+------------------------------------
+
+.. versionadded:: 1.7.0
+
+.. c:type:: NpyAuxData
+
+When working with more complex dtypes which are composed of other dtypes,
+such as the struct dtype, creating inner loops that manipulate the dtypes
+requires carrying along additional data. NumPy supports this idea
+through a struct :c:type:`NpyAuxData`, mandating a few conventions so that
+it is possible to do this.
+
+Defining an :c:type:`NpyAuxData` is similar to defining a class in C++,
+but the object semantics have to be tracked manually since the API is in C.
+Here's an example for a function which doubles up an element using
+an element copier function as a primitive::
+
+ typedef struct {
+ NpyAuxData base;
+ ElementCopier_Func *func;
+ NpyAuxData *funcdata;
+ } eldoubler_aux_data;
+
+ void free_element_doubler_aux_data(NpyAuxData *data)
+ {
+ eldoubler_aux_data *d = (eldoubler_aux_data *)data;
+ /* Free the memory owned by this auxdata */
+ NPY_AUXDATA_FREE(d->funcdata);
+ PyArray_free(d);
+ }
+
+ NpyAuxData *clone_element_doubler_aux_data(NpyAuxData *data)
+ {
+ eldoubler_aux_data *ret = PyArray_malloc(sizeof(eldoubler_aux_data));
+ if (ret == NULL) {
+ return NULL;
+ }
+
+ /* Raw copy of all data */
+ memcpy(ret, data, sizeof(eldoubler_aux_data));
+
+ /* Fix up the owned auxdata so we have our own copy */
+ ret->funcdata = NPY_AUXDATA_CLONE(ret->funcdata);
+ if (ret->funcdata == NULL) {
+ PyArray_free(ret);
+ return NULL;
+ }
+
+ return (NpyAuxData *)ret;
+ }
+
+ NpyAuxData *create_element_doubler_aux_data(
+ ElementCopier_Func *func,
+ NpyAuxData *funcdata)
+ {
+ eldoubler_aux_data *ret = PyArray_malloc(sizeof(eldoubler_aux_data));
+ if (ret == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+ memset(ret, 0, sizeof(eldoubler_aux_data));
+ ret->base.free = &free_element_doubler_aux_data;
+ ret->base.clone = &clone_element_doubler_aux_data;
+ ret->func = func;
+ ret->funcdata = funcdata;
+
+ return (NpyAuxData *)ret;
+ }
+
+.. c:type:: NpyAuxData_FreeFunc
+
+ The function pointer type for NpyAuxData free functions.
+
+.. c:type:: NpyAuxData_CloneFunc
+
+ The function pointer type for NpyAuxData clone functions. These
+ functions should never set a Python exception on error, because
+ they may be called from a multi-threaded context.
+
+.. c:function:: NPY_AUXDATA_FREE(auxdata)
+
+ A macro which calls the auxdata's free function appropriately;
+ it does nothing if *auxdata* is ``NULL``.
+
+.. c:function:: NPY_AUXDATA_CLONE(auxdata)
+
+ A macro which calls the auxdata's clone function appropriately,
+ returning a deep copy of the auxiliary data.
+
+Array Iterators
+---------------
+
+As of NumPy 1.6.0, these array iterators are superseded by
+the new array iterator, :c:type:`NpyIter`.
+
+An array iterator is a simple way to access the elements of an
+N-dimensional array quickly and efficiently. Section `2
+<#sec-array-iterator>`__ provides more description and examples of
+this useful approach to looping over an array.
+
+.. c:function:: PyObject* PyArray_IterNew(PyObject* arr)
+
+ Return an array iterator object from the array, *arr*. This is
+ equivalent to ``arr.flat``. The array iterator object makes
+ it easy to loop over an N-dimensional non-contiguous array in
+ C-style contiguous fashion.
+
+.. c:function:: PyObject* PyArray_IterAllButAxis(PyObject* arr, int* axis)
+
+ Return an array iterator that will iterate over all axes but the
+ one provided in *\*axis*. The returned iterator cannot be used
+ with :c:func:`PyArray_ITER_GOTO1D`. This iterator could be used to
+ write something similar to what ufuncs do wherein the loop over
+ the largest axis is done by a separate sub-routine. If *\*axis* is
+ negative then *\*axis* will be set to the axis having the smallest
+ stride and that axis will be used.
+
+.. c:function:: PyObject *PyArray_BroadcastToShape( \
+ PyObject* arr, npy_intp *dimensions, int nd)
+
+ Return an array iterator that is broadcast to iterate as an array
+ of the shape provided by *dimensions* and *nd*.
+
+.. c:function:: int PyArrayIter_Check(PyObject* op)
+
+ Evaluates true if *op* is an array iterator (or instance of a
+ subclass of the array iterator type).
+
+.. c:function:: void PyArray_ITER_RESET(PyObject* iterator)
+
+ Reset an *iterator* to the beginning of the array.
+
+.. c:function:: void PyArray_ITER_NEXT(PyObject* iterator)
+
+ Increment the index and the dataptr members of the *iterator* to
+ point to the next element of the array. If the array is not
+ (C-style) contiguous, also increment the N-dimensional coordinates
+ array.
+
+.. c:function:: void *PyArray_ITER_DATA(PyObject* iterator)
+
+ A pointer to the current element of the array.
+
+.. c:function:: void PyArray_ITER_GOTO( \
+ PyObject* iterator, npy_intp* destination)
+
+ Set the *iterator* index, dataptr, and coordinates members to the
+ location in the array indicated by the N-dimensional c-array,
+ *destination*, which must have size at least ``iterator->nd_m1+1``.
+
+.. c:function:: void PyArray_ITER_GOTO1D(PyObject* iterator, npy_intp index)
+
+ Set the *iterator* index and dataptr to the location in the array
+ indicated by the integer *index* which points to an element in the
+ C-styled flattened array.
+
+.. c:function:: int PyArray_ITER_NOTDONE(PyObject* iterator)
+
+ Evaluates TRUE as long as the iterator has not looped through all of
+ the elements, otherwise it evaluates FALSE.
+
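+A minimal sketch combining the calls above (assuming *arr* is a
+``PyObject *`` pointing to an array of type :c:data:`NPY_DOUBLE`):
+
+.. code-block:: c
+
+    PyArrayIterObject *it = (PyArrayIterObject *)PyArray_IterNew(arr);
+    if (it == NULL) {
+        return NULL;
+    }
+    while (PyArray_ITER_NOTDONE(it)) {
+        double value = *(double *)PyArray_ITER_DATA(it);
+        /* ... use value ... */
+        PyArray_ITER_NEXT(it);
+    }
+    Py_DECREF(it);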
+
+Broadcasting (multi-iterators)
+------------------------------
+
+.. c:function:: PyObject* PyArray_MultiIterNew(int num, ...)
+
+ A simplified interface to broadcasting. This function takes the
+ number of arrays to broadcast and then *num* extra ( :c:type:`PyObject *<PyObject>`
+ ) arguments. These arguments are converted to arrays and iterators
+ are created. :c:func:`PyArray_Broadcast` is then called on the resulting
+ multi-iterator object. The resulting, broadcasted multi-iterator
+ object is then returned. A broadcasted operation can then be
+ performed using a single loop and using
+ :c:func:`PyArray_MultiIter_NEXT` (...).
+
+.. c:function:: void PyArray_MultiIter_RESET(PyObject* multi)
+
+ Reset all the iterators to the beginning in a multi-iterator
+ object, *multi*.
+
+.. c:function:: void PyArray_MultiIter_NEXT(PyObject* multi)
+
+ Advance each iterator in a multi-iterator object, *multi*, to its
+ next (broadcasted) element.
+
+.. c:function:: void *PyArray_MultiIter_DATA(PyObject* multi, int i)
+
+ Return the data-pointer of the *i* :math:`^{\textrm{th}}` iterator
+ in a multi-iterator object.
+
+.. c:function:: void PyArray_MultiIter_NEXTi(PyObject* multi, int i)
+
+ Advance the pointer of only the *i* :math:`^{\textrm{th}}` iterator.
+
+.. c:function:: void PyArray_MultiIter_GOTO( \
+ PyObject* multi, npy_intp* destination)
+
+ Advance each iterator in a multi-iterator object, *multi*, to the
+ given :math:`N` -dimensional *destination* where :math:`N` is the
+ number of dimensions in the broadcasted array.
+
+.. c:function:: void PyArray_MultiIter_GOTO1D(PyObject* multi, npy_intp index)
+
+ Advance each iterator in a multi-iterator object, *multi*, to the
+ corresponding location of the *index* into the flattened
+ broadcasted array.
+
+.. c:function:: int PyArray_MultiIter_NOTDONE(PyObject* multi)
+
+ Evaluates TRUE as long as the multi-iterator has not looped
+ through all of the elements (of the broadcasted result), otherwise
+ it evaluates FALSE.
+
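+A sketch of such a broadcast loop over two operands (assuming *op1* and
+*op2* are objects convertible to arrays of ``double``; error handling
+abbreviated):
+
+.. code-block:: c
+
+    PyObject *multi = PyArray_MultiIterNew(2, op1, op2);
+    if (multi == NULL) {
+        return NULL;
+    }
+    while (PyArray_MultiIter_NOTDONE(multi)) {
+        double x = *(double *)PyArray_MultiIter_DATA(multi, 0);
+        double y = *(double *)PyArray_MultiIter_DATA(multi, 1);
+        /* ... combine x and y ... */
+        PyArray_MultiIter_NEXT(multi);
+    }
+    Py_DECREF(multi);
+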
+.. c:function:: int PyArray_Broadcast(PyArrayMultiIterObject* mit)
+
+ This function encapsulates the broadcasting rules. The *mit*
+ container should already contain iterators for all the arrays that
+ need to be broadcast. On return, these iterators will be adjusted
+ so that iteration over each simultaneously will accomplish the
+ broadcasting. A negative number is returned if an error occurs.
+
+.. c:function:: int PyArray_RemoveSmallest(PyArrayMultiIterObject* mit)
+
+ This function takes a multi-iterator object that has been
+ previously "broadcasted," finds the dimension with the smallest
+ "sum of strides" in the broadcasted result and adapts all the
+ iterators so as not to iterate over that dimension (by effectively
+ making them of length-1 in that dimension). The corresponding
+ dimension is returned unless ``mit->nd`` is 0, in which case -1 is
+ returned. This function is useful for constructing ufunc-like
+ routines that broadcast their inputs correctly and then call a
+ strided 1-d version of the routine as the inner-loop. This 1-d
+ version is usually optimized for speed and for this reason the
+ loop should be performed over the axis that won't require large
+ stride jumps.
+
+Neighborhood iterator
+---------------------
+
+.. versionadded:: 1.4.0
+
+Neighborhood iterators are subclasses of the iterator object, and can be used
+to iterate over a neighborhood of a point. For example, you may want to iterate
+over every voxel of a 3d image, and for every such voxel, iterate over a
+hypercube. Neighborhood iterators automatically handle boundaries, thus making
+this kind of code much easier to write than manual boundary handling, at the
+cost of a slight overhead.
+
+.. c:function:: PyObject* PyArray_NeighborhoodIterNew( \
+ PyArrayIterObject* iter, npy_intp* bounds, int mode, \
+ PyArrayObject* fill_value)
+
+ This function creates a new neighborhood iterator from an existing
+ iterator. The neighborhood will be computed relatively to the position
+ currently pointed by *iter*, the bounds define the shape of the
+ neighborhood iterator, and the mode argument the boundaries handling mode.
+
+ The *bounds* argument is expected to be an array of 2 * iter->ao->nd
+ values; the range bounds[2*i] to bounds[2*i+1] defines the range where to
+ walk for dimension i (both bounds are included in the walked coordinates).
+ The bounds should be ordered for each dimension (bounds[2*i] <= bounds[2*i+1]).
+
+ The mode should be one of:
+
+ .. c:macro:: NPY_NEIGHBORHOOD_ITER_ZERO_PADDING
+
+ Zero padding. Outside bounds values will be 0.
+
+ .. c:macro:: NPY_NEIGHBORHOOD_ITER_ONE_PADDING
+
+ One padding. Outside bounds values will be 1.
+
+ .. c:macro:: NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING
+
+ Constant padding. Outside bounds values will be the
+ same as the first item in fill_value.
+
+ .. c:macro:: NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING
+
+ Mirror padding. Outside bounds values will be as if the
+ array items were mirrored. For example, for the array [1, 2, 3, 4],
+ x[-1] will be 1, x[-2] will be 2, x[4] will be 4, x[5] will be 3,
+ etc...
+
+ .. c:macro:: NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING
+
+ Circular padding. Outside bounds values will be as if the array
+ was repeated. For example, for the array [1, 2, 3, 4], x[-2] will
+ be 3, x[-1] will be 4, x[4] will be 1, x[5] will be 2, etc...
+
+ If the mode is constant filling (`NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING`),
+ fill_value should point to an array object which holds the filling value
+ (the first item will be the filling value if the array contains more than
+ one item). For other cases, fill_value may be NULL.
+
+ - The iterator holds a reference to iter
+ - Return NULL on failure (in which case the reference count of iter is not
+ changed)
+ - iter itself can be a Neighborhood iterator: this can be useful, e.g., for
+ automatic boundary handling
+ - the object returned by this function should be safe to use as a normal
+ iterator
+ - If the position of iter is changed, any subsequent call to
+ PyArrayNeighborhoodIter_Next is undefined behavior, and
+ PyArrayNeighborhoodIter_Reset must be called.
+
+ .. code-block:: c
+
+ npy_intp bounds[] = {-1, 1, -1, 1};  /* for a 3x3 kernel */
+ npy_intp i, j;
+ PyArrayIterObject *iter;
+ PyArrayNeighborhoodIterObject *neigh_iter;
+
+ iter = (PyArrayIterObject *)PyArray_IterNew(x);
+ neigh_iter = (PyArrayNeighborhoodIterObject *)PyArray_NeighborhoodIterNew(
+     iter, bounds, NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL);
+
+ for (i = 0; i < iter->size; ++i) {
+     for (j = 0; j < neigh_iter->size; ++j) {
+         /* Walk around the item currently pointed to by iter->dataptr */
+         PyArrayNeighborhoodIter_Next(neigh_iter);
+     }
+
+     /* Move to the next point of iter */
+     PyArrayIter_Next(iter);
+     PyArrayNeighborhoodIter_Reset(neigh_iter);
+ }
+
+.. c:function:: int PyArrayNeighborhoodIter_Reset( \
+ PyArrayNeighborhoodIterObject* iter)
+
+ Reset the iterator position to the first point of the neighborhood. This
+ should be called whenever the *iter* argument given to
+ :c:func:`PyArray_NeighborhoodIterNew` is changed (see example above).
+
+.. c:function:: int PyArrayNeighborhoodIter_Next( \
+ PyArrayNeighborhoodIterObject* iter)
+
+ After this call, iter->dataptr points to the next point of the
+ neighborhood. Calling this function after every point of the
+ neighborhood has been visited is undefined.
+
+Array Scalars
+-------------
+
+.. c:function:: PyObject* PyArray_Return(PyArrayObject* arr)
+
+ This function steals a reference to *arr*.
+
+ This function checks to see if *arr* is a 0-dimensional array and,
+ if so, returns the appropriate array scalar. It should be used
+ whenever 0-dimensional arrays could be returned to Python.
+
+.. c:function:: PyObject* PyArray_Scalar( \
+ void* data, PyArray_Descr* dtype, PyObject* itemsize)
+
+ Return an array scalar object of the given *dtype* and *itemsize* by
+ **copying** from memory pointed to by *data*. The data is byteswapped
+ if appropriate to the data-type, because array scalars are always
+ in correct machine-byte order.
+
+.. c:function:: PyObject* PyArray_ToScalar(void* data, PyArrayObject* arr)
+
+ Return an array scalar object of the type and itemsize indicated
+ by the array object *arr* copied from the memory pointed to by
+ *data* and swapping if the data in *arr* is not in machine
+ byte-order.
+
+.. c:function:: PyObject* PyArray_FromScalar( \
+ PyObject* scalar, PyArray_Descr* outcode)
+
+ Return a 0-dimensional array of type determined by *outcode* from
+ *scalar* which should be an array-scalar object. If *outcode* is
+ NULL, then the type is determined from *scalar*.
+
+.. c:function:: void PyArray_ScalarAsCtype(PyObject* scalar, void* ctypeptr)
+
+ Return in *ctypeptr* a pointer to the actual value in an array
+ scalar. There is no error checking so *scalar* must be an
+ array-scalar object, and ctypeptr must have enough space to hold
+ the correct type. For flexible-sized types, a pointer to the data
+ is copied into the memory of *ctypeptr*, for all other types, the
+ actual data is copied into the address pointed to by *ctypeptr*.
+
+.. c:function:: void PyArray_CastScalarToCtype( \
+ PyObject* scalar, void* ctypeptr, PyArray_Descr* outcode)
+
+ Return the data (cast to the data type indicated by *outcode*)
+ from the array-scalar, *scalar*, into the memory pointed to by
+ *ctypeptr* (which must be large enough to handle the incoming
+ memory).
+
+.. c:function:: PyObject* PyArray_TypeObjectFromType(int type)
+
+ Returns a scalar type-object from a type-number, *type*.
+ Equivalent to :c:func:`PyArray_DescrFromType` (*type*)->typeobj
+ except for reference counting and error-checking. Returns a new
+ reference to the typeobject on success or ``NULL`` on failure.
+
+.. c:function:: NPY_SCALARKIND PyArray_ScalarKind( \
+ int typenum, PyArrayObject** arr)
+
+ See the function :c:func:`PyArray_MinScalarType` for an alternative
+ mechanism introduced in NumPy 1.6.0.
+
+ Return the kind of scalar represented by *typenum* and the array
+ in *\*arr* (if *arr* is not ``NULL`` ). The array is assumed to be
+ rank-0 and only used if *typenum* represents a signed integer. If
+ *arr* is not ``NULL`` and the first element is negative then
+ :c:data:`NPY_INTNEG_SCALAR` is returned, otherwise
+ :c:data:`NPY_INTPOS_SCALAR` is returned. The possible return values
+ are the enumerated values in :c:type:`NPY_SCALARKIND`.
+
+.. c:function:: int PyArray_CanCoerceScalar( \
+ char thistype, char neededtype, NPY_SCALARKIND scalar)
+
+ See the function :c:func:`PyArray_ResultType` for details of
+ NumPy type promotion, updated in NumPy 1.6.0.
+
+ Implements the rules for scalar coercion. Scalars are only
+ silently coerced from thistype to neededtype if this function
+ returns nonzero. If scalar is :c:data:`NPY_NOSCALAR`, then this
+ function is equivalent to :c:func:`PyArray_CanCastSafely`. The rule is
+ that scalars of the same KIND can be coerced into arrays of the
+ same KIND. This rule means that high-precision scalars will never
+ cause low-precision arrays of the same KIND to be upcast.
+
+
+Data-type descriptors
+---------------------
+
+
+
+.. warning::
+
+ Data-type objects must be reference counted so be aware of the
+ action on the data-type reference of different C-API calls. The
+ standard rule is that when a data-type object is returned it is a
+ new reference. Functions that take :c:type:`PyArray_Descr *` objects and
+ return arrays steal references to the data-type of their inputs
+ unless otherwise noted. Therefore, you must own a reference to any
+ data-type object used as input to such a function.
+
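+For example (a sketch; :c:func:`PyArray_FromAny`, documented elsewhere in
+this reference, is one such reference-stealing function), keeping a usable
+reference across such a call requires an extra ``Py_INCREF``:
+
+.. code-block:: c
+
+    PyArray_Descr *dtype = PyArray_DescrFromType(NPY_FLOAT64);
+    /* PyArray_FromAny steals a reference to dtype, so take an extra one */
+    Py_INCREF(dtype);
+    PyObject *arr = PyArray_FromAny(obj, dtype, 0, 0, 0, NULL);
+    /* ... dtype is still owned here; Py_DECREF(dtype) when finished ... */
+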
+.. c:function:: int PyArray_DescrCheck(PyObject* obj)
+
+ Evaluates as true if *obj* is a data-type object ( :c:type:`PyArray_Descr *` ).
+
+.. c:function:: PyArray_Descr* PyArray_DescrNew(PyArray_Descr* obj)
+
+ Return a new data-type object copied from *obj* (the fields
+ reference is just updated so that the new object points to the
+ same fields dictionary if any).
+
+.. c:function:: PyArray_Descr* PyArray_DescrNewFromType(int typenum)
+
+ Create a new data-type object from the built-in (or
+ user-registered) data-type indicated by *typenum*. All builtin
+ types should not have any of their fields changed. This creates a
+ new copy of the :c:type:`PyArray_Descr` structure so that you can fill
+ it in as appropriate. This function is especially needed for
+ flexible data-types which need to have a new elsize member in
+ order to be meaningful in array construction.
+
+.. c:function:: PyArray_Descr* PyArray_DescrNewByteorder( \
+ PyArray_Descr* obj, char newendian)
+
+ Create a new data-type object with the byteorder set according to
+ *newendian*. All referenced data-type objects (in subdescr and
+ fields members of the data-type object) are also changed
+ (recursively). If a byteorder of :c:data:`NPY_IGNORE` is encountered it
+ is left alone. If newendian is :c:data:`NPY_SWAP`, then all byte-orders
+ are swapped. Other valid newendian values are :c:data:`NPY_NATIVE`,
+ :c:data:`NPY_LITTLE`, and :c:data:`NPY_BIG` which all cause the returned
+ data-type descriptor (and all its
+ referenced data-type descriptors) to have the corresponding byte-
+ order.
+
+.. c:function:: PyArray_Descr* PyArray_DescrFromObject( \
+ PyObject* op, PyArray_Descr* mintype)
+
+ Determine an appropriate data-type object from the object *op*
+ (which should be a "nested" sequence object) and the minimum
+ data-type descriptor mintype (which can be ``NULL`` ). Similar in
+ behavior to array(*op*).dtype. Don't confuse this function with
+ :c:func:`PyArray_DescrConverter`. This function essentially looks at
+ all the objects in the (nested) sequence and determines the
+ data-type from the elements it finds.
+
+.. c:function:: PyArray_Descr* PyArray_DescrFromScalar(PyObject* scalar)
+
+ Return a data-type object from an array-scalar object. No checking
+ is done to be sure that *scalar* is an array scalar. If no
+ suitable data-type can be determined, then a data-type of
+ :c:data:`NPY_OBJECT` is returned by default.
+
+.. c:function:: PyArray_Descr* PyArray_DescrFromType(int typenum)
+
+ Returns a data-type object corresponding to *typenum*. The
+ *typenum* can be one of the enumerated types, a character code for
+ one of the enumerated types, or a user-defined type. If you want to use a
+ flexible size array, then you need to use a flexible *typenum* and set the
+ result's ``elsize`` parameter to the desired size. The typenum is one of the
+ :c:data:`NPY_TYPES`.
+
+.. c:function:: int PyArray_DescrConverter(PyObject* obj, PyArray_Descr** dtype)
+
+ Convert any compatible Python object, *obj*, to a data-type object
+ in *dtype*. A large number of Python objects can be converted to
+ data-type objects. See :ref:`arrays.dtypes` for a complete
+ description. This version of the converter converts None objects
+ to a :c:data:`NPY_DEFAULT_TYPE` data-type object. This function can
+ be used with the "O&" character code in :c:func:`PyArg_ParseTuple`
+ processing.
+
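+A sketch of typical use in a module function (the function name is
+illustrative):
+
+.. code-block:: c
+
+    static PyObject *
+    my_func(PyObject *self, PyObject *args)
+    {
+        PyArray_Descr *dtype = NULL;
+        if (!PyArg_ParseTuple(args, "O&", PyArray_DescrConverter, &dtype)) {
+            return NULL;
+        }
+        /* ... use dtype ... */
+        Py_DECREF(dtype);
+        Py_RETURN_NONE;
+    }
+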
+.. c:function:: int PyArray_DescrConverter2( \
+ PyObject* obj, PyArray_Descr** dtype)
+
+ Convert any compatible Python object, *obj*, to a data-type
+ object in *dtype*. This version of the converter converts None
+ objects so that the returned data-type is ``NULL``. This function
+ can also be used with the "O&" character in PyArg_ParseTuple
+ processing.
+
+.. c:function:: int PyArray_DescrAlignConverter( \
+ PyObject* obj, PyArray_Descr** dtype)
+
+ Like :c:func:`PyArray_DescrConverter` except it aligns C-struct-like
+ objects on word-boundaries as the compiler would.
+
+.. c:function:: int PyArray_DescrAlignConverter2( \
+ PyObject* obj, PyArray_Descr** dtype)
+
+ Like :c:func:`PyArray_DescrConverter2` except it aligns C-struct-like
+ objects on word-boundaries as the compiler would.
+
+.. c:function:: PyObject *PyArray_FieldNames(PyObject* dict)
+
+ Take the fields dictionary, *dict*, such as the one attached to a
+ data-type object and construct an ordered-list of field names such
+ as is stored in the names field of the :c:type:`PyArray_Descr` object.
+
+
+Conversion Utilities
+--------------------
+
+
+For use with :c:func:`PyArg_ParseTuple`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+All of these functions can be used in :c:func:`PyArg_ParseTuple` (...) with
+the "O&" format specifier to automatically convert any Python object
+to the required C-object. All of these functions return
+:c:data:`NPY_SUCCEED` if successful and :c:data:`NPY_FAIL` if not. The first
+argument to all of these functions is a Python object. The second
+argument is the **address** of the C-type to convert the Python object
+to.
+
+
+.. warning::
+
+ Be sure to understand what steps you should take to manage the
+ memory when using these conversion functions. These functions can
+ require freeing memory, and/or altering the reference counts of
+ specific objects based on your use.
+
+.. c:function:: int PyArray_Converter(PyObject* obj, PyObject** address)
+
+ Convert any Python object to a :c:type:`PyArrayObject`. If
+ :c:func:`PyArray_Check` (*obj*) is TRUE then its reference count is
+ incremented and a reference placed in *address*. If *obj* is not
+ an array, then convert it to an array using :c:func:`PyArray_FromAny`
+ . No matter what is returned, you must DECREF the object returned
+ by this routine in *address* when you are done with it.
+
+.. c:function:: int PyArray_OutputConverter( \
+ PyObject* obj, PyArrayObject** address)
+
+ This is a default converter for output arrays given to
+ functions. If *obj* is :c:data:`Py_None` or ``NULL``, then *\*address*
+ will be ``NULL`` but the call will succeed. If :c:func:`PyArray_Check` (
+ *obj*) is TRUE then it is returned in *\*address* without
+ incrementing its reference count.
+
+.. c:function:: int PyArray_IntpConverter(PyObject* obj, PyArray_Dims* seq)
+
+ Convert any Python sequence, *obj*, smaller than :c:data:`NPY_MAXDIMS`
+ to a C-array of :c:type:`npy_intp`. The Python object could also be a
+ single number. The *seq* variable is a pointer to a structure with
+ members ptr and len. On successful return, *seq* ->ptr contains a
+ pointer to memory that must be freed, by calling :c:func:`PyDimMem_FREE`,
+ to avoid a memory leak. The restriction on memory size allows this
+ converter to be conveniently used for sequences intended to be
+ interpreted as array shapes.
+
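+A sketch of typical use (the converted values must be freed with
+:c:func:`PyDimMem_FREE` as noted above):
+
+.. code-block:: c
+
+    PyArray_Dims shape = {NULL, 0};
+    if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, &shape)) {
+        return NULL;
+    }
+    /* ... use shape.ptr (an npy_intp array) and shape.len ... */
+    PyDimMem_FREE(shape.ptr);
+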
+.. c:function:: int PyArray_BufferConverter(PyObject* obj, PyArray_Chunk* buf)
+
+ Convert any Python object, *obj*, with a (single-segment) buffer
+ interface to a variable with members that detail the object's use
+ of its chunk of memory. The *buf* variable is a pointer to a
+ structure with base, ptr, len, and flags members. The
+ :c:type:`PyArray_Chunk` structure is binary compatible with
+ Python's buffer object (through its len member on 32-bit platforms
+ and its ptr member on 64-bit platforms or in Python 2.5). On
+ return, the base member is set to *obj* (or its base if *obj* is
+ already a buffer object pointing to another object). If you need
+ to hold on to the memory be sure to INCREF the base member. The
+ chunk of memory is pointed to by *buf* ->ptr member and has length
+ *buf* ->len. The flags member of *buf* is :c:data:`NPY_BEHAVED_RO` with
+ the :c:data:`NPY_ARRAY_WRITEABLE` flag set if *obj* has a writeable buffer
+ interface.
+
+.. c:function:: int PyArray_AxisConverter(PyObject* obj, int* axis)
+
+ Convert a Python object, *obj*, representing an axis argument to
+ the proper value for passing to the functions that take an integer
+ axis. Specifically, if *obj* is None, *axis* is set to
+ :c:data:`NPY_MAXDIMS` which is interpreted correctly by the C-API
+ functions that take axis arguments.
+
+.. c:function:: int PyArray_BoolConverter(PyObject* obj, Bool* value)
+
+ Convert any Python object, *obj*, to :c:data:`NPY_TRUE` or
+ :c:data:`NPY_FALSE`, and place the result in *value*.
+
+.. c:function:: int PyArray_ByteorderConverter(PyObject* obj, char* endian)
+
+ Convert Python strings into the corresponding byte-order
+ character:
+ '>', '<', 's', '=', or '\|'.
+
+.. c:function:: int PyArray_SortkindConverter(PyObject* obj, NPY_SORTKIND* sort)
+
+ Convert Python strings into one of :c:data:`NPY_QUICKSORT` (starts
+ with 'q' or 'Q'), :c:data:`NPY_HEAPSORT` (starts with 'h' or 'H'),
+ :c:data:`NPY_MERGESORT` (starts with 'm' or 'M') or :c:data:`NPY_STABLESORT`
+ (starts with 't' or 'T'). :c:data:`NPY_MERGESORT` and :c:data:`NPY_STABLESORT`
+ are aliased to each other for backwards compatibility and may refer to one
+ of several stable sorting algorithms depending on the data type.
+
+.. c:function:: int PyArray_SearchsideConverter( \
+ PyObject* obj, NPY_SEARCHSIDE* side)
+
+ Convert Python strings into one of :c:data:`NPY_SEARCHLEFT` (starts with 'l'
+ or 'L'), or :c:data:`NPY_SEARCHRIGHT` (starts with 'r' or 'R').
+
+.. c:function:: int PyArray_OrderConverter(PyObject* obj, NPY_ORDER* order)
+
+ Convert the Python strings 'C', 'F', 'A', and 'K' into the :c:type:`NPY_ORDER`
+ enumeration :c:data:`NPY_CORDER`, :c:data:`NPY_FORTRANORDER`,
+ :c:data:`NPY_ANYORDER`, and :c:data:`NPY_KEEPORDER`.
+
+.. c:function:: int PyArray_CastingConverter( \
+ PyObject* obj, NPY_CASTING* casting)
+
+ Convert the Python strings 'no', 'equiv', 'safe', 'same_kind', and
+ 'unsafe' into the :c:type:`NPY_CASTING` enumeration :c:data:`NPY_NO_CASTING`,
+ :c:data:`NPY_EQUIV_CASTING`, :c:data:`NPY_SAFE_CASTING`,
+ :c:data:`NPY_SAME_KIND_CASTING`, and :c:data:`NPY_UNSAFE_CASTING`.
+
+.. c:function:: int PyArray_ClipmodeConverter( \
+ PyObject* object, NPY_CLIPMODE* val)
+
+ Convert the Python strings 'clip', 'wrap', and 'raise' into the
+ :c:type:`NPY_CLIPMODE` enumeration :c:data:`NPY_CLIP`, :c:data:`NPY_WRAP`,
+ and :c:data:`NPY_RAISE`.
+
+.. c:function:: int PyArray_ConvertClipmodeSequence( \
+ PyObject* object, NPY_CLIPMODE* modes, int n)
+
+ Converts either a sequence of clipmodes or a single clipmode into
+ a C array of :c:type:`NPY_CLIPMODE` values. The number of clipmodes *n*
+ must be known before calling this function. This function is provided
+ to help functions allow a different clipmode for each dimension.
+
+Other conversions
+^^^^^^^^^^^^^^^^^
+
+.. c:function:: int PyArray_PyIntAsInt(PyObject* op)
+
+ Convert all kinds of Python objects (including arrays and array
+ scalars) to a standard integer. On error, -1 is returned and an
+ exception set. You may find useful the macro:
+
+ .. code-block:: c
+
+ #define error_converting(x) (((x) == -1) && PyErr_Occurred())
+
+.. c:function:: npy_intp PyArray_PyIntAsIntp(PyObject* op)
+
+ Convert all kinds of Python objects (including arrays and array
+ scalars) to a (platform-pointer-sized) integer. On error, -1 is
+ returned and an exception set.
+
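+Combined with the ``error_converting`` macro shown above, a sketch:
+
+.. code-block:: c
+
+    npy_intp n = PyArray_PyIntAsIntp(op);
+    if (error_converting(n)) {
+        return NULL;  /* an exception has already been set */
+    }
+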
+.. c:function:: int PyArray_IntpFromSequence( \
+ PyObject* seq, npy_intp* vals, int maxvals)
+
+ Convert any Python sequence (or single Python number) passed in as
+ *seq* to (up to) *maxvals* pointer-sized integers and place them
+ in the *vals* array. The sequence can be smaller than *maxvals* as
+ the number of converted objects is returned.
+
+.. c:function:: int PyArray_TypestrConvert(int itemsize, int gentype)
+
+ Convert typestring characters (with *itemsize*) to basic
+ enumerated data types. The typestring character corresponding to
+ signed and unsigned integers, floating point numbers, and
+ complex-floating point numbers are recognized and converted. Other
+ values of gentype are returned. This function can be used to
+ convert, for example, the string 'f4' to :c:data:`NPY_FLOAT32`.
+
+
+Miscellaneous
+-------------
+
+
+Importing the API
+^^^^^^^^^^^^^^^^^
+
+In order to make use of the C-API from another extension module, the
+:c:func:`import_array` function must be called. If the extension module is
+self-contained in a single .c file, then that is all that needs to be
+done. If, however, the extension module involves multiple files where
+the C-API is needed then some additional steps must be taken.
+
+.. c:function:: void import_array(void)
+
+ This function must be called in the initialization section of a
+ module that will make use of the C-API. It imports the module
+ where the function-pointer table is stored and points the correct
+ variable to it.
+
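+A minimal sketch of a module initialization function calling it (the
+module name ``cool`` is illustrative):
+
+.. code-block:: c
+
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT, "cool", NULL, -1, NULL
+    };
+
+    PyMODINIT_FUNC
+    PyInit_cool(void)
+    {
+        import_array();  /* on failure, sets an exception and returns NULL */
+        return PyModule_Create(&moduledef);
+    }
+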
+.. c:macro:: PY_ARRAY_UNIQUE_SYMBOL
+
+.. c:macro:: NO_IMPORT_ARRAY
+
+ Using these #defines you can use the C-API in multiple files for a
+ single extension module. In each file you must define
+ :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` to some name that will hold the
+ C-API (*e.g.* myextension_ARRAY_API). This must be done **before**
+ including the numpy/arrayobject.h file. In the module
+ initialization routine you call :c:func:`import_array`. In addition,
+ in the files that do not have the module initialization
+ subroutine, define :c:macro:`NO_IMPORT_ARRAY` prior to including
+ numpy/arrayobject.h.
+
+ Suppose I have two files coolmodule.c and coolhelper.c which need
+ to be compiled and linked into a single extension module. Suppose
+ coolmodule.c contains the required initcool module initialization
+ function (with the import_array() function called). Then,
+ coolmodule.c would have at the top:
+
+ .. code-block:: c
+
+ #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API
+ #include <numpy/arrayobject.h>
+
+ On the other hand, coolhelper.c would contain at the top:
+
+ .. code-block:: c
+
+ #define NO_IMPORT_ARRAY
+ #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API
+ #include <numpy/arrayobject.h>
+
+ You can also put the last two lines into an extension-local
+ header file, as long as you make sure that NO_IMPORT_ARRAY is
+ #defined before #including that file.
+
+ Internally, these #defines work as follows:
+
+ * If neither is defined, the C-API is declared to be
+ :c:type:`static void**`, so it is only visible within the
+ compilation unit that #includes numpy/arrayobject.h.
+ * If :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is #defined, but
+ :c:macro:`NO_IMPORT_ARRAY` is not, the C-API is declared to
+ be :c:type:`void**`, so that it will also be visible to other
+ compilation units.
+ * If :c:macro:`NO_IMPORT_ARRAY` is #defined, regardless of
+ whether :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is, the C-API is
+ declared to be :c:type:`extern void**`, so it is expected to
+ be defined in another compilation unit.
+ * Whenever :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is #defined, it
+ also changes the name of the variable holding the C-API, which
+ defaults to :c:data:`PyArray_API`, to whatever the macro is
+ #defined to.
+
+Checking the API Version
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Because python extensions are not used in the same way as usual libraries on
+most platforms, some errors cannot be automatically detected at build time or
+even runtime. For example, if you build an extension using a function available
+only for numpy >= 1.3.0, and you import the extension later with numpy 1.2, you
+will not get an import error (but almost certainly a segmentation fault when
+calling the function). That's why several functions are provided to check for
+numpy versions. The macros :c:data:`NPY_VERSION` and
+:c:data:`NPY_FEATURE_VERSION` correspond to the numpy version used to build the
+extension, whereas the versions returned by the functions
+:c:func:`PyArray_GetNDArrayCVersion` and :c:func:`PyArray_GetNDArrayCFeatureVersion`
+correspond to the runtime numpy version.
+
+The rules for ABI and API compatibilities can be summarized as follows:
+
+ * Whenever :c:data:`NPY_VERSION` != PyArray_GetNDArrayCVersion, the
+ extension has to be recompiled (ABI incompatibility).
+ * :c:data:`NPY_VERSION` == PyArray_GetNDArrayCVersion and
+ :c:data:`NPY_FEATURE_VERSION` <= PyArray_GetNDArrayCFeatureVersion means
+ backward compatible changes.
+
+ABI incompatibility is automatically detected in every numpy version. API
+incompatibility detection was added in numpy 1.4.0. If you want to support
+many different numpy versions with one extension binary, you have to build
+your extension with the lowest NPY_FEATURE_VERSION possible.
+
+.. c:function:: unsigned int PyArray_GetNDArrayCVersion(void)
+
+ This just returns the value :c:data:`NPY_VERSION`. :c:data:`NPY_VERSION`
+ changes whenever a backward incompatible change occurs at the ABI level. Because
+ it is in the C-API, however, comparing the output of this function from the
+ value defined in the current header gives a way to test if the C-API has
+ changed thus requiring a re-compilation of extension modules that use the
+ C-API. This is automatically checked in the function :c:func:`import_array`.
+
+.. c:function:: unsigned int PyArray_GetNDArrayCFeatureVersion(void)
+
+ .. versionadded:: 1.4.0
+
+ This just returns the value :c:data:`NPY_FEATURE_VERSION`.
+ :c:data:`NPY_FEATURE_VERSION` changes whenever the API changes (e.g. a
+ function is added). A changed value does not always require a recompile.
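+
+As a sketch, an extension can combine the rules above into a runtime check:
+
+.. code-block:: c
+
+    if (NPY_VERSION != PyArray_GetNDArrayCVersion() ||
+        NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
+        PyErr_SetString(PyExc_ImportError,
+                        "module compiled against an incompatible numpy");
+        return NULL;
+    }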
+
+Internal Flexibility
+^^^^^^^^^^^^^^^^^^^^
+
+.. c:function:: int PyArray_SetNumericOps(PyObject* dict)
+
+ NumPy stores an internal table of Python callable objects that are
+ used to implement arithmetic operations for arrays as well as
+ certain array calculation methods. This function allows the user
+ to replace any or all of these Python objects with their own
+ versions. The keys of the dictionary, *dict*, are the named
+ functions to replace and the paired value is the Python callable
+ object to use. Care should be taken that the function used to
+ replace an internal array operation does not itself call back to
+ that internal array operation (unless you have designed the
+ function to handle that), or an unchecked infinite recursion can
+ result (possibly causing a program crash). The key names that
+ represent operations that can be replaced are:
+
+ **add**, **subtract**, **multiply**, **divide**,
+ **remainder**, **power**, **square**, **reciprocal**,
+ **ones_like**, **sqrt**, **negative**, **positive**,
+ **absolute**, **invert**, **left_shift**, **right_shift**,
+ **bitwise_and**, **bitwise_xor**, **bitwise_or**,
+ **less**, **less_equal**, **equal**, **not_equal**,
+ **greater**, **greater_equal**, **floor_divide**,
+ **true_divide**, **logical_or**, **logical_and**,
+ **floor**, **ceil**, **maximum**, **minimum**, **rint**.
+
+
+ These functions are included here because they are used at least once
+ in the array object's methods. The function returns -1 (without
+ setting a Python Error) if one of the objects being assigned is not
+ callable.
+
+ .. deprecated:: 1.16
+
+.. c:function:: PyObject* PyArray_GetNumericOps(void)
+
+ Return a Python dictionary containing the callable Python objects
+ stored in the internal arithmetic operation table. The keys of
+ this dictionary are given in the explanation for :c:func:`PyArray_SetNumericOps`.
+
+ .. deprecated:: 1.16
+
+.. c:function:: void PyArray_SetStringFunction(PyObject* op, int repr)
+
+ This function allows you to alter the tp_str and tp_repr methods
+ of the array object to any Python function. Thus you can alter
+ what happens for all arrays when str(arr) or repr(arr) is called
+ from Python. The function to be called is passed in as *op*. If
+ *repr* is non-zero, then this function will be called in response
+ to repr(arr), otherwise the function will be called in response to
+ str(arr). No check on whether or not *op* is callable is
+ performed. The callable passed in to *op* should expect an array
+ argument and should return a string to be printed.
+
+
+Memory management
+^^^^^^^^^^^^^^^^^
+
+.. c:function:: char* PyDataMem_NEW(size_t nbytes)
+
+.. c:function:: PyDataMem_FREE(char* ptr)
+
+.. c:function:: char* PyDataMem_RENEW(void * ptr, size_t newbytes)
+
+ Macros to allocate, free, and reallocate memory. These macros are used
+ internally to create arrays.
+
+.. c:function:: npy_intp* PyDimMem_NEW(int nd)
+
+.. c:function:: PyDimMem_FREE(char* ptr)
+
+.. c:function:: npy_intp* PyDimMem_RENEW(void* ptr, size_t newnd)
+
+ Macros to allocate, free, and reallocate dimension and strides memory.
+
+.. c:function:: void* PyArray_malloc(size_t nbytes)
+
+.. c:function:: PyArray_free(void* ptr)
+
+.. c:function:: void* PyArray_realloc(npy_intp* ptr, size_t nbytes)
+
+ These macros use different memory allocators, depending on the
+ constant :c:data:`NPY_USE_PYMEM`. The system malloc is used when
+ :c:data:`NPY_USE_PYMEM` is 0; if :c:data:`NPY_USE_PYMEM` is 1, then
+ the Python memory allocator is used.
+
+.. c:function:: int PyArray_ResolveWritebackIfCopy(PyArrayObject* obj)
+
+ If ``obj.flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or (deprecated)
+ :c:data:`NPY_ARRAY_UPDATEIFCOPY`, this function clears the flags, ``DECREF`` s
+ ``obj->base`` and makes it writeable, and sets ``obj->base`` to NULL. It then
+ copies ``obj->data`` to ``obj->base->data``, and returns the error state of
+ the copy operation. This is the opposite of
+ :c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called once
+ you are finished with ``obj``, just before ``Py_DECREF(obj)``. It may be called
+ multiple times, or with ``NULL`` input. See also
+ :c:func:`PyArray_DiscardWritebackIfCopy`.
+
+ Returns 0 if nothing was done, -1 on error, and 1 if action was taken.
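+
+ A sketch of the usual pattern, once work on ``obj`` is complete:
+
+ .. code-block:: c
+
+     if (PyArray_ResolveWritebackIfCopy(obj) < 0) {
+         Py_DECREF(obj);
+         return NULL;  /* the write-back copy failed */
+     }
+     Py_DECREF(obj);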
+
+Threading support
+^^^^^^^^^^^^^^^^^
+
+These macros are only meaningful if :c:data:`NPY_ALLOW_THREADS`
+evaluates True during compilation of the extension module. Otherwise,
+these macros are equivalent to whitespace. Python uses a single Global
+Interpreter Lock (GIL) for each Python process so that only a single
+thread may execute at a time (even on multi-cpu machines). When
+calling out to a compiled function that may take time to compute (and
+does not have side-effects for other threads like updated global
+variables), the GIL should be released so that other Python threads
+can run while the time-consuming calculations are performed. This can
+be accomplished using two groups of macros. Typically, if one macro in
+a group is used in a code block, all of them must be used in the same
+code block. Currently, :c:data:`NPY_ALLOW_THREADS` is defined to the
+python-defined :c:data:`WITH_THREADS` constant unless the environment
+variable :c:data:`NPY_NOSMP` is set in which case
+:c:data:`NPY_ALLOW_THREADS` is defined to be 0.
+
+Group 1
+"""""""
+
+ This group is used to call code that may take some time but does not
+ use any Python C-API calls. Thus, the GIL should be released during
+ its calculation.
+
+ .. c:macro:: NPY_BEGIN_ALLOW_THREADS
+
+ Equivalent to :c:macro:`Py_BEGIN_ALLOW_THREADS` except it uses
+ :c:data:`NPY_ALLOW_THREADS` to determine if the macro is
+ replaced with white-space or not.
+
+ .. c:macro:: NPY_END_ALLOW_THREADS
+
+ Equivalent to :c:macro:`Py_END_ALLOW_THREADS` except it uses
+ :c:data:`NPY_ALLOW_THREADS` to determine if the macro is
+ replaced with white-space or not.
+
+ .. c:macro:: NPY_BEGIN_THREADS_DEF
+
+ Place in the variable declaration area. This macro sets up the
+ variable needed for storing the Python state.
+
+ .. c:macro:: NPY_BEGIN_THREADS
+
+ Place right before code that does not need the Python
+ interpreter (no Python C-API calls). This macro saves the
+ Python state and releases the GIL.
+
+ .. c:macro:: NPY_END_THREADS
+
+ Place right after code that does not need the Python
+ interpreter. This macro acquires the GIL and restores the
+ Python state from the saved variable.
+
+ .. c:function:: NPY_BEGIN_THREADS_DESCR(PyArray_Descr *dtype)
+
+ Useful to release the GIL only if *dtype* does not contain
+ arbitrary Python objects which may need the Python interpreter
+ during execution of the loop.
+
+ .. c:function:: NPY_END_THREADS_DESCR(PyArray_Descr *dtype)
+
+ Useful to regain the GIL in situations where it was released
+ using the BEGIN form of this macro.
+
+ .. c:function:: NPY_BEGIN_THREADS_THRESHOLDED(int loop_size)
+
+ Useful to release the GIL only if *loop_size* exceeds a
+ minimum threshold, currently set to 500. Should be matched
+ with a :c:macro:`NPY_END_THREADS` to regain the GIL.
+
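+ A sketch putting the Group 1 macros together (the computation is
+ illustrative; no Python C-API calls occur while the GIL is released):
+
+ .. code-block:: c
+
+     static void
+     double_all(double *data, npy_intp n)
+     {
+         npy_intp i;
+         NPY_BEGIN_THREADS_DEF
+
+         NPY_BEGIN_THREADS
+         for (i = 0; i < n; i++) {
+             data[i] *= 2.0;
+         }
+         NPY_END_THREADS
+     }
+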
+Group 2
+"""""""
+
+ This group is used to re-acquire the Python GIL after it has been
+ released. For example, suppose the GIL has been released (using the
+ previous calls), and then some path in the code (perhaps in a
+ different subroutine) requires use of the Python C-API, then these
+ macros are useful to acquire the GIL. These macros accomplish
+ essentially a reverse of the previous three (acquire the GIL,
+ saving its state) and then re-release it with the saved state.
+
+ .. c:macro:: NPY_ALLOW_C_API_DEF
+
+ Place in the variable declaration area to set up the necessary
+ variable.
+
+ .. c:macro:: NPY_ALLOW_C_API
+
+ Place before code that needs to call the Python C-API (when it is
+ known that the GIL has already been released).
+
+ .. c:macro:: NPY_DISABLE_C_API
+
+ Place after code that needs to call the Python C-API (to re-release
+ the GIL).
+
+.. tip::
+
+ Never use semicolons after the threading support macros.
+
+
+Priority
+^^^^^^^^
+
+.. c:var:: NPY_PRIORITY
+
+ Default priority for arrays.
+
+.. c:var:: NPY_SUBTYPE_PRIORITY
+
+ Default subtype priority.
+
+.. c:var:: NPY_SCALAR_PRIORITY
+
+ Default scalar priority (very small)
+
+.. c:function:: double PyArray_GetPriority(PyObject* obj, double def)
+
+ Return the :obj:`~numpy.class.__array_priority__` attribute (converted to a
+ double) of *obj* or *def* if no attribute of that name
+ exists. Fast returns that avoid the attribute lookup are provided
+ for objects of type :c:data:`PyArray_Type`.
+
+
+Default buffers
+^^^^^^^^^^^^^^^
+
+.. c:var:: NPY_BUFSIZE
+
+ Default size of the user-settable internal buffers.
+
+.. c:var:: NPY_MIN_BUFSIZE
+
+ Smallest size of user-settable internal buffers.
+
+.. c:var:: NPY_MAX_BUFSIZE
+
+ Largest size allowed for the user-settable buffers.
+
+
+Other constants
+^^^^^^^^^^^^^^^
+
+.. c:var:: NPY_NUM_FLOATTYPE
+
+ The number of floating-point types
+
+.. c:var:: NPY_MAXDIMS
+
+ The maximum number of dimensions allowed in arrays.
+
+.. c:var:: NPY_MAXARGS
+
+ The maximum number of array arguments that can be used in functions.
+
+.. c:var:: NPY_VERSION
+
+ The current version of the ndarray object (check to see if this
+ variable is defined to guarantee the numpy/arrayobject.h header is
+ being used).
+
+.. c:var:: NPY_FALSE
+
+ Defined as 0 for use with Bool.
+
+.. c:var:: NPY_TRUE
+
+ Defined as 1 for use with Bool.
+
+.. c:var:: NPY_FAIL
+
+ The return value of failed converter functions which are called using
+ the "O&" syntax in :c:func:`PyArg_ParseTuple`-like functions.
+
+.. c:var:: NPY_SUCCEED
+
+ The return value of successful converter functions which are called
+ using the "O&" syntax in :c:func:`PyArg_ParseTuple`-like functions.
+
+
+Miscellaneous Macros
+^^^^^^^^^^^^^^^^^^^^
+
+.. c:function:: PyArray_SAMESHAPE(PyArrayObject *a1, PyArrayObject *a2)
+
+ Evaluates as True if arrays *a1* and *a2* have the same shape.
+
+.. c:var:: a
+
+.. c:var:: b
+
+.. c:macro:: PyArray_MAX(a,b)
+
+ Returns the maximum of *a* and *b*. If (*a*) or (*b*) are
+ expressions they are evaluated twice.
+
+.. c:macro:: PyArray_MIN(a,b)
+
+ Returns the minimum of *a* and *b*. If (*a*) or (*b*) are
+ expressions they are evaluated twice.
+
+.. c:macro:: PyArray_CLT(a,b)
+
+.. c:macro:: PyArray_CGT(a,b)
+
+.. c:macro:: PyArray_CLE(a,b)
+
+.. c:macro:: PyArray_CGE(a,b)
+
+.. c:macro:: PyArray_CEQ(a,b)
+
+.. c:macro:: PyArray_CNE(a,b)
+
+ Implements the complex comparisons between two complex numbers
+ (structures with a real and imag member) using NumPy's definition
+ of the ordering which is lexicographic: comparing the real parts
+ first and then the imaginary parts if the real parts are equal.
+
+.. c:function:: PyArray_REFCOUNT(PyObject* op)
+
+ Returns the reference count of any Python object.
+
+.. c:function:: PyArray_DiscardWritebackIfCopy(PyObject* obj)
+
+ If ``obj.flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or (deprecated)
+ :c:data:`NPY_ARRAY_UPDATEIFCOPY`, this function clears the flags, `DECREF` s
+ ``obj->base`` and makes it writeable, and sets ``obj->base`` to NULL. In
+ contrast to :c:func:`PyArray_ResolveWritebackIfCopy` it makes no attempt
+ to copy the data from ``obj->base``. This undoes
+ :c:func:`PyArray_SetWritebackIfCopyBase`. Usually this is called after an
+ error when you are finished with ``obj``, just before ``Py_DECREF(obj)``.
+ It may be called multiple times, or with ``NULL`` input.
+
+.. c:function:: PyArray_XDECREF_ERR(PyObject* obj)
+
+ Deprecated in 1.14, use :c:func:`PyArray_DiscardWritebackIfCopy`
+ followed by ``Py_XDECREF``.
+
+ DECREF's an array object which may have the (deprecated)
+ :c:data:`NPY_ARRAY_UPDATEIFCOPY` or :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`
+ flag set without causing the contents to be copied back into the
+ original array. Resets the :c:data:`NPY_ARRAY_WRITEABLE` flag on the base
+ object. This is useful for recovering from an error condition when
+ writeback semantics are used, but will lead to wrong results.
+
+
+Enumerated Types
+^^^^^^^^^^^^^^^^
+
+.. c:type:: NPY_SORTKIND
+
+ A special variable-type which can take on different values to indicate
+ the sorting algorithm being used.
+
+ .. c:var:: NPY_QUICKSORT
+
+ .. c:var:: NPY_HEAPSORT
+
+ .. c:var:: NPY_MERGESORT
+
+ .. c:var:: NPY_STABLESORT
+
+ Used as an alias of :c:data:`NPY_MERGESORT` and vice versa.
+
+ .. c:var:: NPY_NSORTS
+
+ Defined to be the number of sorts. It is fixed at three by the need for
+ backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and
+ :c:data:`NPY_STABLESORT` are aliased to each other and may refer to one
+ of several stable sorting algorithms depending on the data type.
+
+
+.. c:type:: NPY_SCALARKIND
+
+ A special variable type indicating the number of "kinds" of
+ scalars distinguished in determining scalar-coercion rules. This
+ variable can take on the values:
+
+ .. c:var:: NPY_NOSCALAR
+
+ .. c:var:: NPY_BOOL_SCALAR
+
+ .. c:var:: NPY_INTPOS_SCALAR
+
+ .. c:var:: NPY_INTNEG_SCALAR
+
+ .. c:var:: NPY_FLOAT_SCALAR
+
+ .. c:var:: NPY_COMPLEX_SCALAR
+
+ .. c:var:: NPY_OBJECT_SCALAR
+
+ .. c:var:: NPY_NSCALARKINDS
+
+ Defined to be the number of scalar kinds
+ (not including :c:data:`NPY_NOSCALAR`).
+
+.. c:type:: NPY_ORDER
+
+ An enumeration type indicating the element order that an array should be
+ interpreted in. When a brand new array is created, generally
+ only **NPY_CORDER** and **NPY_FORTRANORDER** are used, whereas
+ when one or more inputs are provided, the order can be based on them.
+
+ .. c:var:: NPY_ANYORDER
+
+ Fortran order if all the inputs are Fortran, C otherwise.
+
+ .. c:var:: NPY_CORDER
+
+ C order.
+
+ .. c:var:: NPY_FORTRANORDER
+
+ Fortran order.
+
+ .. c:var:: NPY_KEEPORDER
+
+ An order as close to the order of the inputs as possible, even
+ if the input is in neither C nor Fortran order.
+
+.. c:type:: NPY_CLIPMODE
+
+ A variable type indicating the kind of clipping that should be
+ applied in certain functions.
+
+ .. c:var:: NPY_RAISE
+
+ The default for most operations, raises an exception if an index
+ is out of bounds.
+
+ .. c:var:: NPY_CLIP
+
+ Clips an index to the valid range if it is out of bounds.
+
+ .. c:var:: NPY_WRAP
+
+ Wraps an index to the valid range if it is out of bounds.
+
+.. c:type:: NPY_CASTING
+
+ .. versionadded:: 1.6
+
+ An enumeration type indicating how permissive data conversions should
+ be. This is used by the iterator added in NumPy 1.6, and is intended
+ to be used more broadly in a future version.
+
+ .. c:var:: NPY_NO_CASTING
+
+ Only allow identical types.
+
+ .. c:var:: NPY_EQUIV_CASTING
+
+ Allow identical and casts involving byte swapping.
+
+ .. c:var:: NPY_SAFE_CASTING
+
+ Only allow casts which will not cause values to be rounded,
+ truncated, or otherwise changed.
+
+ .. c:var:: NPY_SAME_KIND_CASTING
+
+ Allow any safe casts, and casts between types of the same kind.
+ For example, float64 -> float32 is permitted with this rule.
+
+ .. c:var:: NPY_UNSAFE_CASTING
+
+ Allow any cast, no matter what kind of data loss may occur.
+
+.. index::
+ pair: ndarray; C-API
--- /dev/null
+System configuration
+====================
+
+.. sectionauthor:: Travis E. Oliphant
+
+When NumPy is built, information about system configuration is
+recorded, and is made available for extension modules using NumPy's C
+API. These are mostly defined in ``numpyconfig.h`` (included in
+``ndarrayobject.h``). The public symbols are prefixed by ``NPY_*``.
+NumPy also offers some functions for querying information about the
+platform in use.
+
+For private use, NumPy also constructs a ``config.h`` in the NumPy
+include directory, which is not exported by NumPy (that is, a python
+extension which uses the numpy C API will not see those symbols), to
+avoid namespace pollution.
+
+
+Data type sizes
+---------------
+
+The :c:data:`NPY_SIZEOF_{CTYPE}` constants are defined so that sizeof
+information is available to the pre-processor.
+
+.. c:var:: NPY_SIZEOF_SHORT
+
+ sizeof(short)
+
+.. c:var:: NPY_SIZEOF_INT
+
+ sizeof(int)
+
+.. c:var:: NPY_SIZEOF_LONG
+
+ sizeof(long)
+
+.. c:var:: NPY_SIZEOF_LONGLONG
+
+ sizeof(longlong) where longlong is defined appropriately on the
+ platform.
+
+.. c:var:: NPY_SIZEOF_PY_LONG_LONG
+
+
+.. c:var:: NPY_SIZEOF_FLOAT
+
+ sizeof(float)
+
+.. c:var:: NPY_SIZEOF_DOUBLE
+
+ sizeof(double)
+
+.. c:var:: NPY_SIZEOF_LONG_DOUBLE
+
+ sizeof(longdouble) (A macro defines **NPY_SIZEOF_LONGDOUBLE** as well.)
+
+.. c:var:: NPY_SIZEOF_PY_INTPTR_T
+
+ Size of a pointer on this platform (sizeof(void \*)) (A macro defines
+ NPY_SIZEOF_INTP as well.)
+
+
+Platform information
+--------------------
+
+.. c:var:: NPY_CPU_X86
+.. c:var:: NPY_CPU_AMD64
+.. c:var:: NPY_CPU_IA64
+.. c:var:: NPY_CPU_PPC
+.. c:var:: NPY_CPU_PPC64
+.. c:var:: NPY_CPU_SPARC
+.. c:var:: NPY_CPU_SPARC64
+.. c:var:: NPY_CPU_S390
+.. c:var:: NPY_CPU_PARISC
+
+ .. versionadded:: 1.3.0
+
+ CPU architecture of the platform; only one of the above is
+ defined.
+
+ Defined in ``numpy/npy_cpu.h``
+
+.. c:var:: NPY_LITTLE_ENDIAN
+
+.. c:var:: NPY_BIG_ENDIAN
+
+.. c:var:: NPY_BYTE_ORDER
+
+ .. versionadded:: 1.3.0
+
+ Portable alternatives to the ``endian.h`` macros of GNU Libc.
+ If big endian, :c:data:`NPY_BYTE_ORDER` == :c:data:`NPY_BIG_ENDIAN`, and
+ similarly for little endian architectures.
+
+ Defined in ``numpy/npy_endian.h``.
+
+.. c:function:: PyArray_GetEndianness()
+
+ .. versionadded:: 1.3.0
+
+ Returns the endianness of the current platform.
+ One of :c:data:`NPY_CPU_BIG`, :c:data:`NPY_CPU_LITTLE`,
+ or :c:data:`NPY_CPU_UNKNOWN_ENDIAN`.
+
+
+Compiler directives
+-------------------
+
+.. c:var:: NPY_LIKELY
+.. c:var:: NPY_UNLIKELY
+.. c:var:: NPY_UNUSED
+
+
+Interrupt Handling
+------------------
+
+.. c:var:: NPY_INTERRUPT_H
+.. c:var:: NPY_SIGSETJMP
+.. c:var:: NPY_SIGLONGJMP
+.. c:var:: NPY_SIGJMP_BUF
+.. c:var:: NPY_SIGINT_ON
+.. c:var:: NPY_SIGINT_OFF
--- /dev/null
+NumPy core libraries
+====================
+
+.. sectionauthor:: David Cournapeau
+
+.. versionadded:: 1.3.0
+
+Starting from numpy 1.3.0, we are working on separating the pure C,
+"computational" code from the python dependent code. The goal is twofolds:
+making the code cleaner, and enabling code reuse by other extensions outside
+numpy (scipy, etc...).
+
+NumPy core math library
+-----------------------
+
+The numpy core math library ('npymath') is a first step in this direction. This
+library contains most math-related C99 functionality, which can be used on
+platforms where C99 is not well supported. The core math functions have the
+same API as the C99 ones, except for the ``npy_*`` prefix.
+
+The available functions are defined in ``<numpy/npy_math.h>`` - please refer
+to this header when in doubt.
+
+Floating point classification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. c:var:: NPY_NAN
+
+ This macro is defined to a NaN (Not a Number), and is guaranteed to have
+ the signbit unset ('positive' NaN). The corresponding single and extended
+ precision macros are available with the suffixes F and L.
+
+.. c:var:: NPY_INFINITY
+
+ This macro is defined to a positive inf. The corresponding single and
+ extended precision macros are available with the suffixes F and L.
+
+.. c:var:: NPY_PZERO
+
+ This macro is defined to positive zero. The corresponding single and
+ extended precision macros are available with the suffixes F and L.
+
+.. c:var:: NPY_NZERO
+
+ This macro is defined to negative zero (that is, with the sign bit set). The
+ corresponding single and extended precision macros are available with the
+ suffixes F and L.
+
+.. c:function:: int npy_isnan(x)
+
+ This is a macro, and is equivalent to C99 isnan: works for single, double
+ and extended precision, and returns a non-zero value if x is a NaN.
+
+.. c:function:: int npy_isfinite(x)
+
+ This is a macro, and is equivalent to C99 isfinite: works for single,
+ double and extended precision, and returns a non-zero value if x is neither
+ a NaN nor an infinity.
+
+.. c:function:: int npy_isinf(x)
+
+ This is a macro, and is equivalent to C99 isinf: works for single, double
+ and extended precision, and returns a non-zero value if x is infinite
+ (positive or negative).
+
+.. c:function:: int npy_signbit(x)
+
+ This is a macro, and is equivalent to C99 signbit: works for single, double
+ and extended precision, and returns a non-zero value if x has the signbit
+ set (that is, the number is negative).
+
+.. c:function:: double npy_copysign(double x, double y)
+
+ This is a function equivalent to C99 copysign: return x with the same sign
+ as y. Works for any value, including inf and nan. Single and extended
+ precisions are available with suffix f and l.
+
+ .. versionadded:: 1.4.0
+
+Useful math constants
+~~~~~~~~~~~~~~~~~~~~~
+
+The following math constants are available in ``npy_math.h``. Single
+and extended precision are also available by adding the ``f`` and
+``l`` suffixes respectively.
+
+.. c:var:: NPY_E
+
+ Base of natural logarithm (:math:`e`)
+
+.. c:var:: NPY_LOG2E
+
+ Logarithm to base 2 of the Euler constant (:math:`\frac{\ln(e)}{\ln(2)}`)
+
+.. c:var:: NPY_LOG10E
+
+ Logarithm to base 10 of the Euler constant (:math:`\frac{\ln(e)}{\ln(10)}`)
+
+.. c:var:: NPY_LOGE2
+
+ Natural logarithm of 2 (:math:`\ln(2)`)
+
+.. c:var:: NPY_LOGE10
+
+ Natural logarithm of 10 (:math:`\ln(10)`)
+
+.. c:var:: NPY_PI
+
+ Pi (:math:`\pi`)
+
+.. c:var:: NPY_PI_2
+
+ Pi divided by 2 (:math:`\frac{\pi}{2}`)
+
+.. c:var:: NPY_PI_4
+
+ Pi divided by 4 (:math:`\frac{\pi}{4}`)
+
+.. c:var:: NPY_1_PI
+
+ Reciprocal of pi (:math:`\frac{1}{\pi}`)
+
+.. c:var:: NPY_2_PI
+
+ Two times the reciprocal of pi (:math:`\frac{2}{\pi}`)
+
+.. c:var:: NPY_EULER
+
+ The Euler constant
+ :math:`\lim_{n\rightarrow\infty}({\sum_{k=1}^n{\frac{1}{k}}-\ln n})`
+
+Low-level floating point manipulation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+These can be useful for precise floating point comparison.
+
+.. c:function:: double npy_nextafter(double x, double y)
+
+ This is a function equivalent to C99 nextafter: returns the next
+ representable floating point value after x in the direction of y. Single
+ and extended precisions are available with the suffixes f and l.
+
+ .. versionadded:: 1.4.0
+
+.. c:function:: double npy_spacing(double x)
+
+ This is a function equivalent to the Fortran intrinsic ``spacing``: returns
+ the distance between x and the next representable floating point value
+ after x, e.g. spacing(1) == eps. The spacing of NaN and +/- inf is NaN.
+ Single and extended precisions are available with the suffixes f and l.
+
+ .. versionadded:: 1.4.0
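+
+As a sketch of the "precise comparison" use case mentioned above (the
+helper name is hypothetical), two doubles can be tested for being at
+most one unit in the last place apart:
+
+.. code-block:: c
+
+   #include <numpy/npy_math.h>
+
+   /* True when a and b are equal or are adjacent representable
+      doubles; any NaN input yields 0. */
+   static int within_one_ulp(double a, double b)
+   {
+       return a == b || npy_nextafter(a, b) == b;
+   }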
+
+.. c:function:: void npy_set_floatstatus_divbyzero()
+
+ Set the divide by zero floating point exception
+
+ .. versionadded:: 1.6.0
+
+.. c:function:: void npy_set_floatstatus_overflow()
+
+ Set the overflow floating point exception
+
+ .. versionadded:: 1.6.0
+
+.. c:function:: void npy_set_floatstatus_underflow()
+
+ Set the underflow floating point exception
+
+ .. versionadded:: 1.6.0
+
+.. c:function:: void npy_set_floatstatus_invalid()
+
+ Set the invalid floating point exception
+
+ .. versionadded:: 1.6.0
+
+.. c:function:: int npy_get_floatstatus()
+
+ Get floating point status. Returns a bitmask with the following possible flags:
+
+ * NPY_FPE_DIVIDEBYZERO
+ * NPY_FPE_OVERFLOW
+ * NPY_FPE_UNDERFLOW
+ * NPY_FPE_INVALID
+
+ Note that :c:func:`npy_get_floatstatus_barrier` is preferable as it prevents
+ aggressive compiler optimizations reordering the call relative to
+ the code setting the status, which could lead to incorrect results.
+
+ .. versionadded:: 1.9.0
+
+.. c:function:: int npy_get_floatstatus_barrier(char*)
+
+ Get floating point status. A pointer to a local variable is passed in to
+ prevent aggressive compiler optimizations from reordering this function call
+ relative to the code setting the status, which could lead to incorrect
+ results.
+
+ Returns a bitmask with the following possible flags:
+
+ * NPY_FPE_DIVIDEBYZERO
+ * NPY_FPE_OVERFLOW
+ * NPY_FPE_UNDERFLOW
+ * NPY_FPE_INVALID
+
+ .. versionadded:: 1.15.0
+
+.. c:function:: int npy_clear_floatstatus()
+
+ Clears the floating point status. Returns the previous status mask.
+
+ Note that :c:func:`npy_clear_floatstatus_barrier` is preferable as it
+ prevents aggressive compiler optimizations reordering the call relative to
+ the code setting the status, which could lead to incorrect results.
+
+ .. versionadded:: 1.9.0
+
+.. c:function:: int npy_clear_floatstatus_barrier(char*)
+
+ Clears the floating point status. A pointer to a local variable is passed in to
+ prevent aggressive compiler optimizations from reordering this function call.
+ Returns the previous status mask.
+
+ .. versionadded:: 1.15.0
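+
+A minimal sketch tying the status functions together (the helper and
+its reporting convention are ours):
+
+.. code-block:: c
+
+   #include <numpy/npy_math.h>
+
+   /* Perform a division and report whether it raised the
+      divide-by-zero flag. The barrier argument is the address of a
+      local variable, which keeps the compiler from reordering the
+      status calls around the computation. */
+   static double checked_divide(double x, double y, int *divbyzero)
+   {
+       char barrier;
+       double r;
+
+       npy_clear_floatstatus_barrier(&barrier);
+       r = x / y;
+       *divbyzero = npy_get_floatstatus_barrier(&barrier) &
+                    NPY_FPE_DIVIDEBYZERO;
+       return r;
+   }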
+
+Complex functions
+~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 1.4.0
+
+C99-like complex functions have been added. These can be used if you wish to
+implement portable C extensions. Since we still support platforms without the
+C99 complex type, you need to restrict yourself to C90-compatible syntax, e.g.:
+
+.. code-block:: c
+
+ /* a = 1 + 2i */
+ npy_cdouble a = npy_cpack(1, 2);
+ npy_cdouble b;
+
+ b = npy_clog(a);
+
+Linking against the core math library in an extension
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 1.4.0
+
+To use the core math library in your own extension, you need to add the npymath
+compile and link options to your extension in your setup.py:
+
+ >>> from numpy.distutils.misc_util import get_info
+ >>> info = get_info('npymath')
+ >>> config.add_extension('foo', sources=['foo.c'], extra_info=info)
+
+In other words, the usage of info is exactly the same as when using blas_info
+and co.
+
+Half-precision functions
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 1.6.0
+
+The header file <numpy/halffloat.h> provides functions to work with
+IEEE 754-2008 16-bit floating point values. While this format is
+not typically used for numerical computations, it is useful for
+storing values which require floating point but do not need much precision.
+It can also be used as an educational tool to understand the nature
+of floating point round-off error.
+
+Like for other types, NumPy includes a typedef npy_half for the 16 bit
+float. Unlike for most of the other types, you cannot use this as a
+normal type in C, since it is a typedef for npy_uint16. For example,
+1.0 looks like 0x3c00 to C, and if you do an equality comparison
+between the different signed zeros, you will get -0.0 != 0.0
+(0x8000 != 0x0000), which is incorrect.
+
+For these reasons, NumPy provides an API to work with npy_half values
+accessible by including <numpy/halffloat.h> and linking to 'npymath'.
+For functions that are not provided directly, such as the arithmetic
+operations, the preferred method is to convert to float
+or double and back again, as in the following example.
+
+.. code-block:: c
+
+ npy_half sum(int n, npy_half *array) {
+ float ret = 0;
+ while(n--) {
+ ret += npy_half_to_float(*array++);
+ }
+ return npy_float_to_half(ret);
+ }
+
+External Links:
+
+* `754-2008 IEEE Standard for Floating-Point Arithmetic`__
+* `Half-precision Float Wikipedia Article`__.
+* `OpenGL Half Float Pixel Support`__
+* `The OpenEXR image format`__.
+
+__ https://ieeexplore.ieee.org/document/4610935/
+__ https://en.wikipedia.org/wiki/Half-precision_floating-point_format
+__ https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_half_float_pixel.txt
+__ https://www.openexr.com/about.html
+
+.. c:var:: NPY_HALF_ZERO
+
+ This macro is defined to positive zero.
+
+.. c:var:: NPY_HALF_PZERO
+
+ This macro is defined to positive zero.
+
+.. c:var:: NPY_HALF_NZERO
+
+ This macro is defined to negative zero.
+
+.. c:var:: NPY_HALF_ONE
+
+ This macro is defined to 1.0.
+
+.. c:var:: NPY_HALF_NEGONE
+
+ This macro is defined to -1.0.
+
+.. c:var:: NPY_HALF_PINF
+
+ This macro is defined to +inf.
+
+.. c:var:: NPY_HALF_NINF
+
+ This macro is defined to -inf.
+
+.. c:var:: NPY_HALF_NAN
+
+ This macro is defined to a NaN value, guaranteed to have its sign bit unset.
+
+.. c:function:: float npy_half_to_float(npy_half h)
+
+ Converts a half-precision float to a single-precision float.
+
+.. c:function:: double npy_half_to_double(npy_half h)
+
+ Converts a half-precision float to a double-precision float.
+
+.. c:function:: npy_half npy_float_to_half(float f)
+
+ Converts a single-precision float to a half-precision float. The
+ value is rounded to the nearest representable half, with ties going
+ to the nearest even. If the value is too small or too big, the
+ system's floating point underflow or overflow bit will be set.
+
+.. c:function:: npy_half npy_double_to_half(double d)
+
+ Converts a double-precision float to a half-precision float. The
+ value is rounded to the nearest representable half, with ties going
+ to the nearest even. If the value is too small or too big, the
+ system's floating point underflow or overflow bit will be set.
+
+.. c:function:: int npy_half_eq(npy_half h1, npy_half h2)
+
+ Compares two half-precision floats (h1 == h2).
+
+.. c:function:: int npy_half_ne(npy_half h1, npy_half h2)
+
+ Compares two half-precision floats (h1 != h2).
+
+.. c:function:: int npy_half_le(npy_half h1, npy_half h2)
+
+ Compares two half-precision floats (h1 <= h2).
+
+.. c:function:: int npy_half_lt(npy_half h1, npy_half h2)
+
+ Compares two half-precision floats (h1 < h2).
+
+.. c:function:: int npy_half_ge(npy_half h1, npy_half h2)
+
+ Compares two half-precision floats (h1 >= h2).
+
+.. c:function:: int npy_half_gt(npy_half h1, npy_half h2)
+
+ Compares two half-precision floats (h1 > h2).
+
+.. c:function:: int npy_half_eq_nonan(npy_half h1, npy_half h2)
+
+ Compares two half-precision floats that are known to not be NaN (h1 == h2). If
+ a value is NaN, the result is undefined.
+
+.. c:function:: int npy_half_lt_nonan(npy_half h1, npy_half h2)
+
+ Compares two half-precision floats that are known to not be NaN (h1 < h2). If
+ a value is NaN, the result is undefined.
+
+.. c:function:: int npy_half_le_nonan(npy_half h1, npy_half h2)
+
+ Compares two half-precision floats that are known to not be NaN (h1 <= h2). If
+ a value is NaN, the result is undefined.
+
+.. c:function:: int npy_half_iszero(npy_half h)
+
+ Tests whether the half-precision float has a value equal to zero. This may
+ be slightly faster than calling npy_half_eq(h, NPY_HALF_ZERO).
+
+.. c:function:: int npy_half_isnan(npy_half h)
+
+ Tests whether the half-precision float is a NaN.
+
+.. c:function:: int npy_half_isinf(npy_half h)
+
+ Tests whether the half-precision float is plus or minus Inf.
+
+.. c:function:: int npy_half_isfinite(npy_half h)
+
+ Tests whether the half-precision float is finite (not NaN or Inf).
+
+.. c:function:: int npy_half_signbit(npy_half h)
+
+ Returns 1 if h is negative, 0 otherwise.
+
+.. c:function:: npy_half npy_half_copysign(npy_half x, npy_half y)
+
+ Returns the value of x with the sign bit copied from y. Works for any value,
+ including Inf and NaN.
+
+.. c:function:: npy_half npy_half_spacing(npy_half h)
+
+ This is the same for half-precision float as npy_spacing and npy_spacingf
+ described in the low-level floating point section.
+
+.. c:function:: npy_half npy_half_nextafter(npy_half x, npy_half y)
+
+ This is the same for half-precision float as npy_nextafter and npy_nextafterf
+ described in the low-level floating point section.
+
+.. c:function:: npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
+
+ Low-level function which converts a 32-bit single-precision float, stored
+ as a uint32, into a 16-bit half-precision float.
+
+.. c:function:: npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
+
+ Low-level function which converts a 64-bit double-precision float, stored
+ as a uint64, into a 16-bit half-precision float.
+
+.. c:function:: npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h)
+
+ Low-level function which converts a 16-bit half-precision float
+ into a 32-bit single-precision float, stored as a uint32.
+
+.. c:function:: npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h)
+
+ Low-level function which converts a 16-bit half-precision float
+ into a 64-bit double-precision float, stored as a uint64.
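+
+As a small sketch of working within this API (the helper is
+hypothetical, not part of the library), a clamp can be written with
+the comparison functions alone, without converting to float:
+
+.. code-block:: c
+
+   #include <numpy/halffloat.h>
+
+   /* Clamp x into [lo, hi]; a NaN compares false against both bounds
+      and falls through unchanged. */
+   static npy_half half_clamp(npy_half x, npy_half lo, npy_half hi)
+   {
+       if (npy_half_lt(x, lo)) {
+           return lo;
+       }
+       if (npy_half_gt(x, hi)) {
+           return hi;
+       }
+       return x;
+   }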
--- /dev/null
+C API Deprecations
+==================
+
+Background
+----------
+
+The API exposed by NumPy for third-party extensions has grown over
+years of releases, and has allowed programmers to directly access
+NumPy functionality from C. This API can be best described as
+"organic". It has emerged from multiple competing desires and from
+multiple points of view over the years, strongly influenced by the
+desire to make it easy for users to move to NumPy from Numeric and
+Numarray. The core API originated with Numeric in 1995 and there are
+patterns such as the heavy use of macros written to mimic Python's
+C-API as well as to account for compiler technology of the late 90's.
+There is also only a small group of volunteers who have had very little
+time to spend on improving this API.
+
+There is an ongoing effort to improve the API.
+It is important in this effort
+to ensure that code that compiles for NumPy 1.X continues to
+compile for NumPy 1.X. At the same time, certain API's will be marked
+as deprecated so that future-looking code can avoid these API's and
+follow better practices.
+
+Another important role played by deprecation markings in the C API is to move
+towards hiding internal details of the NumPy implementation. For those
+needing direct, easy access to the data of ndarrays, this will not
+remove that ability. Rather, there are many potential performance
+optimizations which require changing the implementation details, and
+NumPy developers have been unable to try them because of the high
+value of preserving ABI compatibility. By deprecating this direct
+access, we will in the future be able to improve NumPy's performance
+in ways we cannot presently.
+
+Deprecation Mechanism NPY_NO_DEPRECATED_API
+-------------------------------------------
+
+In C, there is no equivalent to the deprecation warnings that Python
+supports. One way to do deprecations is to flag them in the
+documentation and release notes, then remove or change the deprecated
+features in a future major version (NumPy 2.0 and beyond). However,
+minor versions of NumPy should not have major C-API changes that break
+code which worked on a previous minor release. For example, we
+will do our best to ensure that code that compiled and worked on NumPy
+1.4 continues to work on NumPy 1.7 (but perhaps with compiler
+warnings).
+
+To use the NPY_NO_DEPRECATED_API mechanism, you need to #define it to
+the target API version of NumPy before #including any NumPy headers.
+If you want to confirm that your code is clean against 1.7, use::
+
+ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
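+
+In a source file this looks like the following; the define must come
+before the first NumPy header is included:
+
+.. code-block:: c
+
+   /* must appear before any NumPy #include */
+   #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+   #include <numpy/arrayobject.h>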
+
+On compilers which support a #warning mechanism, NumPy issues a
+compiler warning if you do not define the symbol NPY_NO_DEPRECATED_API.
+This way, the fact that there are deprecations will be flagged for
+third-party developers who may not have read the release notes closely.
--- /dev/null
+Data Type API
+=============
+
+.. sectionauthor:: Travis E. Oliphant
+
+The standard array can have 24 different data types (and has some
+support for adding your own types). These data types all have an
+enumerated type, an enumerated type-character, and a corresponding
+array scalar Python type object (placed in a hierarchy). There are
+also standard C typedefs to make it easier to manipulate elements of
+the given data type. For the numeric types, there are also bit-width
+equivalent C typedefs and named typenumbers that make it easier to
+select the precision desired.
+
+.. warning::
+
+ The names for the types in C code follow C naming conventions
+ more closely. The Python names for these types follow Python
+ conventions. Thus, :c:data:`NPY_FLOAT` picks up a 32-bit float in
+ C, but :class:`numpy.float_` in Python corresponds to a 64-bit
+ double. The bit-width names can be used in both Python and C for
+ clarity.
+
+
+Enumerated Types
+----------------
+
+.. c:var:: NPY_TYPES
+
+There is a list of enumerated types defined providing the basic 24
+data types plus some useful generic names. Whenever the code requires
+a type number, one of these enumerated types is requested. The types
+are all called :c:data:`NPY_{NAME}`:
+
+.. c:var:: NPY_BOOL
+
+ The enumeration value for the boolean type, stored as one byte.
+ It may only be set to the values 0 and 1.
+
+.. c:var:: NPY_BYTE
+.. c:var:: NPY_INT8
+
+ The enumeration value for an 8-bit/1-byte signed integer.
+
+.. c:var:: NPY_SHORT
+.. c:var:: NPY_INT16
+
+ The enumeration value for a 16-bit/2-byte signed integer.
+
+.. c:var:: NPY_INT
+.. c:var:: NPY_INT32
+
+ The enumeration value for a 32-bit/4-byte signed integer.
+
+.. c:var:: NPY_LONG
+
+ Equivalent to either NPY_INT or NPY_LONGLONG, depending on the
+ platform.
+
+.. c:var:: NPY_LONGLONG
+.. c:var:: NPY_INT64
+
+ The enumeration value for a 64-bit/8-byte signed integer.
+
+.. c:var:: NPY_UBYTE
+.. c:var:: NPY_UINT8
+
+ The enumeration value for an 8-bit/1-byte unsigned integer.
+
+.. c:var:: NPY_USHORT
+.. c:var:: NPY_UINT16
+
+ The enumeration value for a 16-bit/2-byte unsigned integer.
+
+.. c:var:: NPY_UINT
+.. c:var:: NPY_UINT32
+
+ The enumeration value for a 32-bit/4-byte unsigned integer.
+
+.. c:var:: NPY_ULONG
+
+ Equivalent to either NPY_UINT or NPY_ULONGLONG, depending on the
+ platform.
+
+.. c:var:: NPY_ULONGLONG
+.. c:var:: NPY_UINT64
+
+ The enumeration value for a 64-bit/8-byte unsigned integer.
+
+.. c:var:: NPY_HALF
+.. c:var:: NPY_FLOAT16
+
+ The enumeration value for a 16-bit/2-byte IEEE 754-2008 compatible floating
+ point type.
+
+.. c:var:: NPY_FLOAT
+.. c:var:: NPY_FLOAT32
+
+ The enumeration value for a 32-bit/4-byte IEEE 754 compatible floating
+ point type.
+
+.. c:var:: NPY_DOUBLE
+.. c:var:: NPY_FLOAT64
+
+ The enumeration value for a 64-bit/8-byte IEEE 754 compatible floating
+ point type.
+
+.. c:var:: NPY_LONGDOUBLE
+
+ The enumeration value for a platform-specific floating point type which is
+ at least as large as NPY_DOUBLE, but larger on many platforms.
+
+.. c:var:: NPY_CFLOAT
+.. c:var:: NPY_COMPLEX64
+
+ The enumeration value for a 64-bit/8-byte complex type made up of
+ two NPY_FLOAT values.
+
+.. c:var:: NPY_CDOUBLE
+.. c:var:: NPY_COMPLEX128
+
+ The enumeration value for a 128-bit/16-byte complex type made up of
+ two NPY_DOUBLE values.
+
+.. c:var:: NPY_CLONGDOUBLE
+
+ The enumeration value for a platform-specific complex floating point
+ type which is made up of two NPY_LONGDOUBLE values.
+
+.. c:var:: NPY_DATETIME
+
+ The enumeration value for a data type which holds dates or datetimes with
+ a precision based on selectable date or time units.
+
+.. c:var:: NPY_TIMEDELTA
+
+ The enumeration value for a data type which holds lengths of times in
+ integers of selectable date or time units.
+
+.. c:var:: NPY_STRING
+
+ The enumeration value for ASCII strings of a selectable size. The
+ strings have a fixed maximum size within a given array.
+
+.. c:var:: NPY_UNICODE
+
+ The enumeration value for UCS4 strings of a selectable size. The
+ strings have a fixed maximum size within a given array.
+
+.. c:var:: NPY_OBJECT
+
+ The enumeration value for references to arbitrary Python objects.
+
+.. c:var:: NPY_VOID
+
+ Primarily used to hold struct dtypes, but can contain arbitrary
+ binary data.
+
+Some useful aliases of the above types are
+
+.. c:var:: NPY_INTP
+
+ The enumeration value for a signed integer type which is the same
+ size as a (void \*) pointer. This is the type used by all
+ arrays of indices.
+
+.. c:var:: NPY_UINTP
+
+ The enumeration value for an unsigned integer type which is the
+ same size as a (void \*) pointer.
+
+.. c:var:: NPY_MASK
+
+ The enumeration value of the type used for masks, such as with
+ the :c:data:`NPY_ITER_ARRAYMASK` iterator flag. This is equivalent
+ to :c:data:`NPY_UINT8`.
+
+.. c:var:: NPY_DEFAULT_TYPE
+
+ The default type to use when no dtype is explicitly specified, for
+ example when calling np.zeros(shape). This is equivalent to
+ :c:data:`NPY_DOUBLE`.
+
+Other useful related constants are
+
+.. c:var:: NPY_NTYPES
+
+ The total number of built-in NumPy types. The enumeration covers
+ the range from 0 to NPY_NTYPES-1.
+
+.. c:var:: NPY_NOTYPE
+
+ A signal value guaranteed not to be a valid type enumeration number.
+
+.. c:var:: NPY_USERDEF
+
+ The start of type numbers used for Custom Data types.
+
+The various character codes indicating certain types are also part of
+an enumerated list. References to type characters (should they be
+needed at all) should always use these enumerations. The form of them
+is :c:data:`NPY_{NAME}LTR` where ``{NAME}`` can be
+
+ **BOOL**, **BYTE**, **UBYTE**, **SHORT**, **USHORT**, **INT**,
+ **UINT**, **LONG**, **ULONG**, **LONGLONG**, **ULONGLONG**,
+ **HALF**, **FLOAT**, **DOUBLE**, **LONGDOUBLE**, **CFLOAT**,
+ **CDOUBLE**, **CLONGDOUBLE**, **DATETIME**, **TIMEDELTA**,
+ **OBJECT**, **STRING**, **VOID**
+
+ **INTP**, **UINTP**
+
+ **GENBOOL**, **SIGNED**, **UNSIGNED**, **FLOATING**, **COMPLEX**
+
+The latter group of ``{NAME}s`` corresponds to letters used in the array
+interface typestring specification.
+
+
+Defines
+-------
+
+Max and min values for integers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. c:var:: NPY_MAX_INT{bits}
+
+.. c:var:: NPY_MAX_UINT{bits}
+
+.. c:var:: NPY_MIN_INT{bits}
+
+ These are defined for ``{bits}`` = 8, 16, 32, 64, 128, and 256 and provide
+ the maximum (minimum) value of the corresponding (unsigned) integer
+ type. Note: the actual integer type may not be available on all
+ platforms (i.e. 128-bit and 256-bit integers are rare).
+
+.. c:var:: NPY_MIN_{type}
+
+ This is defined for ``{type}`` = **BYTE**, **SHORT**, **INT**,
+ **LONG**, **LONGLONG**, **INTP**
+
+.. c:var:: NPY_MAX_{type}
+
+ This is defined for ``{type}`` = **BYTE**, **UBYTE**,
+ **SHORT**, **USHORT**, **INT**, **UINT**, **LONG**, **ULONG**,
+ **LONGLONG**, **ULONGLONG**, **INTP**, **UINTP**
+
+
+Number of bits in data types
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+All :c:data:`NPY_SIZEOF_{CTYPE}` constants have corresponding
+:c:data:`NPY_BITSOF_{CTYPE}` constants defined. The :c:data:`NPY_BITSOF_{CTYPE}`
+constants provide the number of bits in the data type. Specifically,
+the available ``{CTYPE}s`` are
+
+ **BOOL**, **CHAR**, **SHORT**, **INT**, **LONG**,
+ **LONGLONG**, **FLOAT**, **DOUBLE**, **LONGDOUBLE**
+
+
+Bit-width references to enumerated typenums
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+All of the numeric data types (integer, floating point, and complex)
+have constants that are defined to be a specific enumerated type
+number. Exactly which enumerated type a bit-width type refers to is
+platform dependent. In particular, the constants available are
+:c:data:`PyArray_{NAME}{BITS}` where ``{NAME}`` is **INT**, **UINT**,
+**FLOAT**, **COMPLEX** and ``{BITS}`` can be 8, 16, 32, 64, 80, 96, 128,
+160, 192, 256, and 512. Obviously not all bit-widths are available on
+all platforms for all the kinds of numeric types. Commonly 8-, 16-,
+32-, 64-bit integers; 32-, 64-bit floats; and 64-, 128-bit complex
+types are available.
+
+
+Integer that can hold a pointer
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The constants **NPY_INTP** and **NPY_UINTP** refer to an
+enumerated integer type that is large enough to hold a pointer on the
+platform. Index arrays should always be converted to **NPY_INTP**,
+because the dimensions of the array are of type npy_intp.
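+
+As a sketch (the helper is ours; ``PyArray_FROM_OTF`` belongs to the
+array API documented elsewhere, and the module is assumed to have
+called ``import_array()``), arbitrary index input can be coerced to a
+well-behaved **NPY_INTP** array like this:
+
+.. code-block:: c
+
+   #include <Python.h>
+   #include <numpy/arrayobject.h>
+
+   /* Coerce any index-like sequence to a contiguous, aligned
+      NPY_INTP array before consuming it from C. */
+   static PyArrayObject *as_index_array(PyObject *obj)
+   {
+       return (PyArrayObject *)PyArray_FROM_OTF(
+               obj, NPY_INTP, NPY_ARRAY_IN_ARRAY);
+   }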
+
+
+C-type names
+------------
+
+There are standard variable types for each of the numeric data types
+and the bool data type. Some of these are already available in the
+C-specification. You can create variables in extension code with these
+types.
+
+
+Boolean
+^^^^^^^
+
+.. c:type:: npy_bool
+
+ unsigned char; the constants :c:data:`NPY_FALSE` and
+ :c:data:`NPY_TRUE` are also defined.
+
+
+(Un)Signed Integer
+^^^^^^^^^^^^^^^^^^
+
+Unsigned versions of the integers can be defined by prepending a 'u' to
+the integer name.
+
+.. c:type:: npy_(u)byte
+
+ (unsigned) char
+
+.. c:type:: npy_short
+
+ short
+
+.. c:type:: npy_ushort
+
+ unsigned short
+
+.. c:type:: npy_uint
+
+ unsigned int
+
+.. c:type:: npy_int
+
+ int
+
+.. c:type:: npy_int16
+
+ 16-bit integer
+
+.. c:type:: npy_uint16
+
+ 16-bit unsigned integer
+
+.. c:type:: npy_int32
+
+ 32-bit integer
+
+.. c:type:: npy_uint32
+
+ 32-bit unsigned integer
+
+.. c:type:: npy_int64
+
+ 64-bit integer
+
+.. c:type:: npy_uint64
+
+ 64-bit unsigned integer
+
+.. c:type:: npy_(u)long
+
+ (unsigned) long int
+
+.. c:type:: npy_(u)longlong
+
+ (unsigned) long long int
+
+.. c:type:: npy_intp
+
+ Py_intptr_t (an integer that is the size of a pointer on
+ the platform).
+
+.. c:type:: npy_uintp
+
+ unsigned Py_intptr_t (an integer that is the size of a pointer on
+ the platform).
+
+
+(Complex) Floating point
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. c:type:: npy_half
+
+ 16-bit float
+
+.. c:type:: npy_(c)float
+
+ 32-bit float
+
+.. c:type:: npy_(c)double
+
+ 64-bit double
+
+.. c:type:: npy_(c)longdouble
+
+ long double
+
+Complex types are structures with **.real** and **.imag** members (in
+that order).
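+
+For instance (a sketch; the helper is not part of NumPy), the squared
+magnitude of an ``npy_cdouble`` can be computed through these members:
+
+.. code-block:: c
+
+   #include <numpy/npy_math.h>
+
+   /* Squared magnitude |z|^2 of a double-precision complex value. */
+   static double cabs2(npy_cdouble z)
+   {
+       return z.real * z.real + z.imag * z.imag;
+   }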
+
+
+Bit-width names
+^^^^^^^^^^^^^^^
+
+There are also typedefs for signed integers, unsigned integers,
+floating point, and complex floating point types of specific bit-
+widths. The available type names are
+
+ :c:type:`npy_int{bits}`, :c:type:`npy_uint{bits}`, :c:type:`npy_float{bits}`,
+ and :c:type:`npy_complex{bits}`
+
+where ``{bits}`` is the number of bits in the type and can be **8**,
+**16**, **32**, **64**, 128, and 256 for integer types; 16, **32**
+, **64**, 80, 96, 128, and 256 for floating-point types; and 32,
+**64**, **128**, 160, 192, and 512 for complex-valued types. Which
+bit-widths are available is platform dependent. The bolded bit-widths
+are usually available on all platforms.
+
+
+Printf Formatting
+-----------------
+
+For help in printing, the following strings are defined as the correct
+format specifier in printf and related commands.
+
+ :c:data:`NPY_LONGLONG_FMT`, :c:data:`NPY_ULONGLONG_FMT`,
+ :c:data:`NPY_INTP_FMT`, :c:data:`NPY_UINTP_FMT`,
+ :c:data:`NPY_LONGDOUBLE_FMT`
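+
+These are spliced into the format string by string concatenation, as
+in this minimal sketch (the helper is hypothetical):
+
+.. code-block:: c
+
+   #include <stdio.h>
+   #include <numpy/npy_common.h>
+
+   /* Print an npy_intp portably across platforms. */
+   static void print_dimension(npy_intp n)
+   {
+       printf("dimension = %" NPY_INTP_FMT "\n", n);
+   }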
--- /dev/null
+.. _c-api.generalized-ufuncs:
+
+==================================
+Generalized Universal Function API
+==================================
+
+There is a general need for looping over not only functions on scalars
+but also over functions on vectors (or arrays).
+This concept is realized in NumPy by generalizing the universal functions
+(ufuncs). In regular ufuncs, the elementary function is limited to
+element-by-element operations, whereas the generalized version (gufuncs)
+supports "sub-array" by "sub-array" operations. The Perl vector library PDL
+provides similar functionality and its terms are re-used in the following.
+
+Each generalized ufunc has information associated with it that states
+what the "core" dimensionality of the inputs is, as well as the
+corresponding dimensionality of the outputs (the element-wise ufuncs
+have zero core dimensions). The list of the core dimensions for all
+arguments is called the "signature" of a ufunc. For example, the
+ufunc numpy.add has signature ``(),()->()`` defining two scalar inputs
+and one scalar output.
+
+Another example is the function ``inner1d(a, b)`` with a signature of
+``(i),(i)->()``. This applies the inner product along the last axis of
+each input, but keeps the remaining indices intact.
+For example, where ``a`` is of shape ``(3, 5, N)`` and ``b`` is of shape
+``(5, N)``, this will return an output of shape ``(3, 5)``.
+The underlying elementary function is called ``3 * 5`` times. In the
+signature, we specify one core dimension ``(i)`` for each input and zero core
+dimensions ``()`` for the output, since it takes two 1-d arrays and
+returns a scalar. By using the same name ``i``, we specify that the two
+corresponding dimensions should be of the same size.
+
+The dimensions beyond the core dimensions are called "loop" dimensions. In
+the above example, this corresponds to ``(3, 5)``.
+
+The signature determines how the dimensions of each input/output array are
+split into core and loop dimensions:
+
+#. Each dimension in the signature is matched to a dimension of the
+ corresponding passed-in array, starting from the end of the shape tuple.
+ These are the core dimensions, and they must be present in the arrays, or
+ an error will be raised.
+#. Core dimensions assigned to the same label in the signature (e.g. the
+ ``i`` in ``inner1d``'s ``(i),(i)->()``) must have exactly matching sizes,
+ no broadcasting is performed.
+#. The core dimensions are removed from all inputs and the remaining
+ dimensions are broadcast together, defining the loop dimensions.
+#. The shape of each output is determined from the loop dimensions plus the
+ output's core dimensions.
+
+Typically, the size of all core dimensions in an output will be determined by
+the size of a core dimension with the same label in an input array. This is
+not a requirement, and it is possible to define a signature where a label
+comes up for the first time in an output, although some precautions must be
+taken when calling such a function. An example would be the function
+``euclidean_pdist(a)``, with signature ``(n,d)->(p)``, that given an array of
+``n`` ``d``-dimensional vectors, computes all unique pairwise Euclidean
+distances among them. The output dimension ``p`` must therefore be equal to
+``n * (n - 1) / 2``, but it is the caller's responsibility to pass in an
+output array of the right size. If the size of a core dimension of an output
+cannot be determined from a passed in input or output array, an error will be
+raised.
+
+Note: Prior to NumPy 1.10.0, less strict checks were in place: missing core
+dimensions were created by prepending 1's to the shape as necessary, core
+dimensions with the same label were broadcast together, and undetermined
+dimensions were created with size 1.
+
+
+Definitions
+-----------
+
+Elementary Function
+ Each ufunc consists of an elementary function that performs the
+ most basic operation on the smallest portion of array arguments
+ (e.g. adding two numbers is the most basic operation in adding two
+ arrays). The ufunc applies the elementary function multiple times
+ on different parts of the arrays. The input/output of elementary
+ functions can be vectors; e.g., the elementary function of inner1d
+ takes two vectors as input.
+
+Signature
+ A signature is a string describing the input/output dimensions of
+ the elementary function of a ufunc. See section below for more
+ details.
+
+Core Dimension
+ The dimensionality of each input/output of an elementary function
+ is defined by its core dimensions (zero core dimensions correspond
+ to a scalar input/output). The core dimensions are mapped to the
+ last dimensions of the input/output arrays.
+
+Dimension Name
+ A dimension name represents a core dimension in the signature.
+ Different dimensions may share a name, indicating that they are of
+ the same size.
+
+Dimension Index
+ A dimension index is an integer representing a dimension name. It
+ enumerates the dimension names according to the order of the first
+ occurrence of each name in the signature.
+
+.. _details-of-signature:
+
+Details of Signature
+--------------------
+
+The signature defines "core" dimensionality of input and output
+variables, and thereby also defines the contraction of the
+dimensions. The signature is represented by a string of the
+following format:
+
+* Core dimensions of each input or output array are represented by a
+ list of dimension names in parentheses, ``(i_1,...,i_N)``; a scalar
+ input/output is denoted by ``()``. Instead of ``i_1``, ``i_2``,
+ etc, one can use any valid Python variable name.
+* Dimension lists for different arguments are separated by ``","``.
+ Input/output arguments are separated by ``"->"``.
+* If one uses the same dimension name in multiple locations, this
+ enforces the same size of the corresponding dimensions.
+
+The formal syntax of signatures is as follows::
+
+ <Signature> ::= <Input arguments> "->" <Output arguments>
+ <Input arguments> ::= <Argument list>
+ <Output arguments> ::= <Argument list>
+ <Argument list> ::= nil | <Argument> | <Argument> "," <Argument list>
+ <Argument> ::= "(" <Core dimension list> ")"
+ <Core dimension list> ::= nil | <Core dimension> |
+ <Core dimension> "," <Core dimension list>
+ <Core dimension> ::= <Dimension name> <Dimension modifier>
+ <Dimension name> ::= valid Python variable name | valid integer
+ <Dimension modifier> ::= nil | "?"
+
+Notes:
+
+#. All quotes are for clarity.
+#. Unmodified core dimensions that share the same name must have the same size.
+ Each dimension name typically corresponds to one level of looping in the
+ elementary function's implementation.
+#. White spaces are ignored.
+#. An integer as a dimension name freezes that dimension to the value.
+#. If the name is suffixed with the "?" modifier, the dimension is a core
+ dimension only if it exists on all inputs and outputs that share it;
+ otherwise it is ignored (and replaced by a dimension of size 1 for the
+ elementary function).
+
+Here are some examples of signatures:
+
++-------------+----------------------------+-----------------------------------+
+| name | signature | common usage |
++=============+============================+===================================+
+| add | ``(),()->()`` | binary ufunc |
++-------------+----------------------------+-----------------------------------+
+| sum1d | ``(i)->()`` | reduction |
++-------------+----------------------------+-----------------------------------+
+| inner1d | ``(i),(i)->()`` | vector-vector multiplication |
++-------------+----------------------------+-----------------------------------+
+| matmat | ``(m,n),(n,p)->(m,p)`` | matrix multiplication |
++-------------+----------------------------+-----------------------------------+
+| vecmat | ``(n),(n,p)->(p)`` | vector-matrix multiplication |
++-------------+----------------------------+-----------------------------------+
+| matvec | ``(m,n),(n)->(m)`` | matrix-vector multiplication |
++-------------+----------------------------+-----------------------------------+
+| matmul | ``(m?,n),(n,p?)->(m?,p?)`` | combination of the four above |
++-------------+----------------------------+-----------------------------------+
+| outer_inner | ``(i,t),(j,t)->(i,j)`` | inner over the last dimension, |
+| | | outer over the second to last, |
+| | | and loop/broadcast over the rest. |
++-------------+----------------------------+-----------------------------------+
+| cross1d | ``(3),(3)->(3)`` | cross product where the last |
+| | | dimension is frozen and must be 3 |
++-------------+----------------------------+-----------------------------------+
+
+.. _frozen:
+
+The last is an instance of freezing a core dimension and can be used to
+improve ufunc performance.
+
+C-API for implementing Elementary Functions
+-------------------------------------------
+
+The current interface remains unchanged, and ``PyUFunc_FromFuncAndData``
+can still be used to implement (specialized) ufuncs, consisting of
+scalar elementary functions.
+
+One can use ``PyUFunc_FromFuncAndDataAndSignature`` to declare a more
+general ufunc. The argument list is the same as
+``PyUFunc_FromFuncAndData``, with an additional argument specifying the
+signature as C string.
+
+Furthermore, the callback function is of the same type as before,
+``void (*foo)(char **args, intp *dimensions, intp *steps, void *func)``.
+When invoked, ``args`` is a list of length ``nargs`` containing
+the data of all input/output arguments. For a scalar elementary
+function, ``steps`` is also of length ``nargs``, denoting the strides used
+for the arguments. ``dimensions`` is a pointer to a single integer
+defining the size of the axis to be looped over.
+
+For a non-trivial signature, ``dimensions`` will also contain the sizes
+of the core dimensions, starting at the second entry. Only
+one size is provided for each unique dimension name and the sizes are
+given according to the first occurrence of a dimension name in the
+signature.
+
+The first ``nargs`` elements of ``steps`` remain the same as for scalar
+ufuncs. The following elements contain the strides of all core
+dimensions for all arguments in order.
+
+For example, consider a ufunc with signature ``(i,j),(i)->()``. In
+this case, ``args`` will contain three pointers to the data of the
+input/output arrays ``a``, ``b``, ``c``. Furthermore, ``dimensions`` will be
+``[N, I, J]`` to define the size ``N`` of the loop and the sizes ``I`` and ``J``
+for the core dimensions ``i`` and ``j``. Finally, ``steps`` will be
+``[a_N, b_N, c_N, a_i, a_j, b_i]``, containing all necessary strides.
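+
+As a concrete sketch (not the NumPy implementation), an elementary
+function for ``inner1d`` with signature ``(i),(i)->()`` on ``double``
+data would unpack ``dimensions`` and ``steps`` as follows; it could
+then be registered with ``PyUFunc_FromFuncAndDataAndSignature`` using
+that signature string:
+
+.. code-block:: c
+
+   #include <numpy/ndarraytypes.h>
+
+   static void inner1d_loop(char **args, npy_intp const *dimensions,
+                            npy_intp const *steps, void *data)
+   {
+       npy_intp N = dimensions[0];     /* loop dimension */
+       npy_intp I = dimensions[1];     /* core dimension i */
+       char *a = args[0], *b = args[1], *out = args[2];
+       npy_intp a_N = steps[0], b_N = steps[1], out_N = steps[2];
+       npy_intp a_i = steps[3], b_i = steps[4];
+       npy_intp k, j;
+
+       (void)data;                     /* unused extra argument */
+       for (k = 0; k < N; k++) {
+           double sum = 0.0;
+           char *pa = a, *pb = b;
+           for (j = 0; j < I; j++) {
+               sum += *(double *)pa * *(double *)pb;
+               pa += a_i;
+               pb += b_i;
+           }
+           *(double *)out = sum;
+           a += a_N;
+           b += b_N;
+           out += out_N;
+       }
+   }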
--- /dev/null
+.. _c-api:
+
+###########
+NumPy C-API
+###########
+
+.. sectionauthor:: Travis E. Oliphant
+
+| Beware of the man who won't be bothered with details.
+| --- *William Feather, Sr.*
+
+| The truth is out there.
+| --- *Chris Carter, The X Files*
+
+
+NumPy provides a C-API to enable users to extend the system and get
+access to the array object for use in other routines. The best way to
+truly understand the C-API is to read the source code. If you are
+unfamiliar with (C) source code, however, this can be a daunting
+experience at first. Be assured that the task becomes easier with
+practice, and you may be surprised at how simple the C-code can be to
+understand. Even if you don't think you can write C-code from scratch,
+it is much easier to understand and modify already-written source code
+than to create it *de novo*.
+
+Python extensions are especially straightforward to understand because
+they all have a very similar structure. Admittedly, NumPy is not a
+trivial extension to Python, and may take a little more snooping to
+grasp. This is especially true because of the code-generation
+techniques, which simplify maintenance of very similar code, but can
+make the code a little less readable to beginners. Still, with a
+little persistence, the code can be opened to your understanding. It
+is my hope that this guide to the C-API can assist in the process of
+becoming familiar with the compiled-level work that can be done with
+NumPy in order to squeeze that last bit of necessary speed out of your
+code.
+
+.. currentmodule:: numpy-c-api
+
+.. toctree::
+ :maxdepth: 2
+
+ types-and-structures
+ config
+ dtype
+ array
+ iterator
+ ufunc
+ generalized-ufuncs
+ coremath
+ deprecations
--- /dev/null
+Array Iterator API
+==================
+
+.. sectionauthor:: Mark Wiebe
+
+.. index::
+ pair: iterator; C-API
+ pair: C-API; iterator
+
+.. versionadded:: 1.6
+
+Array Iterator
+--------------
+
+The array iterator encapsulates many of the key features in ufuncs,
+allowing user code to support features like output parameters,
+preservation of memory layouts, and buffering of data with the wrong
+alignment or type, without requiring difficult coding.
+
+This page documents the API for the iterator.
+The iterator is named ``NpyIter`` and functions are
+named ``NpyIter_*``.
+
+There is an :ref:`introductory guide to array iteration <arrays.nditer>`
+which may be of interest for those using this C API. In many instances,
+testing out ideas by creating the iterator in Python is a good idea
+before writing the C iteration code.
+
+Simple Iteration Example
+------------------------
+
+The best way to become familiar with the iterator is to look at its
+usage within the NumPy codebase itself. For example, here is a slightly
+tweaked version of the code for :c:func:`PyArray_CountNonzero`, which counts the
+number of non-zero elements in an array.
+
+.. code-block:: c
+
+ npy_intp PyArray_CountNonzero(PyArrayObject* self)
+ {
+ /* Nonzero boolean function */
+ PyArray_NonzeroFunc* nonzero = PyArray_DESCR(self)->f->nonzero;
+
+ NpyIter* iter;
+ NpyIter_IterNextFunc *iternext;
+ char** dataptr;
+ npy_intp nonzero_count;
+ npy_intp* strideptr,* innersizeptr;
+
+ /* Handle zero-sized arrays specially */
+ if (PyArray_SIZE(self) == 0) {
+ return 0;
+ }
+
+ /*
+ * Create and use an iterator to count the nonzeros.
+ * flag NPY_ITER_READONLY
+ * - The array is never written to.
+ * flag NPY_ITER_EXTERNAL_LOOP
+ * - Inner loop is done outside the iterator for efficiency.
+ * flag NPY_ITER_REFS_OK
+ * - Reference types are acceptable.
+ * order NPY_KEEPORDER
+ * - Visit elements in memory order, regardless of strides.
+ * This is good for performance when the specific order
+ * elements are visited is unimportant.
+ * casting NPY_NO_CASTING
+ * - No casting is required for this operation.
+ */
+ iter = NpyIter_New(self, NPY_ITER_READONLY|
+ NPY_ITER_EXTERNAL_LOOP|
+ NPY_ITER_REFS_OK,
+ NPY_KEEPORDER, NPY_NO_CASTING,
+ NULL);
+ if (iter == NULL) {
+ return -1;
+ }
+
+ /*
+ * The iternext function gets stored in a local variable
+ * so it can be called repeatedly in an efficient manner.
+ */
+ iternext = NpyIter_GetIterNext(iter, NULL);
+ if (iternext == NULL) {
+ NpyIter_Deallocate(iter);
+ return -1;
+ }
+ /* The location of the data pointer which the iterator may update */
+ dataptr = NpyIter_GetDataPtrArray(iter);
+ /* The location of the stride which the iterator may update */
+ strideptr = NpyIter_GetInnerStrideArray(iter);
+ /* The location of the inner loop size which the iterator may update */
+ innersizeptr = NpyIter_GetInnerLoopSizePtr(iter);
+
+ nonzero_count = 0;
+ do {
+ /* Get the inner loop data/stride/count values */
+ char* data = *dataptr;
+ npy_intp stride = *strideptr;
+ npy_intp count = *innersizeptr;
+
+ /* This is a typical inner loop for NPY_ITER_EXTERNAL_LOOP */
+ while (count--) {
+ if (nonzero(data, self)) {
+ ++nonzero_count;
+ }
+ data += stride;
+ }
+
+ /* Increment the iterator to the next inner loop */
+ } while(iternext(iter));
+
+ NpyIter_Deallocate(iter);
+
+ return nonzero_count;
+ }
+
+Simple Multi-Iteration Example
+------------------------------
+
+Here is a simple copy function using the iterator. The ``order`` parameter
+is used to control the memory layout of the allocated result, typically
+:c:data:`NPY_KEEPORDER` is desired.
+
+.. code-block:: c
+
+ PyObject *CopyArray(PyObject *arr, NPY_ORDER order)
+ {
+ NpyIter *iter;
+ NpyIter_IterNextFunc *iternext;
+ PyObject *op[2], *ret;
+ npy_uint32 flags;
+ npy_uint32 op_flags[2];
+ npy_intp itemsize, *innersizeptr, innerstride;
+ char **dataptrarray;
+
+ /*
+ * No inner iteration - inner loop is handled by CopyArray code
+ */
+ flags = NPY_ITER_EXTERNAL_LOOP;
+ /*
+ * Tell the constructor to automatically allocate the output.
+ * The data type of the output will match that of the input.
+ */
+ op[0] = arr;
+ op[1] = NULL;
+ op_flags[0] = NPY_ITER_READONLY;
+ op_flags[1] = NPY_ITER_WRITEONLY | NPY_ITER_ALLOCATE;
+
+ /* Construct the iterator */
+ iter = NpyIter_MultiNew(2, op, flags, order, NPY_NO_CASTING,
+ op_flags, NULL);
+ if (iter == NULL) {
+ return NULL;
+ }
+
+ /*
+ * Make a copy of the iternext function pointer and
+ * a few other variables the inner loop needs.
+ */
+ iternext = NpyIter_GetIterNext(iter, NULL);
+ innerstride = NpyIter_GetInnerStrideArray(iter)[0];
+ itemsize = NpyIter_GetDescrArray(iter)[0]->elsize;
+ /*
+ * The inner loop size and data pointers may change during the
+ * loop, so just cache the addresses.
+ */
+ innersizeptr = NpyIter_GetInnerLoopSizePtr(iter);
+ dataptrarray = NpyIter_GetDataPtrArray(iter);
+
+ /*
+ * Note that because the iterator allocated the output,
+ * it matches the iteration order and is packed tightly,
+ * so we don't need to check it like the input.
+ */
+ if (innerstride == itemsize) {
+ do {
+ memcpy(dataptrarray[1], dataptrarray[0],
+ itemsize * (*innersizeptr));
+ } while (iternext(iter));
+ } else {
+ /* For efficiency, should specialize this based on item size... */
+ npy_intp i;
+ do {
+ npy_intp size = *innersizeptr;
+ char *src = dataptrarray[0], *dst = dataptrarray[1];
+ for(i = 0; i < size; i++, src += innerstride, dst += itemsize) {
+ memcpy(dst, src, itemsize);
+ }
+ } while (iternext(iter));
+ }
+
+ /* Get the result from the iterator object array */
+ ret = NpyIter_GetOperandArray(iter)[1];
+ Py_INCREF(ret);
+
+ if (NpyIter_Deallocate(iter) != NPY_SUCCEED) {
+ Py_DECREF(ret);
+ return NULL;
+ }
+
+ return ret;
+ }
+
+
+Iterator Data Types
+---------------------
+
+The iterator layout is an internal detail, and user code only sees
+an incomplete struct.
+
+.. c:type:: NpyIter
+
+ This is an opaque pointer type for the iterator. Access to its contents
+ can only be done through the iterator API.
+
+.. c:type:: NpyIter_Type
+
+ This is the type which exposes the iterator to Python. Currently, no
+ API is exposed which provides access to the values of a Python-created
+ iterator. If an iterator is created in Python, it must be used in Python
+ and vice versa. Such an API will likely be created in a future version.
+
+.. c:type:: NpyIter_IterNextFunc
+
+ This is a function pointer for the iteration loop, returned by
+ :c:func:`NpyIter_GetIterNext`.
+
+.. c:type:: NpyIter_GetMultiIndexFunc
+
+ This is a function pointer for getting the current iterator multi-index,
+ returned by :c:func:`NpyIter_GetGetMultiIndex`.
+
+Construction and Destruction
+----------------------------
+
+.. c:function:: NpyIter* NpyIter_New( \
+ PyArrayObject* op, npy_uint32 flags, NPY_ORDER order, \
+ NPY_CASTING casting, PyArray_Descr* dtype)
+
+ Creates an iterator for the given numpy array object ``op``.
+
+ Flags that may be passed in ``flags`` are any combination
+ of the global and per-operand flags documented in
+ :c:func:`NpyIter_MultiNew`, except for :c:data:`NPY_ITER_ALLOCATE`.
+
+ Any of the :c:type:`NPY_ORDER` enum values may be passed to ``order``. For
+ efficient iteration, :c:type:`NPY_KEEPORDER` is the best option, and
+ the other orders enforce the particular iteration pattern.
+
+ Any of the :c:type:`NPY_CASTING` enum values may be passed to ``casting``.
+ The values include :c:data:`NPY_NO_CASTING`, :c:data:`NPY_EQUIV_CASTING`,
+ :c:data:`NPY_SAFE_CASTING`, :c:data:`NPY_SAME_KIND_CASTING`, and
+ :c:data:`NPY_UNSAFE_CASTING`. To allow the casts to occur, copying or
+ buffering must also be enabled.
+
+ If ``dtype`` isn't ``NULL``, then it requires that data type.
+ If copying is allowed, it will make a temporary copy if the data
+ is castable. If :c:data:`NPY_ITER_UPDATEIFCOPY` is enabled, it will
+ also copy the data back with another cast upon iterator destruction.
+
+ Returns NULL if there is an error, otherwise returns the allocated
+ iterator.
+
+ To make an iterator similar to the old iterator, this should work.
+
+ .. code-block:: c
+
+ iter = NpyIter_New(op, NPY_ITER_READWRITE,
+ NPY_CORDER, NPY_NO_CASTING, NULL);
+
+ If you want to edit an array with aligned ``double`` code,
+ but the order doesn't matter, you would use this.
+
+ .. code-block:: c
+
+ dtype = PyArray_DescrFromType(NPY_DOUBLE);
+ iter = NpyIter_New(op, NPY_ITER_READWRITE|
+ NPY_ITER_BUFFERED|
+ NPY_ITER_NBO|
+ NPY_ITER_ALIGNED,
+ NPY_KEEPORDER,
+ NPY_SAME_KIND_CASTING,
+ dtype);
+ Py_DECREF(dtype);
+
+.. c:function:: NpyIter* NpyIter_MultiNew( \
+ npy_intp nop, PyArrayObject** op, npy_uint32 flags, NPY_ORDER order, \
+ NPY_CASTING casting, npy_uint32* op_flags, PyArray_Descr** op_dtypes)
+
+ Creates an iterator for broadcasting the ``nop`` array objects provided
+ in ``op``, using regular NumPy broadcasting rules.
+
+ Any of the :c:type:`NPY_ORDER` enum values may be passed to ``order``. For
+ efficient iteration, :c:data:`NPY_KEEPORDER` is the best option, and the
+ other orders enforce the particular iteration pattern. When using
+ :c:data:`NPY_KEEPORDER`, if you also want to ensure that the iteration is
+ not reversed along an axis, you should pass the flag
+ :c:data:`NPY_ITER_DONT_NEGATE_STRIDES`.
+
+ Any of the :c:type:`NPY_CASTING` enum values may be passed to ``casting``.
+ The values include :c:data:`NPY_NO_CASTING`, :c:data:`NPY_EQUIV_CASTING`,
+ :c:data:`NPY_SAFE_CASTING`, :c:data:`NPY_SAME_KIND_CASTING`, and
+ :c:data:`NPY_UNSAFE_CASTING`. To allow the casts to occur, copying or
+ buffering must also be enabled.
+
+ If ``op_dtypes`` isn't ``NULL``, it specifies a data type or ``NULL``
+ for each ``op[i]``.
+
+ Returns NULL if there is an error, otherwise returns the allocated
+ iterator.
+
+ Flags that may be passed in ``flags``, applying to the whole
+ iterator, are:
+
+ .. c:var:: NPY_ITER_C_INDEX
+
+ Causes the iterator to track a raveled flat index matching C
+ order. This option cannot be used with :c:data:`NPY_ITER_F_INDEX`.
+
+ .. c:var:: NPY_ITER_F_INDEX
+
+ Causes the iterator to track a raveled flat index matching Fortran
+ order. This option cannot be used with :c:data:`NPY_ITER_C_INDEX`.
+
+ .. c:var:: NPY_ITER_MULTI_INDEX
+
+ Causes the iterator to track a multi-index.
+ This prevents the iterator from coalescing axes to
+ produce bigger inner loops. If the loop is also not buffered
+ and no index is being tracked (`NpyIter_RemoveAxis` can be called),
+ then the iterator size can be ``-1`` to indicate that the iterator
+ is too large. This can happen due to complex broadcasting and
+ will result in errors when setting the iterator
+ range, removing the multi index, or getting the next function.
+ However, it is possible to remove axes again and use the iterator
+ normally if the size is small enough after removal.
+
+ .. c:var:: NPY_ITER_EXTERNAL_LOOP
+
+ Causes the iterator to skip iteration of the innermost
+ loop, requiring the user of the iterator to handle it.
+
+ This flag is incompatible with :c:data:`NPY_ITER_C_INDEX`,
+ :c:data:`NPY_ITER_F_INDEX`, and :c:data:`NPY_ITER_MULTI_INDEX`.
+
+ .. c:var:: NPY_ITER_DONT_NEGATE_STRIDES
+
+ This only affects the iterator when :c:type:`NPY_KEEPORDER` is
+ specified for the order parameter. By default with
+ :c:type:`NPY_KEEPORDER`, the iterator reverses axes which have
+ negative strides, so that memory is traversed in a forward
+ direction. This disables this step. Use this flag if you
+ want to use the underlying memory-ordering of the axes,
+ but don't want an axis reversed. This is the behavior of
+ ``numpy.ravel(a, order='K')``, for instance.
+
+ .. c:var:: NPY_ITER_COMMON_DTYPE
+
+ Causes the iterator to convert all the operands to a common
+ data type, calculated based on the ufunc type promotion rules.
+ Copying or buffering must be enabled.
+
+ If the common data type is known ahead of time, don't use this
+ flag. Instead, set the requested dtype for all the operands.
+
+ .. c:var:: NPY_ITER_REFS_OK
+
+ Indicates that arrays with reference types (object
+ arrays or structured arrays containing an object type)
+ may be accepted and used in the iterator. If this flag
+ is enabled, the caller must be sure to check whether
+ :c:func:`NpyIter_IterationNeedsAPI(iter)` is true, in which case
+ it may not release the GIL during iteration.
+
+ .. c:var:: NPY_ITER_ZEROSIZE_OK
+
+ Indicates that arrays with a size of zero should be permitted.
+ Since the typical iteration loop does not naturally work with
+ zero-sized arrays, you must check that the IterSize is larger
+ than zero before entering the iteration loop.
+ Currently only the operands are checked, not a forced shape.
+
+ .. c:var:: NPY_ITER_REDUCE_OK
+
+ Permits writeable operands with a dimension with zero
+ stride and size greater than one. Note that such operands
+ must be read/write.
+
+ When buffering is enabled, this also switches to a special
+ buffering mode which reduces the loop length as necessary to
+ not trample on values being reduced.
+
+ Note that if you want to do a reduction on an automatically
+ allocated output, you must use :c:func:`NpyIter_GetOperandArray`
+ to get its reference, then set every value to the reduction
+ unit before doing the iteration loop. In the case of a
+ buffered reduction, this means you must also specify the
+ flag :c:data:`NPY_ITER_DELAY_BUFALLOC`, then reset the iterator
+ after initializing the allocated operand to prepare the
+ buffers.
+
+ .. c:var:: NPY_ITER_RANGED
+
+ Enables support for iteration of sub-ranges of the full
+ ``iterindex`` range ``[0, NpyIter_IterSize(iter))``. Use
+ the function :c:func:`NpyIter_ResetToIterIndexRange` to specify
+ a range for iteration.
+
+ This flag can only be used with :c:data:`NPY_ITER_EXTERNAL_LOOP`
+ when :c:data:`NPY_ITER_BUFFERED` is enabled. This is because
+ without buffering, the inner loop is always the size of the
+ innermost iteration dimension, and allowing it to get cut up
+ would require special handling, effectively making it more
+ like the buffered version.
+
+ .. c:var:: NPY_ITER_BUFFERED
+
+ Causes the iterator to store buffering data, and use buffering
+ to satisfy data type, alignment, and byte-order requirements.
+ To buffer an operand, do not specify the :c:data:`NPY_ITER_COPY`
+ or :c:data:`NPY_ITER_UPDATEIFCOPY` flags, because they will
+ override buffering. Buffering is especially useful for Python
+ code using the iterator, allowing for larger chunks
+ of data at once to amortize the Python interpreter overhead.
+
+ If used with :c:data:`NPY_ITER_EXTERNAL_LOOP`, the inner loop
+ for the caller may get larger chunks than would be possible
+ without buffering, because of how the strides are laid out.
+
+ Note that if an operand is given the flag :c:data:`NPY_ITER_COPY`
+ or :c:data:`NPY_ITER_UPDATEIFCOPY`, a copy will be made in preference
+ to buffering. Buffering will still occur when the array was
+ broadcast so elements need to be duplicated to get a constant
+ stride.
+
+ In normal buffering, the size of each inner loop is equal
+ to the buffer size, or possibly larger if
+ :c:data:`NPY_ITER_GROWINNER` is specified. If
+ :c:data:`NPY_ITER_REDUCE_OK` is enabled and a reduction occurs,
+ the inner loops may become smaller depending
+ on the structure of the reduction.
+
+ .. c:var:: NPY_ITER_GROWINNER
+
+ When buffering is enabled, this allows the size of the inner
+ loop to grow when buffering isn't necessary. This option
+ is best used if you're doing a straight pass through all the
+ data, rather than anything with small cache-friendly arrays
+ of temporary values for each inner loop.
+
+ .. c:var:: NPY_ITER_DELAY_BUFALLOC
+
+ When buffering is enabled, this delays allocation of the
+ buffers until :c:func:`NpyIter_Reset` or another reset function is
+ called. This flag exists to avoid wasteful copying of
+ buffer data when making multiple copies of a buffered
+ iterator for multi-threaded iteration.
+
+ Another use of this flag is for setting up reduction operations.
+ After the iterator is created, and a reduction output
+ is allocated automatically by the iterator (be sure to use
+ READWRITE access), its value may be initialized to the reduction
+ unit. Use :c:func:`NpyIter_GetOperandArray` to get the object.
+ Then, call :c:func:`NpyIter_Reset` to allocate and fill the buffers
+ with their initial values.
+
+ .. c:var:: NPY_ITER_COPY_IF_OVERLAP
+
+ If any write operand has overlap with any read operand, eliminate all
+ overlap by making temporary copies (enabling UPDATEIFCOPY for write
+ operands, if necessary). A pair of operands has overlap if there is
+ a memory address that contains data common to both arrays.
+
+ Because exact overlap detection has exponential runtime
+ in the number of dimensions, the decision is made based
+ on heuristics, which have false positives (needless copies in unusual
+ cases) but no false negatives.
+
+ If any read/write overlap exists, this flag ensures the result of the
+ operation is the same as if all operands were copied.
+ In cases where copies would need to be made, **the result of the
+ computation may be undefined without this flag!**
+
+ Flags that may be passed in ``op_flags[i]``, where ``0 <= i < nop``:
+
+ .. c:var:: NPY_ITER_READWRITE
+ .. c:var:: NPY_ITER_READONLY
+ .. c:var:: NPY_ITER_WRITEONLY
+
+ Indicate how the user of the iterator will read or write
+ to ``op[i]``. Exactly one of these flags must be specified
+ per operand. Using ``NPY_ITER_READWRITE`` or ``NPY_ITER_WRITEONLY``
+ for a user-provided operand may trigger ``WRITEBACKIFCOPY``
+ semantics. The data will be written back to the original array
+ when ``NpyIter_Deallocate`` is called.
+
+ .. c:var:: NPY_ITER_COPY
+
+ Allow a copy of ``op[i]`` to be made if it does not
+ meet the data type or alignment requirements as specified
+ by the constructor flags and parameters.
+
+ .. c:var:: NPY_ITER_UPDATEIFCOPY
+
+ Triggers :c:data:`NPY_ITER_COPY`, and when an array operand
+ is flagged for writing and is copied, causes the data
+ in a copy to be copied back to ``op[i]`` when
+ ``NpyIter_Deallocate`` is called.
+
+ If the operand is flagged as write-only and a copy is needed,
+ an uninitialized temporary array will be created and then copied
+ back to ``op[i]`` on calling ``NpyIter_Deallocate``, instead of
+ doing the unnecessary copy operation.
+
+ .. c:var:: NPY_ITER_NBO
+ .. c:var:: NPY_ITER_ALIGNED
+ .. c:var:: NPY_ITER_CONTIG
+
+ Causes the iterator to provide data for ``op[i]``
+ that is in native byte order, aligned according to
+ the dtype requirements, contiguous, or any combination.
+
+ By default, the iterator produces pointers into the
+ arrays provided, which may be aligned or unaligned, and
+ with any byte order. If copying or buffering is not
+ enabled and the operand data doesn't satisfy the constraints,
+ an error will be raised.
+
+ The contiguous constraint applies only to the inner loop;
+ successive inner loops may have arbitrary pointer changes.
+
+ If the requested data type is in non-native byte order,
+ the NBO flag overrides it and the requested data type is
+ converted to be in native byte order.
+
+ .. c:var:: NPY_ITER_ALLOCATE
+
+ This is for output arrays, and requires that the flag
+ :c:data:`NPY_ITER_WRITEONLY` or :c:data:`NPY_ITER_READWRITE`
+ be set. If ``op[i]`` is NULL, creates a new array with
+ the final broadcast dimensions, and a layout matching
+ the iteration order of the iterator.
+
+ When ``op[i]`` is NULL, the requested data type
+ ``op_dtypes[i]`` may be NULL as well, in which case it is
+ automatically generated from the dtypes of the arrays which
+    are flagged as readable. The rules for generating the dtype
+    are the same as for UFuncs. Of special note is the handling
+    of byte order in the selected dtype. If there is exactly
+    one input, the input's dtype is used as is. Otherwise,
+    if more than one input dtype is combined, the
+    output will be in native byte order.
+
+ After being allocated with this flag, the caller may retrieve
+ the new array by calling :c:func:`NpyIter_GetOperandArray` and
+ getting the i-th object in the returned C array. The caller
+ must call Py_INCREF on it to claim a reference to the array.
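+
+    A hedged sketch of retrieving an allocated output, assuming operand
+    ``i`` was passed as NULL together with this flag:
+
+    .. code-block:: c
+
+        PyArrayObject **operands = NpyIter_GetOperandArray(iter);
+        PyArrayObject *out = operands[i];
+
+        /* Claim our own reference before using the array independently */
+        Py_INCREF(out);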
+
+ .. c:var:: NPY_ITER_NO_SUBTYPE
+
+ For use with :c:data:`NPY_ITER_ALLOCATE`, this flag disables
+ allocating an array subtype for the output, forcing
+ it to be a straight ndarray.
+
+ TODO: Maybe it would be better to introduce a function
+ ``NpyIter_GetWrappedOutput`` and remove this flag?
+
+ .. c:var:: NPY_ITER_NO_BROADCAST
+
+ Ensures that the input or output matches the iteration
+ dimensions exactly.
+
+ .. c:var:: NPY_ITER_ARRAYMASK
+
+ .. versionadded:: 1.7
+
+ Indicates that this operand is the mask to use for
+ selecting elements when writing to operands which have
+ the :c:data:`NPY_ITER_WRITEMASKED` flag applied to them.
+ Only one operand may have :c:data:`NPY_ITER_ARRAYMASK` flag
+ applied to it.
+
+ The data type of an operand with this flag should be either
+ :c:data:`NPY_BOOL`, :c:data:`NPY_MASK`, or a struct dtype
+ whose fields are all valid mask dtypes. In the latter case,
+ it must match up with a struct operand being WRITEMASKED,
+ as it is specifying a mask for each field of that array.
+
+ This flag only affects writing from the buffer back to
+ the array. This means that if the operand is also
+ :c:data:`NPY_ITER_READWRITE` or :c:data:`NPY_ITER_WRITEONLY`,
+ code doing iteration can write to this operand to
+ control which elements will be untouched and which ones will be
+ modified. This is useful when the mask should be a combination
+ of input masks.
+
+ .. c:var:: NPY_ITER_WRITEMASKED
+
+ .. versionadded:: 1.7
+
+      Indicates that this operand is `writemasked <numpy.nditer>`:
+      only elements where the chosen ARRAYMASK operand is True
+      will be written to. In general, the iterator does not enforce
+      this; it is up to the code doing the iteration to follow that
+      promise.
+
+      When the ``writemasked`` flag is used and this operand is buffered,
+      this changes how data is copied from the buffer into the array.
+      A masked copying routine is used, which copies only those
+      elements in the buffer for which the corresponding element
+      of the ARRAYMASK operand is True.
+
+ .. c:var:: NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE
+
+ In memory overlap checks, assume that operands with
+ ``NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE`` enabled are accessed only
+ in the iterator order.
+
+ This enables the iterator to reason about data dependency,
+ possibly avoiding unnecessary copies.
+
+ This flag has effect only if ``NPY_ITER_COPY_IF_OVERLAP`` is enabled
+ on the iterator.
+
+.. c:function:: NpyIter* NpyIter_AdvancedNew( \
+ npy_intp nop, PyArrayObject** op, npy_uint32 flags, NPY_ORDER order, \
+ NPY_CASTING casting, npy_uint32* op_flags, PyArray_Descr** op_dtypes, \
+ int oa_ndim, int** op_axes, npy_intp const* itershape, npy_intp buffersize)
+
+ Extends :c:func:`NpyIter_MultiNew` with several advanced options providing
+ more control over broadcasting and buffering.
+
+ If -1/NULL values are passed to ``oa_ndim``, ``op_axes``, ``itershape``,
+ and ``buffersize``, it is equivalent to :c:func:`NpyIter_MultiNew`.
+
+    The parameter ``oa_ndim``, when not zero or -1, specifies the number of
+    dimensions that will be iterated with customized broadcasting.
+    If it is provided, ``op_axes`` must be provided and ``itershape``
+    may also be provided. The ``op_axes`` parameter lets you control
+    in detail how the axes of the operand arrays get matched together
+    and iterated. In ``op_axes``, you must provide an array of ``nop``
+    pointers to ``oa_ndim``-sized arrays of type ``npy_intp``. If an entry
+    in ``op_axes`` is NULL, normal broadcasting rules will apply.
+    ``op_axes[j][i]`` stores either a valid axis of ``op[j]``, or
+    -1, which means ``newaxis``. Within each ``op_axes[j]`` array, axes
+    may not be repeated. The following example shows how normal broadcasting
+    applies to a 3-D array, a 2-D array, a 1-D array and a scalar.
+
+    **Note**: Before NumPy 1.8, ``oa_ndim == 0`` was used for signalling
+    that ``op_axes`` and ``itershape`` are unused. This is deprecated and
+    should be replaced with -1. Better backward compatibility may be
+    achieved by using :c:func:`NpyIter_MultiNew` for this case.
+
+ .. code-block:: c
+
+ int oa_ndim = 3; /* # iteration axes */
+ int op0_axes[] = {0, 1, 2}; /* 3-D operand */
+ int op1_axes[] = {-1, 0, 1}; /* 2-D operand */
+ int op2_axes[] = {-1, -1, 0}; /* 1-D operand */
+        int op3_axes[] = {-1, -1, -1}; /* 0-D (scalar) operand */
+ int* op_axes[] = {op0_axes, op1_axes, op2_axes, op3_axes};
+
+ The ``itershape`` parameter allows you to force the iterator
+ to have a specific iteration shape. It is an array of length
+ ``oa_ndim``. When an entry is negative, its value is determined
+ from the operands. This parameter allows automatically allocated
+ outputs to get additional dimensions which don't match up with
+ any dimension of an input.
+
+    If ``buffersize`` is zero, a default buffer size is used,
+    otherwise it specifies how big of a buffer to use. Buffer sizes
+    which are powers of 2, such as 4096 or 8192, are recommended.
+
+ Returns NULL if there is an error, otherwise returns the allocated
+ iterator.
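+
+    For illustration, a hedged sketch of one possible call using the
+    ``op_axes`` arrays from the example above; ``op`` and ``op_flags``
+    are assumed to be set up for the four operands:
+
+    .. code-block:: c
+
+        NpyIter *iter = NpyIter_AdvancedNew(4, op,
+                            NPY_ITER_EXTERNAL_LOOP,
+                            NPY_KEEPORDER, NPY_NO_CASTING,
+                            op_flags,
+                            NULL,        /* op_dtypes: no conversion */
+                            oa_ndim, op_axes,
+                            NULL,        /* itershape: from the operands */
+                            0);          /* buffersize: default */
+        if (iter == NULL) {
+            return NULL;
+        }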
+
+.. c:function:: NpyIter* NpyIter_Copy(NpyIter* iter)
+
+ Makes a copy of the given iterator. This function is provided
+ primarily to enable multi-threaded iteration of the data.
+
+ *TODO*: Move this to a section about multithreaded iteration.
+
+ The recommended approach to multithreaded iteration is to
+ first create an iterator with the flags
+ :c:data:`NPY_ITER_EXTERNAL_LOOP`, :c:data:`NPY_ITER_RANGED`,
+ :c:data:`NPY_ITER_BUFFERED`, :c:data:`NPY_ITER_DELAY_BUFALLOC`, and
+ possibly :c:data:`NPY_ITER_GROWINNER`. Create a copy of this iterator
+ for each thread (minus one for the first iterator). Then, take
+ the iteration index range ``[0, NpyIter_GetIterSize(iter))`` and
+ split it up into tasks, for example using a TBB parallel_for loop.
+ When a thread gets a task to execute, it then uses its copy of
+ the iterator by calling :c:func:`NpyIter_ResetToIterIndexRange` and
+ iterating over the full range.
+
+ When using the iterator in multi-threaded code or in code not
+ holding the Python GIL, care must be taken to only call functions
+ which are safe in that context. :c:func:`NpyIter_Copy` cannot be safely
+ called without the Python GIL, because it increments Python
+ references. The ``Reset*`` and some other functions may be safely
+ called by passing in the ``errmsg`` parameter as non-NULL, so that
+ the functions will pass back errors through it instead of setting
+ a Python exception.
+
+ :c:func:`NpyIter_Deallocate` must be called for each copy.
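+
+    A hedged sketch of splitting the range between two workers (error
+    handling elided; the copy must be made while holding the GIL):
+
+    .. code-block:: c
+
+        NpyIter *iter2 = NpyIter_Copy(iter);   /* requires the GIL */
+        npy_intp isize = NpyIter_GetIterSize(iter);
+        char *errmsg = NULL;
+
+        /* In worker 1, possibly without the GIL: */
+        NpyIter_ResetToIterIndexRange(iter, 0, isize/2, &errmsg);
+        /* In worker 2, possibly without the GIL: */
+        NpyIter_ResetToIterIndexRange(iter2, isize/2, isize, &errmsg);
+
+        /* ... each worker iterates over its own sub-range ... */
+
+        /* Back under the GIL: */
+        NpyIter_Deallocate(iter2);
+        NpyIter_Deallocate(iter);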
+
+.. c:function:: int NpyIter_RemoveAxis(NpyIter* iter, int axis)
+
+ Removes an axis from iteration. This requires that
+ :c:data:`NPY_ITER_MULTI_INDEX` was set for iterator creation, and does
+ not work if buffering is enabled or an index is being tracked. This
+ function also resets the iterator to its initial state.
+
+ This is useful for setting up an accumulation loop, for example.
+ The iterator can first be created with all the dimensions, including
+ the accumulation axis, so that the output gets created correctly.
+ Then, the accumulation axis can be removed, and the calculation
+ done in a nested fashion.
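+
+    A hedged sketch, assuming ``iter`` tracks a multi-index, buffering is
+    disabled, and axis ``0`` is the accumulation axis:
+
+    .. code-block:: c
+
+        if (NpyIter_RemoveAxis(iter, 0) != NPY_SUCCEED) {
+            /* handle the error */
+        }
+        /* Any previously cached functions or pointers are now stale
+         * and must be retrieved again (see the warning below). */
+        NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+        char **dataptr = NpyIter_GetDataPtrArray(iter);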
+
+ **WARNING**: This function may change the internal memory layout of
+ the iterator. Any cached functions or pointers from the iterator
+ must be retrieved again! The iterator range will be reset as well.
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
+
+
+.. c:function:: int NpyIter_RemoveMultiIndex(NpyIter* iter)
+
+ If the iterator is tracking a multi-index, this strips support for them,
+ and does further iterator optimizations that are possible if multi-indices
+ are not needed. This function also resets the iterator to its initial
+ state.
+
+ **WARNING**: This function may change the internal memory layout of
+ the iterator. Any cached functions or pointers from the iterator
+ must be retrieved again!
+
+ After calling this function, :c:func:`NpyIter_HasMultiIndex(iter)` will
+ return false.
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
+
+.. c:function:: int NpyIter_EnableExternalLoop(NpyIter* iter)
+
+ If :c:func:`NpyIter_RemoveMultiIndex` was called, you may want to enable the
+ flag :c:data:`NPY_ITER_EXTERNAL_LOOP`. This flag is not permitted
+ together with :c:data:`NPY_ITER_MULTI_INDEX`, so this function is provided
+ to enable the feature after :c:func:`NpyIter_RemoveMultiIndex` is called.
+ This function also resets the iterator to its initial state.
+
+ **WARNING**: This function changes the internal logic of the iterator.
+ Any cached functions or pointers from the iterator must be retrieved
+ again!
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
+
+.. c:function:: int NpyIter_Deallocate(NpyIter* iter)
+
+ Deallocates the iterator object and resolves any needed writebacks.
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
+
+.. c:function:: int NpyIter_Reset(NpyIter* iter, char** errmsg)
+
+ Resets the iterator back to its initial state, at the beginning
+ of the iteration range.
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. If errmsg is non-NULL,
+ no Python exception is set when ``NPY_FAIL`` is returned.
+ Instead, \*errmsg is set to an error message. When errmsg is
+ non-NULL, the function may be safely called without holding
+ the Python GIL.
+
+.. c:function:: int NpyIter_ResetToIterIndexRange( \
+ NpyIter* iter, npy_intp istart, npy_intp iend, char** errmsg)
+
+ Resets the iterator and restricts it to the ``iterindex`` range
+ ``[istart, iend)``. See :c:func:`NpyIter_Copy` for an explanation of
+ how to use this for multi-threaded iteration. This requires that
+ the flag :c:data:`NPY_ITER_RANGED` was passed to the iterator constructor.
+
+ If you want to reset both the ``iterindex`` range and the base
+ pointers at the same time, you can do the following to avoid
+ extra buffer copying (be sure to add the return code error checks
+ when you copy this code).
+
+ .. code-block:: c
+
+ /* Set to a trivial empty range */
+ NpyIter_ResetToIterIndexRange(iter, 0, 0);
+ /* Set the base pointers */
+ NpyIter_ResetBasePointers(iter, baseptrs);
+ /* Set to the desired range */
+ NpyIter_ResetToIterIndexRange(iter, istart, iend);
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. If errmsg is non-NULL,
+ no Python exception is set when ``NPY_FAIL`` is returned.
+ Instead, \*errmsg is set to an error message. When errmsg is
+ non-NULL, the function may be safely called without holding
+ the Python GIL.
+
+.. c:function:: int NpyIter_ResetBasePointers( \
+ NpyIter *iter, char** baseptrs, char** errmsg)
+
+ Resets the iterator back to its initial state, but using the values
+ in ``baseptrs`` for the data instead of the pointers from the arrays
+    being iterated. This function is intended to be used, together with
+ the ``op_axes`` parameter, by nested iteration code with two or more
+ iterators.
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``. If errmsg is non-NULL,
+ no Python exception is set when ``NPY_FAIL`` is returned.
+ Instead, \*errmsg is set to an error message. When errmsg is
+ non-NULL, the function may be safely called without holding
+ the Python GIL.
+
+ *TODO*: Move the following into a special section on nested iterators.
+
+ Creating iterators for nested iteration requires some care. All
+ the iterator operands must match exactly, or the calls to
+ :c:func:`NpyIter_ResetBasePointers` will be invalid. This means that
+ automatic copies and output allocation should not be used haphazardly.
+ It is possible to still use the automatic data conversion and casting
+ features of the iterator by creating one of the iterators with
+ all the conversion parameters enabled, then grabbing the allocated
+ operands with the :c:func:`NpyIter_GetOperandArray` function and passing
+ them into the constructors for the rest of the iterators.
+
+ **WARNING**: When creating iterators for nested iteration,
+ the code must not use a dimension more than once in the different
+ iterators. If this is done, nested iteration will produce
+ out-of-bounds pointers during iteration.
+
+ **WARNING**: When creating iterators for nested iteration, buffering
+ can only be applied to the innermost iterator. If a buffered iterator
+ is used as the source for ``baseptrs``, it will point into a small buffer
+ instead of the array and the inner iteration will be invalid.
+
+ The pattern for using nested iterators is as follows.
+
+ .. code-block:: c
+
+        NpyIter *iter1, *iter2;
+ NpyIter_IterNextFunc *iternext1, *iternext2;
+ char **dataptrs1;
+
+ /*
+ * With the exact same operands, no copies allowed, and
+ * no axis in op_axes used both in iter1 and iter2.
+ * Buffering may be enabled for iter2, but not for iter1.
+ */
+ iter1 = ...; iter2 = ...;
+
+ iternext1 = NpyIter_GetIterNext(iter1);
+ iternext2 = NpyIter_GetIterNext(iter2);
+ dataptrs1 = NpyIter_GetDataPtrArray(iter1);
+
+ do {
+            NpyIter_ResetBasePointers(iter2, dataptrs1, NULL);
+ do {
+ /* Use the iter2 values */
+ } while (iternext2(iter2));
+ } while (iternext1(iter1));
+
+.. c:function:: int NpyIter_GotoMultiIndex(NpyIter* iter, npy_intp const* multi_index)
+
+ Adjusts the iterator to point to the ``ndim`` indices
+ pointed to by ``multi_index``. Returns an error if a multi-index
+ is not being tracked, the indices are out of bounds,
+ or inner loop iteration is disabled.
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
+
+.. c:function:: int NpyIter_GotoIndex(NpyIter* iter, npy_intp index)
+
+ Adjusts the iterator to point to the ``index`` specified.
+ If the iterator was constructed with the flag
+ :c:data:`NPY_ITER_C_INDEX`, ``index`` is the C-order index,
+ and if the iterator was constructed with the flag
+ :c:data:`NPY_ITER_F_INDEX`, ``index`` is the Fortran-order
+ index. Returns an error if there is no index being tracked,
+ the index is out of bounds, or inner loop iteration is disabled.
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
+
+.. c:function:: npy_intp NpyIter_GetIterSize(NpyIter* iter)
+
+ Returns the number of elements being iterated. This is the product
+ of all the dimensions in the shape. When a multi index is being tracked
+ (and `NpyIter_RemoveAxis` may be called) the size may be ``-1`` to
+ indicate an iterator is too large. Such an iterator is invalid, but
+ may become valid after `NpyIter_RemoveAxis` is called. It is not
+ necessary to check for this case.
+
+.. c:function:: npy_intp NpyIter_GetIterIndex(NpyIter* iter)
+
+ Gets the ``iterindex`` of the iterator, which is an index matching
+ the iteration order of the iterator.
+
+.. c:function:: void NpyIter_GetIterIndexRange( \
+ NpyIter* iter, npy_intp* istart, npy_intp* iend)
+
+ Gets the ``iterindex`` sub-range that is being iterated. If
+ :c:data:`NPY_ITER_RANGED` was not specified, this always returns the
+    range ``[0, NpyIter_GetIterSize(iter))``.
+
+.. c:function:: int NpyIter_GotoIterIndex(NpyIter* iter, npy_intp iterindex)
+
+ Adjusts the iterator to point to the ``iterindex`` specified.
+ The IterIndex is an index matching the iteration order of the iterator.
+ Returns an error if the ``iterindex`` is out of bounds,
+ buffering is enabled, or inner loop iteration is disabled.
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
+
+.. c:function:: npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* iter)
+
+ Returns 1 if the flag :c:data:`NPY_ITER_DELAY_BUFALLOC` was passed
+ to the iterator constructor, and no call to one of the Reset
+ functions has been done yet, 0 otherwise.
+
+.. c:function:: npy_bool NpyIter_HasExternalLoop(NpyIter* iter)
+
+ Returns 1 if the caller needs to handle the inner-most 1-dimensional
+ loop, or 0 if the iterator handles all looping. This is controlled
+ by the constructor flag :c:data:`NPY_ITER_EXTERNAL_LOOP` or
+ :c:func:`NpyIter_EnableExternalLoop`.
+
+.. c:function:: npy_bool NpyIter_HasMultiIndex(NpyIter* iter)
+
+ Returns 1 if the iterator was created with the
+ :c:data:`NPY_ITER_MULTI_INDEX` flag, 0 otherwise.
+
+.. c:function:: npy_bool NpyIter_HasIndex(NpyIter* iter)
+
+ Returns 1 if the iterator was created with the
+ :c:data:`NPY_ITER_C_INDEX` or :c:data:`NPY_ITER_F_INDEX`
+ flag, 0 otherwise.
+
+.. c:function:: npy_bool NpyIter_RequiresBuffering(NpyIter* iter)
+
+ Returns 1 if the iterator requires buffering, which occurs
+ when an operand needs conversion or alignment and so cannot
+ be used directly.
+
+.. c:function:: npy_bool NpyIter_IsBuffered(NpyIter* iter)
+
+ Returns 1 if the iterator was created with the
+ :c:data:`NPY_ITER_BUFFERED` flag, 0 otherwise.
+
+.. c:function:: npy_bool NpyIter_IsGrowInner(NpyIter* iter)
+
+ Returns 1 if the iterator was created with the
+ :c:data:`NPY_ITER_GROWINNER` flag, 0 otherwise.
+
+.. c:function:: npy_intp NpyIter_GetBufferSize(NpyIter* iter)
+
+ If the iterator is buffered, returns the size of the buffer
+ being used, otherwise returns 0.
+
+.. c:function:: int NpyIter_GetNDim(NpyIter* iter)
+
+ Returns the number of dimensions being iterated. If a multi-index
+ was not requested in the iterator constructor, this value
+ may be smaller than the number of dimensions in the original
+ objects.
+
+.. c:function:: int NpyIter_GetNOp(NpyIter* iter)
+
+ Returns the number of operands in the iterator.
+
+.. c:function:: npy_intp* NpyIter_GetAxisStrideArray(NpyIter* iter, int axis)
+
+ Gets the array of strides for the specified axis. Requires that
+ the iterator be tracking a multi-index, and that buffering not
+ be enabled.
+
+ This may be used when you want to match up operand axes in
+ some fashion, then remove them with :c:func:`NpyIter_RemoveAxis` to
+ handle their processing manually. By calling this function
+ before removing the axes, you can get the strides for the
+ manual processing.
+
+ Returns ``NULL`` on error.
+
+.. c:function:: int NpyIter_GetShape(NpyIter* iter, npy_intp* outshape)
+
+ Returns the broadcast shape of the iterator in ``outshape``.
+ This can only be called on an iterator which is tracking a multi-index.
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
+
+.. c:function:: PyArray_Descr** NpyIter_GetDescrArray(NpyIter* iter)
+
+ This gives back a pointer to the ``nop`` data type Descrs for
+ the objects being iterated. The result points into ``iter``,
+ so the caller does not gain any references to the Descrs.
+
+ This pointer may be cached before the iteration loop, calling
+ ``iternext`` will not change it.
+
+.. c:function:: PyArrayObject** NpyIter_GetOperandArray(NpyIter* iter)
+
+    This gives back a pointer to the ``nop`` operand arrays
+    that are being iterated. The result points into ``iter``,
+    so the caller does not gain any references to the arrays.
+
+.. c:function:: PyArrayObject* NpyIter_GetIterView(NpyIter* iter, npy_intp i)
+
+ This gives back a reference to a new ndarray view, which is a view
+ into the i-th object in the array :c:func:`NpyIter_GetOperandArray()`,
+ whose dimensions and strides match the internal optimized
+ iteration pattern. A C-order iteration of this view is equivalent
+ to the iterator's iteration order.
+
+ For example, if an iterator was created with a single array as its
+ input, and it was possible to rearrange all its axes and then
+ collapse it into a single strided iteration, this would return
+ a view that is a one-dimensional array.
+
+.. c:function:: void NpyIter_GetReadFlags(NpyIter* iter, char* outreadflags)
+
+    Fills the first ``nop`` entries of ``outreadflags``, setting
+    ``outreadflags[i]`` to 1 if ``op[i]`` can be read from, and to 0 if not.
+
+.. c:function:: void NpyIter_GetWriteFlags(NpyIter* iter, char* outwriteflags)
+
+    Fills the first ``nop`` entries of ``outwriteflags``, setting
+    ``outwriteflags[i]`` to 1 if ``op[i]`` can be written to, and to 0 if not.
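+
+    A short sketch of querying per-operand access with these two
+    functions:
+
+    .. code-block:: c
+
+        char readflags[NPY_MAXARGS], writeflags[NPY_MAXARGS];
+        int iop, nop = NpyIter_GetNOp(iter);
+
+        NpyIter_GetReadFlags(iter, readflags);
+        NpyIter_GetWriteFlags(iter, writeflags);
+        for (iop = 0; iop < nop; ++iop) {
+            if (writeflags[iop]) {
+                /* op[iop] may be written to */
+            }
+        }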
+
+.. c:function:: int NpyIter_CreateCompatibleStrides( \
+ NpyIter* iter, npy_intp itemsize, npy_intp* outstrides)
+
+ Builds a set of strides which are the same as the strides of an
+ output array created using the :c:data:`NPY_ITER_ALLOCATE` flag, where NULL
+ was passed for op_axes. This is for data packed contiguously,
+ but not necessarily in C or Fortran order. This should be used
+ together with :c:func:`NpyIter_GetShape` and :c:func:`NpyIter_GetNDim`
+ with the flag :c:data:`NPY_ITER_MULTI_INDEX` passed into the constructor.
+
+ A use case for this function is to match the shape and layout of
+ the iterator and tack on one or more dimensions. For example,
+ in order to generate a vector per input value for a numerical gradient,
+ you pass in ndim*itemsize for itemsize, then add another dimension to
+ the end with size ndim and stride itemsize. To do the Hessian matrix,
+ you do the same thing but add two dimensions, or take advantage of
+ the symmetry and pack it into 1 dimension with a particular encoding.
+
+ This function may only be called if the iterator is tracking a multi-index
+ and if :c:data:`NPY_ITER_DONT_NEGATE_STRIDES` was used to prevent an axis
+ from being iterated in reverse order.
+
+ If an array is created with this method, simply adding 'itemsize'
+ for each iteration will traverse the new array matching the
+ iterator.
+
+ Returns ``NPY_SUCCEED`` or ``NPY_FAIL``.
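+
+    A hedged sketch of the basic call, assuming the constructor received
+    :c:data:`NPY_ITER_MULTI_INDEX` and :c:data:`NPY_ITER_DONT_NEGATE_STRIDES`,
+    and ``itemsize`` stands for the element size of the array to create:
+
+    .. code-block:: c
+
+        npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
+        int ndim = NpyIter_GetNDim(iter);
+
+        if (NpyIter_GetShape(iter, shape) != NPY_SUCCEED ||
+                NpyIter_CreateCompatibleStrides(iter, itemsize,
+                                                strides) != NPY_SUCCEED) {
+            /* handle the error */
+        }
+        /* shape[0..ndim-1] and strides[0..ndim-1] now describe an array
+         * which is traversed by adding itemsize at each iteration. */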
+
+.. c:function:: npy_bool NpyIter_IsFirstVisit(NpyIter* iter, int iop)
+
+ .. versionadded:: 1.7
+
+    Checks whether the elements of the specified reduction operand
+    which the iterator points at are being seen for the first time.
+    The function returns a reasonable answer for reduction operands
+    and when buffering is disabled. The answer may be incorrect for
+    buffered non-reduction operands.
+
+ This function is intended to be used in EXTERNAL_LOOP mode only,
+ and will produce some wrong answers when that mode is not enabled.
+
+ If this function returns true, the caller should also check the inner
+ loop stride of the operand, because if that stride is 0, then only
+ the first element of the innermost external loop is being visited
+ for the first time.
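+
+    A hedged sketch of that check inside an EXTERNAL_LOOP iteration, for
+    a reduction operand ``iop``:
+
+    .. code-block:: c
+
+        npy_intp *stride = NpyIter_GetInnerStrideArray(iter);
+
+        if (NpyIter_IsFirstVisit(iter, iop)) {
+            if (stride[iop] == 0) {
+                /* Only the first element of this inner loop is new */
+            }
+            else {
+                /* Every element of this inner loop is a first visit */
+            }
+        }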
+
+ *WARNING*: For performance reasons, 'iop' is not bounds-checked,
+ it is not confirmed that 'iop' is actually a reduction operand,
+ and it is not confirmed that EXTERNAL_LOOP mode is enabled. These
+ checks are the responsibility of the caller, and should be done
+ outside of any inner loops.
+
+Functions For Iteration
+-----------------------
+
+.. c:function:: NpyIter_IterNextFunc* NpyIter_GetIterNext( \
+ NpyIter* iter, char** errmsg)
+
+ Returns a function pointer for iteration. A specialized version
+ of the function pointer may be calculated by this function
+ instead of being stored in the iterator structure. Thus, to
+ get good performance, it is required that the function pointer
+ be saved in a variable rather than retrieved for each loop iteration.
+
+    Returns NULL if there is an error. If errmsg is non-NULL,
+    no Python exception is set when NULL is returned.
+ Instead, \*errmsg is set to an error message. When errmsg is
+ non-NULL, the function may be safely called without holding
+ the Python GIL.
+
+ The typical looping construct is as follows.
+
+ .. code-block:: c
+
+ NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+ char** dataptr = NpyIter_GetDataPtrArray(iter);
+
+ do {
+ /* use the addresses dataptr[0], ... dataptr[nop-1] */
+ } while(iternext(iter));
+
+ When :c:data:`NPY_ITER_EXTERNAL_LOOP` is specified, the typical
+ inner loop construct is as follows.
+
+ .. code-block:: c
+
+ NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+ char** dataptr = NpyIter_GetDataPtrArray(iter);
+ npy_intp* stride = NpyIter_GetInnerStrideArray(iter);
+ npy_intp* size_ptr = NpyIter_GetInnerLoopSizePtr(iter), size;
+ npy_intp iop, nop = NpyIter_GetNOp(iter);
+
+ do {
+ size = *size_ptr;
+ while (size--) {
+ /* use the addresses dataptr[0], ... dataptr[nop-1] */
+ for (iop = 0; iop < nop; ++iop) {
+ dataptr[iop] += stride[iop];
+ }
+ }
+        } while (iternext(iter));
+
+ Observe that we are using the dataptr array inside the iterator, not
+ copying the values to a local temporary. This is possible because
+ when ``iternext()`` is called, these pointers will be overwritten
+ with fresh values, not incrementally updated.
+
+ If a compile-time fixed buffer is being used (both flags
+ :c:data:`NPY_ITER_BUFFERED` and :c:data:`NPY_ITER_EXTERNAL_LOOP`), the
+ inner size may be used as a signal as well. The size is guaranteed
+ to become zero when ``iternext()`` returns false, enabling the
+ following loop construct. Note that if you use this construct,
+ you should not pass :c:data:`NPY_ITER_GROWINNER` as a flag, because it
+ will cause larger sizes under some circumstances.
+
+ .. code-block:: c
+
+ /* The constructor should have buffersize passed as this value */
+ #define FIXED_BUFFER_SIZE 1024
+
+ NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+ char **dataptr = NpyIter_GetDataPtrArray(iter);
+ npy_intp *stride = NpyIter_GetInnerStrideArray(iter);
+ npy_intp *size_ptr = NpyIter_GetInnerLoopSizePtr(iter), size;
+ npy_intp i, iop, nop = NpyIter_GetNOp(iter);
+
+ /* One loop with a fixed inner size */
+ size = *size_ptr;
+ while (size == FIXED_BUFFER_SIZE) {
+ /*
+ * This loop could be manually unrolled by a factor
+ * which divides into FIXED_BUFFER_SIZE
+ */
+ for (i = 0; i < FIXED_BUFFER_SIZE; ++i) {
+ /* use the addresses dataptr[0], ... dataptr[nop-1] */
+ for (iop = 0; iop < nop; ++iop) {
+ dataptr[iop] += stride[iop];
+ }
+ }
+            iternext(iter);
+ size = *size_ptr;
+ }
+
+ /* Finish-up loop with variable inner size */
+ if (size > 0) do {
+ size = *size_ptr;
+ while (size--) {
+ /* use the addresses dataptr[0], ... dataptr[nop-1] */
+ for (iop = 0; iop < nop; ++iop) {
+ dataptr[iop] += stride[iop];
+ }
+ }
+        } while (iternext(iter));
+
+.. c:function:: NpyIter_GetMultiIndexFunc *NpyIter_GetGetMultiIndex( \
+ NpyIter* iter, char** errmsg)
+
+ Returns a function pointer for getting the current multi-index
+ of the iterator. Returns NULL if the iterator is not tracking
+ a multi-index. It is recommended that this function
+ pointer be cached in a local variable before the iteration
+ loop.
+
+    Returns NULL if there is an error. If errmsg is non-NULL,
+    no Python exception is set when NULL is returned.
+ Instead, \*errmsg is set to an error message. When errmsg is
+ non-NULL, the function may be safely called without holding
+ the Python GIL.
+
+.. c:function:: char** NpyIter_GetDataPtrArray(NpyIter* iter)
+
+    This gives back a pointer to the ``nop`` data pointers. If
+    :c:data:`NPY_ITER_EXTERNAL_LOOP` was not specified, each data
+    pointer points to the current data item of the iterator. If it
+    was specified, each data pointer points to the first data item
+    of the current inner loop.
+
+ This pointer may be cached before the iteration loop, calling
+ ``iternext`` will not change it. This function may be safely
+ called without holding the Python GIL.
+
+.. c:function:: char** NpyIter_GetInitialDataPtrArray(NpyIter* iter)
+
+ Gets the array of data pointers directly into the arrays (never
+ into the buffers), corresponding to iteration index 0.
+
+ These pointers are different from the pointers accepted by
+ ``NpyIter_ResetBasePointers``, because the direction along
+ some axes may have been reversed.
+
+ This function may be safely called without holding the Python GIL.
+
+.. c:function:: npy_intp* NpyIter_GetIndexPtr(NpyIter* iter)
+
+ This gives back a pointer to the index being tracked, or NULL
+ if no index is being tracked. It is only useable if one of
+ the flags :c:data:`NPY_ITER_C_INDEX` or :c:data:`NPY_ITER_F_INDEX`
+ were specified during construction.
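+
+    A short sketch, assuming the iterator was constructed with
+    :c:data:`NPY_ITER_C_INDEX`:
+
+    .. code-block:: c
+
+        NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+        npy_intp *index_ptr = NpyIter_GetIndexPtr(iter);
+
+        do {
+            npy_intp i = *index_ptr;  /* C-order index of current element */
+            /* ... */
+        } while (iternext(iter));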
+
+When the flag :c:data:`NPY_ITER_EXTERNAL_LOOP` is used, the code
+needs to know the parameters for doing the inner loop. These
+functions provide that information.
+
+.. c:function:: npy_intp* NpyIter_GetInnerStrideArray(NpyIter* iter)
+
+ Returns a pointer to an array of the ``nop`` strides,
+ one for each iterated object, to be used by the inner loop.
+
+ This pointer may be cached before the iteration loop, calling
+ ``iternext`` will not change it. This function may be safely
+ called without holding the Python GIL.
+
+ **WARNING**: While the pointer may be cached, its values may
+ change if the iterator is buffered.
+
+.. c:function:: npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* iter)
+
+ Returns a pointer to the number of iterations the
+ inner loop should execute.
+
+ This address may be cached before the iteration loop, calling
+ ``iternext`` will not change it. The value itself may change during
+ iteration, in particular if buffering is enabled. This function
+ may be safely called without holding the Python GIL.
+
+.. c:function:: void NpyIter_GetInnerFixedStrideArray( \
+ NpyIter* iter, npy_intp* out_strides)
+
+ Gets an array of strides which are fixed, or will not change during
+ the entire iteration. For strides that may change, the value
+ NPY_MAX_INTP is placed in the stride.
+
+ Once the iterator is prepared for iteration (after a reset if
+    :c:data:`NPY_ITER_DELAY_BUFALLOC` was used), call this to get the strides
+ which may be used to select a fast inner loop function. For example,
+ if the stride is 0, that means the inner loop can always load its
+ value into a variable once, then use the variable throughout the loop,
+ or if the stride equals the itemsize, a contiguous version for that
+ operand may be used.
+
+ This function may be safely called without holding the Python GIL.
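+
+    A hedged sketch of selecting a specialization from the fixed strides;
+    ``itemsize`` here stands for operand 0's element size:
+
+    .. code-block:: c
+
+        npy_intp fixed_strides[NPY_MAXARGS];
+
+        NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
+        if (fixed_strides[0] == 0) {
+            /* Operand 0 is constant within every inner loop */
+        }
+        else if (fixed_strides[0] == itemsize) {
+            /* Operand 0 is contiguous in the inner loop */
+        }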
+
+.. index::
+ pair: iterator; C-API
+
+Converting from Previous NumPy Iterators
+----------------------------------------
+
+The old iterator API includes functions like PyArrayIter_Check,
+PyArray_Iter* and PyArray_ITER_*. The multi-iterator array includes
+PyArray_MultiIter*, PyArray_Broadcast, and PyArray_RemoveSmallest. The
+new iterator design replaces all of this functionality with a single object
+and associated API. One goal of the new API is that all uses of the
+existing iterator should be replaceable with the new iterator without
+significant effort. In 1.6, the major exception to this is the neighborhood
+iterator, which does not have corresponding features in this iterator.
+
+Here is a conversion table for which functions to use with the new iterator:
+
+===================================== ===================================================
+*Iterator Functions*
+:c:func:`PyArray_IterNew` :c:func:`NpyIter_New`
+:c:func:`PyArray_IterAllButAxis` :c:func:`NpyIter_New` + ``axes`` parameter **or**
+ Iterator flag :c:data:`NPY_ITER_EXTERNAL_LOOP`
+:c:func:`PyArray_BroadcastToShape` **NOT SUPPORTED** (Use the support for
+ multiple operands instead.)
+:c:func:`PyArrayIter_Check` Will need to add this in Python exposure
+:c:func:`PyArray_ITER_RESET` :c:func:`NpyIter_Reset`
+:c:func:`PyArray_ITER_NEXT` Function pointer from :c:func:`NpyIter_GetIterNext`
+:c:func:`PyArray_ITER_DATA` :c:func:`NpyIter_GetDataPtrArray`
+:c:func:`PyArray_ITER_GOTO` :c:func:`NpyIter_GotoMultiIndex`
+:c:func:`PyArray_ITER_GOTO1D` :c:func:`NpyIter_GotoIndex` or
+ :c:func:`NpyIter_GotoIterIndex`
+:c:func:`PyArray_ITER_NOTDONE` Return value of ``iternext`` function pointer
+*Multi-iterator Functions*
+:c:func:`PyArray_MultiIterNew` :c:func:`NpyIter_MultiNew`
+:c:func:`PyArray_MultiIter_RESET` :c:func:`NpyIter_Reset`
+:c:func:`PyArray_MultiIter_NEXT` Function pointer from :c:func:`NpyIter_GetIterNext`
+:c:func:`PyArray_MultiIter_DATA` :c:func:`NpyIter_GetDataPtrArray`
+:c:func:`PyArray_MultiIter_NEXTi` **NOT SUPPORTED** (always lock-step iteration)
+:c:func:`PyArray_MultiIter_GOTO` :c:func:`NpyIter_GotoMultiIndex`
+:c:func:`PyArray_MultiIter_GOTO1D` :c:func:`NpyIter_GotoIndex` or
+ :c:func:`NpyIter_GotoIterIndex`
+:c:func:`PyArray_MultiIter_NOTDONE` Return value of ``iternext`` function pointer
+:c:func:`PyArray_Broadcast` Handled by :c:func:`NpyIter_MultiNew`
+:c:func:`PyArray_RemoveSmallest` Iterator flag :c:data:`NPY_ITER_EXTERNAL_LOOP`
+*Other Functions*
+:c:func:`PyArray_ConvertToCommonType` Iterator flag :c:data:`NPY_ITER_COMMON_DTYPE`
+===================================== ===================================================
--- /dev/null
+
+*****************************
+Python Types and C-Structures
+*****************************
+
+.. sectionauthor:: Travis E. Oliphant
+
+Several new types are defined in the C-code. Most of these are
+accessible from Python, but a few are not exposed due to their limited
+use. Every new Python type has an associated :c:type:`PyObject *<PyObject>` with an
+internal structure that includes a pointer to a "method table" that
+defines how the new object behaves in Python. When you receive a
+Python object into C code, you always get a pointer to a
+:c:type:`PyObject` structure. Because a :c:type:`PyObject` structure is
+very generic and defines only :c:macro:`PyObject_HEAD`, by itself it
+is not very interesting. However, different objects contain more
+details after the :c:macro:`PyObject_HEAD` (but you have to cast to the
+correct type to access them --- or use accessor functions or macros).
+
+
+New Python Types Defined
+========================
+
+Python types are the functional equivalent in C of classes in Python.
+By constructing a new Python type you make available a new object for
+Python. The ndarray object is an example of a new type defined in C.
+New types are defined in C by two basic steps:
+
+1. creating a C-structure (usually named :c:type:`Py{Name}Object`) that is
+   binary-compatible with the :c:type:`PyObject` structure itself but holds
+ the additional information needed for that particular object;
+
+2. populating the :c:type:`PyTypeObject` table (pointed to by the ob_type
+ member of the :c:type:`PyObject` structure) with pointers to functions
+ that implement the desired behavior for the type.
+
+Instead of special method names which define behavior for Python
+classes, there are "function tables" which point to functions that
+implement the desired results. Since Python 2.2, the PyTypeObject
+itself has become dynamic, which allows C types to be "sub-typed"
+from other C types in C, and sub-classed in Python. The child
+types inherit the attributes and methods from their parent(s).
+
+There are two major new types: the ndarray ( :c:data:`PyArray_Type` )
+and the ufunc ( :c:data:`PyUFunc_Type` ). Additional types play a
+supportive role: the :c:data:`PyArrayIter_Type`, the
+:c:data:`PyArrayMultiIter_Type`, and the :c:data:`PyArrayDescr_Type`
+. The :c:data:`PyArrayIter_Type` is the type for a flat iterator for an
+ndarray (the object that is returned when getting the flat
+attribute). The :c:data:`PyArrayMultiIter_Type` is the type of the
+object returned when calling ``broadcast()``. It handles iteration
+and broadcasting over a collection of nested sequences. Also, the
+:c:data:`PyArrayDescr_Type` is the data-type-descriptor type whose
+instances describe the data. Finally, there are 21 new scalar-array
+types which are new Python scalars corresponding to each of the
+fundamental data types available for arrays. An additional 10 other
+types are placeholders that allow the array scalars to fit into a
+hierarchy of actual Python types.
+
+
+PyArray_Type and PyArrayObject
+------------------------------
+
+.. c:var:: PyArray_Type
+
+ The Python type of the ndarray is :c:data:`PyArray_Type`. In C, every
+ ndarray is a pointer to a :c:type:`PyArrayObject` structure. The ob_type
+ member of this structure contains a pointer to the :c:data:`PyArray_Type`
+ typeobject.
+
+.. c:type:: PyArrayObject
+
+ The :c:type:`PyArrayObject` C-structure contains all of the required
+ information for an array. All instances of an ndarray (and its
+ subclasses) will have this structure. For future compatibility,
+ these structure members should normally be accessed using the
+ provided macros. If you need a shorter name, then you can make use
+ of :c:type:`NPY_AO` (deprecated) which is defined to be equivalent to
+    :c:type:`PyArrayObject`. Direct access to the struct fields is
+    deprecated. Use the ``PyArray_*(arr)`` form instead.
+
+ .. code-block:: c
+
+ typedef struct PyArrayObject {
+ PyObject_HEAD
+ char *data;
+ int nd;
+ npy_intp *dimensions;
+ npy_intp *strides;
+ PyObject *base;
+ PyArray_Descr *descr;
+ int flags;
+ PyObject *weakreflist;
+ } PyArrayObject;
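+
+    For example, given a ``PyArrayObject *arr``, access the fields
+    through the macro layer rather than the struct (a minimal sketch):
+
+    .. code-block:: c
+
+        void *data = PyArray_DATA(arr);
+        int ndim = PyArray_NDIM(arr);
+        npy_intp *shape = PyArray_DIMS(arr);
+        npy_intp *strides = PyArray_STRIDES(arr);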
+
+.. c:macro:: PyArrayObject.PyObject_HEAD
+
+ This is needed by all Python objects. It consists of (at least)
+ a reference count member ( ``ob_refcnt`` ) and a pointer to the
+ typeobject ( ``ob_type`` ). (Other elements may also be present
+    if Python was compiled with special options; see
+    ``Include/object.h`` in the Python source tree for more
+    information). The ob_type member points to a Python type
+ object.
+
+.. c:member:: char *PyArrayObject.data
+
+ Accessible via :c:data:`PyArray_DATA`, this data member is a
+ pointer to the first element of the array. This pointer can
+ (and normally should) be recast to the data type of the array.
+
+.. c:member:: int PyArrayObject.nd
+
+ An integer providing the number of dimensions for this
+ array. When nd is 0, the array is sometimes called a rank-0
+ array. Such arrays have undefined dimensions and strides and
+ cannot be accessed. Macro :c:data:`PyArray_NDIM` defined in
+ ``ndarraytypes.h`` points to this data member. :c:data:`NPY_MAXDIMS`
+ is the largest number of dimensions for any array.
+
+.. c:member:: npy_intp *PyArrayObject.dimensions
+
+ An array of integers providing the shape in each dimension as
+ long as nd :math:`\geq` 1. The integer is always large enough
+ to hold a pointer on the platform, so the dimension size is
+ only limited by memory. :c:data:`PyArray_DIMS` is the macro
+ associated with this data member.
+
+.. c:member:: npy_intp *PyArrayObject.strides
+
+ An array of integers providing for each dimension the number of
+ bytes that must be skipped to get to the next element in that
+ dimension. Associated with macro :c:data:`PyArray_STRIDES`.
+
+.. c:member:: PyObject *PyArrayObject.base
+
+ Pointed to by :c:data:`PyArray_BASE`, this member is used to hold a
+ pointer to another Python object that is related to this array.
+ There are two use cases:
+
+ - If this array does not own its own memory, then base points to the
+ Python object that owns it (perhaps another array object)
+ - If this array has the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or
+ :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set, then this array is a working
+ copy of a "misbehaved" array.
+
+ When ``PyArray_ResolveWritebackIfCopy`` is called, the array pointed to
+ by base will be updated with the contents of this array.
+
+.. c:member:: PyArray_Descr *PyArrayObject.descr
+
+ A pointer to a data-type descriptor object (see below). The
+ data-type descriptor object is an instance of a new built-in
+ type which allows a generic description of memory. There is a
+ descriptor structure for each data type supported. This
+ descriptor structure contains useful information about the type
+ as well as a pointer to a table of function pointers to
+ implement specific functionality. As the name suggests, it is
+ associated with the macro :c:data:`PyArray_DESCR`.
+
+.. c:member:: int PyArrayObject.flags
+
+ Pointed to by the macro :c:data:`PyArray_FLAGS`, this data member represents
+ the flags indicating how the memory pointed to by data is to be
+ interpreted. Possible flags are :c:data:`NPY_ARRAY_C_CONTIGUOUS`,
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_OWNDATA`,
+ :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`,
+ :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, and :c:data:`NPY_ARRAY_UPDATEIFCOPY`.
+
+.. c:member:: PyObject *PyArrayObject.weakreflist
+
+ This member allows array objects to have weak references (using the
+ weakref module).
+
+
+PyArrayDescr_Type and PyArray_Descr
+-----------------------------------
+
+.. c:var:: PyArrayDescr_Type
+
+ The :c:data:`PyArrayDescr_Type` is the built-in type of the
+ data-type-descriptor objects used to describe how the bytes comprising
+ the array are to be interpreted. There are 21 statically-defined
+ :c:type:`PyArray_Descr` objects for the built-in data-types. While these
+ participate in reference counting, their reference count should never
+ reach zero. There is also a dynamic table of user-defined
+ :c:type:`PyArray_Descr` objects that is also maintained. Once a
+ data-type-descriptor object is "registered" it should never be
+ deallocated either. The function :c:func:`PyArray_DescrFromType` (...) can
+ be used to retrieve a :c:type:`PyArray_Descr` object from an enumerated
+ type-number (either built-in or user- defined).
+
+.. c:type:: PyArray_Descr
+
+ The :c:type:`PyArray_Descr` structure lies at the heart of the
+ :c:data:`PyArrayDescr_Type`. While it is described here for
+ completeness, it should be considered internal to NumPy and manipulated via
+ ``PyArrayDescr_*`` or ``PyDataType*`` functions and macros. The size of this
+ structure is subject to change across versions of NumPy. To ensure
+ compatibility:
+
+ - Never declare a non-pointer instance of the struct
+    - Never perform pointer arithmetic
+    - Never use ``sizeof(PyArray_Descr)``
+
+ It has the following structure:
+
+ .. code-block:: c
+
+ typedef struct {
+ PyObject_HEAD
+ PyTypeObject *typeobj;
+ char kind;
+ char type;
+ char byteorder;
+ char flags;
+ int type_num;
+ int elsize;
+ int alignment;
+ PyArray_ArrayDescr *subarray;
+ PyObject *fields;
+ PyObject *names;
+ PyArray_ArrFuncs *f;
+ PyObject *metadata;
+ NpyAuxData *c_metadata;
+ npy_hash_t hash;
+ } PyArray_Descr;
+
+.. c:member:: PyTypeObject *PyArray_Descr.typeobj
+
+ Pointer to a typeobject that is the corresponding Python type for
+ the elements of this array. For the builtin types, this points to
+ the corresponding array scalar. For user-defined types, this
+ should point to a user-defined typeobject. This typeobject can
+ either inherit from array scalars or not. If it does not inherit
+ from array scalars, then the :c:data:`NPY_USE_GETITEM` and
+ :c:data:`NPY_USE_SETITEM` flags should be set in the ``flags`` member.
+
+.. c:member:: char PyArray_Descr.kind
+
+ A character code indicating the kind of array (using the array
+ interface typestring notation). A 'b' represents Boolean, a 'i'
+ represents signed integer, a 'u' represents unsigned integer, 'f'
+ represents floating point, 'c' represents complex floating point, 'S'
+ represents 8-bit zero-terminated bytes, 'U' represents 32-bit/character
+ unicode string, and 'V' represents arbitrary.
+
+.. c:member:: char PyArray_Descr.type
+
+ A traditional character code indicating the data type.
+
+.. c:member:: char PyArray_Descr.byteorder
+
+    A character indicating the byte-order: '>' (big-endian), '<'
+    (little-endian), '=' (native), '\|' (irrelevant, ignore). All
+    builtin data-types have byteorder '='.
+
+.. c:member:: char PyArray_Descr.flags
+
+    A data-type bit-flag that determines if the data-type exhibits
+    object-array like behavior. Each bit in this member is a flag,
+    named as follows:
+
+ .. c:var:: NPY_ITEM_REFCOUNT
+
+ Indicates that items of this data-type must be reference
+ counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ).
+
+ .. c:var:: NPY_ITEM_HASOBJECT
+
+ Same as :c:data:`NPY_ITEM_REFCOUNT`.
+
+ .. c:var:: NPY_LIST_PICKLE
+
+ Indicates arrays of this data-type must be converted to a list
+ before pickling.
+
+ .. c:var:: NPY_ITEM_IS_POINTER
+
+ Indicates the item is a pointer to some other data-type
+
+ .. c:var:: NPY_NEEDS_INIT
+
+ Indicates memory for this data-type must be initialized (set
+ to 0) on creation.
+
+ .. c:var:: NPY_NEEDS_PYAPI
+
+ Indicates this data-type requires the Python C-API during
+ access (so don't give up the GIL if array access is going to
+ be needed).
+
+ .. c:var:: NPY_USE_GETITEM
+
+ On array access use the ``f->getitem`` function pointer
+ instead of the standard conversion to an array scalar. Must
+ use if you don't define an array scalar to go along with
+ the data-type.
+
+ .. c:var:: NPY_USE_SETITEM
+
+ When creating a 0-d array from an array scalar use
+ ``f->setitem`` instead of the standard copy from an array
+ scalar. Must use if you don't define an array scalar to go
+ along with the data-type.
+
+ .. c:var:: NPY_FROM_FIELDS
+
+ The bits that are inherited for the parent data-type if these
+ bits are set in any field of the data-type. Currently (
+ :c:data:`NPY_NEEDS_INIT` \| :c:data:`NPY_LIST_PICKLE` \|
+ :c:data:`NPY_ITEM_REFCOUNT` \| :c:data:`NPY_NEEDS_PYAPI` ).
+
+ .. c:var:: NPY_OBJECT_DTYPE_FLAGS
+
+ Bits set for the object data-type: ( :c:data:`NPY_LIST_PICKLE`
+ \| :c:data:`NPY_USE_GETITEM` \| :c:data:`NPY_ITEM_IS_POINTER` \|
+        :c:data:`NPY_ITEM_REFCOUNT` \| :c:data:`NPY_NEEDS_INIT` \|
+ :c:data:`NPY_NEEDS_PYAPI`).
+
+ .. c:function:: PyDataType_FLAGCHK(PyArray_Descr *dtype, int flags)
+
+ Return true if all the given flags are set for the data-type
+ object.
+
+ .. c:function:: PyDataType_REFCHK(PyArray_Descr *dtype)
+
+ Equivalent to :c:func:`PyDataType_FLAGCHK` (*dtype*,
+ :c:data:`NPY_ITEM_REFCOUNT`).
+
+.. c:member:: int PyArray_Descr.type_num
+
+ A number that uniquely identifies the data type. For new data-types,
+ this number is assigned when the data-type is registered.
+
+.. c:member:: int PyArray_Descr.elsize
+
+ For data types that are always the same size (such as long), this
+ holds the size of the data type. For flexible data types where
+ different arrays can have a different elementsize, this should be
+ 0.
+
+.. c:member:: int PyArray_Descr.alignment
+
+ A number providing alignment information for this data type.
+ Specifically, it shows how far from the start of a 2-element
+ structure (whose first element is a ``char`` ), the compiler
+ places an item of this type: ``offsetof(struct {char c; type v;},
+ v)``
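+
+    For instance, a hedged sketch of that computation for ``double``:
+
+    .. code-block:: c
+
+        #include <stddef.h>
+
+        struct probe { char c; double v; };
+        /* alignment of double, commonly 8 on modern platforms */
+        size_t align = offsetof(struct probe, v);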
+
+.. c:member:: PyArray_ArrayDescr *PyArray_Descr.subarray
+
+    If this is non-``NULL``, then this data-type descriptor is a
+    C-style contiguous array of another data-type descriptor. In
+    other words, each element that this descriptor describes is
+    actually an array of some other base descriptor. This is most
+    useful as the data-type descriptor for a field in another
+    data-type descriptor. The fields member should be ``NULL`` if this
+    is non-``NULL`` (the fields member of the base descriptor can be
+    non-``NULL`` however). The :c:type:`PyArray_ArrayDescr` structure is
+ defined using
+
+ .. code-block:: c
+
+ typedef struct {
+ PyArray_Descr *base;
+ PyObject *shape;
+ } PyArray_ArrayDescr;
+
+ The elements of this structure are:
+
+ .. c:member:: PyArray_Descr *PyArray_ArrayDescr.base
+
+ The data-type-descriptor object of the base-type.
+
+ .. c:member:: PyObject *PyArray_ArrayDescr.shape
+
+ The shape (always C-style contiguous) of the sub-array as a Python
+ tuple.
+
+
+.. c:member:: PyObject *PyArray_Descr.fields
+
+ If this is non-NULL, then this data-type-descriptor has fields
+ described by a Python dictionary whose keys are names (and also
+ titles if given) and whose values are tuples that describe the
+ fields. Recall that a data-type-descriptor always describes a
+ fixed-length set of bytes. A field is a named sub-region of that
+ total, fixed-length collection. A field is described by a tuple
+    composed of another data-type-descriptor and a byte
+ offset. Optionally, the tuple may contain a title which is
+ normally a Python string. These tuples are placed in this
+ dictionary keyed by name (and also title if given).
+
+.. c:member:: PyObject *PyArray_Descr.names
+
+ An ordered tuple of field names. It is NULL if no field is
+ defined.
+
+.. c:member:: PyArray_ArrFuncs *PyArray_Descr.f
+
+ A pointer to a structure containing functions that the type needs
+ to implement internal features. These functions are not the same
+ thing as the universal functions (ufuncs) described later. Their
+ signatures can vary arbitrarily.
+
+.. c:member:: PyObject *PyArray_Descr.metadata
+
+ Metadata about this dtype.
+
+.. c:member:: NpyAuxData *PyArray_Descr.c_metadata
+
+ Metadata specific to the C implementation
+ of the particular dtype. Added for NumPy 1.7.0.
+
+.. c:member:: Npy_hash_t *PyArray_Descr.hash
+
+ Currently unused. Reserved for future use in caching
+ hash values.
+
+.. c:type:: PyArray_ArrFuncs
+
+ Functions implementing internal features. Not all of these
+ function pointers must be defined for a given type. The required
+ members are ``nonzero``, ``copyswap``, ``copyswapn``, ``setitem``,
+    ``getitem``, and ``cast``. These are assumed to be non-``NULL``
+ and ``NULL`` entries will cause a program crash. The other
+ functions may be ``NULL`` which will just mean reduced
+ functionality for that data-type. (Also, the nonzero function will
+ be filled in with a default function if it is ``NULL`` when you
+ register a user-defined data-type).
+
+ .. code-block:: c
+
+ typedef struct {
+ PyArray_VectorUnaryFunc *cast[NPY_NTYPES];
+ PyArray_GetItemFunc *getitem;
+ PyArray_SetItemFunc *setitem;
+ PyArray_CopySwapNFunc *copyswapn;
+ PyArray_CopySwapFunc *copyswap;
+ PyArray_CompareFunc *compare;
+ PyArray_ArgFunc *argmax;
+ PyArray_DotFunc *dotfunc;
+ PyArray_ScanFunc *scanfunc;
+ PyArray_FromStrFunc *fromstr;
+ PyArray_NonzeroFunc *nonzero;
+ PyArray_FillFunc *fill;
+ PyArray_FillWithScalarFunc *fillwithscalar;
+ PyArray_SortFunc *sort[NPY_NSORTS];
+ PyArray_ArgSortFunc *argsort[NPY_NSORTS];
+ PyObject *castdict;
+ PyArray_ScalarKindFunc *scalarkind;
+ int **cancastscalarkindto;
+ int *cancastto;
+ PyArray_FastClipFunc *fastclip;
+ PyArray_FastPutmaskFunc *fastputmask;
+ PyArray_FastTakeFunc *fasttake;
+ PyArray_ArgFunc *argmin;
+ } PyArray_ArrFuncs;
+
+ The concept of a behaved segment is used in the description of the
+ function pointers. A behaved segment is one that is aligned and in
+ native machine byte-order for the data-type. The ``nonzero``,
+ ``copyswap``, ``copyswapn``, ``getitem``, and ``setitem``
+ functions can (and must) deal with mis-behaved arrays. The other
+ functions require behaved memory segments.
+
+ .. c:member:: void cast( \
+ void *from, void *to, npy_intp n, void *fromarr, void *toarr)
+
+ An array of function pointers to cast from the current type to
+ all of the other builtin types. Each function casts a
+ contiguous, aligned, and notswapped buffer pointed at by
+        *from* to a contiguous, aligned, and notswapped buffer pointed
+        at by *to*. The number of items to cast is given by *n*, and
+ the arguments *fromarr* and *toarr* are interpreted as
+ PyArrayObjects for flexible arrays to get itemsize
+ information.
+
+ .. c:member:: PyObject *getitem(void *data, void *arr)
+
+ A pointer to a function that returns a standard Python object
+ from a single element of the array object *arr* pointed to by
+        *data*. This function must be able to deal with "misbehaved"
+        (misaligned and/or swapped) arrays correctly.
+
+ .. c:member:: int setitem(PyObject *item, void *data, void *arr)
+
+ A pointer to a function that sets the Python object *item*
+        into the array, *arr*, at the position pointed to by
+        *data*. This function deals with "misbehaved" arrays. If successful,
+ a zero is returned, otherwise, a negative one is returned (and
+ a Python error set).
+
+ .. c:member:: void copyswapn( \
+ void *dest, npy_intp dstride, void *src, npy_intp sstride, \
+ npy_intp n, int swap, void *arr)
+
+ .. c:member:: void copyswap(void *dest, void *src, int swap, void *arr)
+
+ These members are both pointers to functions to copy data from
+ *src* to *dest* and *swap* if indicated. The value of arr is
+ only used for flexible ( :c:data:`NPY_STRING`, :c:data:`NPY_UNICODE`,
+ and :c:data:`NPY_VOID` ) arrays (and is obtained from
+ ``arr->descr->elsize`` ). The second function copies a single
+ value, while the first loops over n values with the provided
+ strides. These functions can deal with misbehaved *src*
+ data. If *src* is NULL then no copy is performed. If *swap* is
+ 0, then no byteswapping occurs. It is assumed that *dest* and
+        *src* do not overlap. If they overlap, then use ``memmove``
+        first, followed by ``copyswap(n)`` with a NULL-valued
+        ``src``.
+
+ .. c:member:: int compare(const void* d1, const void* d2, void* arr)
+
+ A pointer to a function that compares two elements of the
+ array, ``arr``, pointed to by ``d1`` and ``d2``. This
+ function requires behaved (aligned and not swapped) arrays.
+        The return value is 1 if ``*d1`` > ``*d2``, 0 if ``*d1`` ==
+        ``*d2``, and -1 if ``*d1`` < ``*d2``. The array object ``arr`` is
+        used to retrieve itemsize and field information for flexible arrays.
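+
+        As an illustration, a hedged sketch of such a function for a
+        hypothetical user data-type backed by a ``double``:
+
+        .. code-block:: c
+
+            static int
+            mytype_compare(const void *d1, const void *d2, void *arr)
+            {
+                double a = *(const double *)d1;
+                double b = *(const double *)d2;
+                /* arr is unused for this fixed-size data-type */
+                return (a > b) - (a < b);
+            }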
+
+ .. c:member:: int argmax( \
+ void* data, npy_intp n, npy_intp* max_ind, void* arr)
+
+ A pointer to a function that retrieves the index of the
+ largest of ``n`` elements in ``arr`` beginning at the element
+ pointed to by ``data``. This function requires that the
+ memory segment be contiguous and behaved. The return value is
+ always 0. The index of the largest element is returned in
+ ``max_ind``.
+
+ .. c:member:: void dotfunc( \
+ void* ip1, npy_intp is1, void* ip2, npy_intp is2, void* op, \
+ npy_intp n, void* arr)
+
+ A pointer to a function that multiplies two ``n`` -length
+ sequences together, adds them, and places the result in
+ element pointed to by ``op`` of ``arr``. The start of the two
+ sequences are pointed to by ``ip1`` and ``ip2``. To get to
+ the next element in each sequence requires a jump of ``is1``
+ and ``is2`` *bytes*, respectively. This function requires
+ behaved (though not necessarily contiguous) memory.
+
+ .. c:member:: int scanfunc(FILE* fd, void* ip, void* arr)
+
+ A pointer to a function that scans (scanf style) one element
+ of the corresponding type from the file descriptor ``fd`` into
+ the array memory pointed to by ``ip``. The array is assumed
+ to be behaved.
+ The last argument ``arr`` is the array to be scanned into.
+        Returns the number of receiving arguments successfully assigned (which
+ may be zero in case a matching failure occurred before the first
+ receiving argument was assigned), or EOF if input failure occurs
+ before the first receiving argument was assigned.
+ This function should be called without holding the Python GIL, and
+ has to grab it for error reporting.
+
+ .. c:member:: int fromstr(char* str, void* ip, char** endptr, void* arr)
+
+ A pointer to a function that converts the string pointed to by
+ ``str`` to one element of the corresponding type and places it
+ in the memory location pointed to by ``ip``. After the
+ conversion is completed, ``*endptr`` points to the rest of the
+ string. The last argument ``arr`` is the array into which ip
+        points (needed for variable-size data-types). Returns 0 on
+ success or -1 on failure. Requires a behaved array.
+ This function should be called without holding the Python GIL, and
+ has to grab it for error reporting.
+
+    .. c:member:: npy_bool nonzero(void* data, void* arr)
+
+ A pointer to a function that returns TRUE if the item of
+ ``arr`` pointed to by ``data`` is nonzero. This function can
+ deal with misbehaved arrays.
+
+ .. c:member:: void fill(void* data, npy_intp length, void* arr)
+
+ A pointer to a function that fills a contiguous array of given
+        length with data. The first two elements of the array must
+        already be filled in. From these two values, a delta will be
+ computed and the values from item 3 to the end will be
+ computed by repeatedly adding this computed delta. The data
+ buffer must be well-behaved.
+
+ .. c:member:: void fillwithscalar( \
+ void* buffer, npy_intp length, void* value, void* arr)
+
+ A pointer to a function that fills a contiguous ``buffer`` of
+ the given ``length`` with a single scalar ``value`` whose
+ address is given. The final argument is the array which is
+ needed to get the itemsize for variable-length arrays.
+
+ .. c:member:: int sort(void* start, npy_intp length, void* arr)
+
+        An array of function pointers to particular sorting
+        algorithms. A particular sorting algorithm is obtained using a
+ key (so far :c:data:`NPY_QUICKSORT`, :c:data:`NPY_HEAPSORT`,
+ and :c:data:`NPY_MERGESORT` are defined). These sorts are done
+ in-place assuming contiguous and aligned data.
+
+ .. c:member:: int argsort( \
+ void* start, npy_intp* result, npy_intp length, void *arr)
+
+ An array of function pointers to sorting algorithms for this
+ data type. The same sorting algorithms as for sort are
+ available. The indices producing the sort are returned in
+ ``result`` (which must be initialized with indices 0 to
+ ``length-1`` inclusive).
+
+ .. c:member:: PyObject *castdict
+
+ Either ``NULL`` or a dictionary containing low-level casting
+ functions for user-defined data-types. Each function is
+ wrapped in a :c:type:`PyCObject *` and keyed by the data-type number.
+
+ .. c:member:: NPY_SCALARKIND scalarkind(PyArrayObject* arr)
+
+ A function to determine how scalars of this type should be
+ interpreted. The argument is ``NULL`` or a 0-dimensional array
+ containing the data (if that is needed to determine the kind
+ of scalar). The return value must be of type
+ :c:type:`NPY_SCALARKIND`.
+
+ .. c:member:: int **cancastscalarkindto
+
+ Either ``NULL`` or an array of :c:type:`NPY_NSCALARKINDS`
+ pointers. These pointers should each be either ``NULL`` or a
+ pointer to an array of integers (terminated by
+ :c:data:`NPY_NOTYPE`) indicating data-types that a scalar of
+ this data-type of the specified kind can be cast to safely
+ (this usually means without losing precision).
+
+ .. c:member:: int *cancastto
+
+ Either ``NULL`` or an array of integers (terminated by
+ :c:data:`NPY_NOTYPE`) indicating data-types that this data-type
+ can be cast to safely (this usually means without losing
+ precision).
+
+ .. c:member:: void fastclip( \
+ void *in, npy_intp n_in, void *min, void *max, void *out)
+
+ A function that reads ``n_in`` items from ``in``, and writes to
+ ``out`` the read value if it is within the limits pointed to by
+ ``min`` and ``max``, or the corresponding limit if outside. The
+ memory segments must be contiguous and behaved, and either
+ ``min`` or ``max`` may be ``NULL``, but not both.
+
+ .. c:member:: void fastputmask( \
+ void *in, void *mask, npy_intp n_in, void *values, npy_intp nv)
+
+ A function that takes a pointer ``in`` to an array of ``n_in``
+ items, a pointer ``mask`` to an array of ``n_in`` boolean
+ values, and a pointer ``values`` to an array of ``nv`` items.
+ Items from ``values`` are copied into ``in`` wherever the value
+ in ``mask`` is non-zero, tiling ``values`` as needed if
+ ``nv < n_in``. All arrays must be contiguous and behaved.
+
+ .. c:member:: void fasttake( \
+ void *dest, void *src, npy_intp *indarray, npy_intp nindarray, \
+ npy_intp n_outer, npy_intp m_middle, npy_intp nelem, \
+ NPY_CLIPMODE clipmode)
+
+ A function that takes a pointer ``src`` to a C contiguous,
+ behaved segment, interpreted as a 3-dimensional array of shape
+ ``(n_outer, nindarray, nelem)``, a pointer ``indarray`` to a
+ contiguous, behaved segment of ``m_middle`` integer indices,
+ and a pointer ``dest`` to a C contiguous, behaved segment,
+ interpreted as a 3-dimensional array of shape
+ ``(n_outer, m_middle, nelem)``. The indices in ``indarray`` are
+ used to index ``src`` along the second dimension, and copy the
+ corresponding chunks of ``nelem`` items into ``dest``.
+ ``clipmode`` (which can take on the values :c:data:`NPY_RAISE`,
+ :c:data:`NPY_WRAP` or :c:data:`NPY_CLIP`) determines how
+ indices smaller than 0 or larger than ``nindarray`` will be
+ handled.
+
+ .. c:member:: int argmin( \
+ void* data, npy_intp n, npy_intp* min_ind, void* arr)
+
+ A pointer to a function that retrieves the index of the
+ smallest of ``n`` elements in ``arr`` beginning at the element
+ pointed to by ``data``. This function requires that the
+ memory segment be contiguous and behaved. The return value is
+ always 0. The index of the smallest element is returned in
+ ``min_ind``.
+
+
+The :c:data:`PyArray_Type` typeobject implements many of the features of
+:c:type:`Python objects <PyTypeObject>` including the :c:member:`tp_as_number
+<PyTypeObject.tp_as_number>`, :c:member:`tp_as_sequence
+<PyTypeObject.tp_as_sequence>`, :c:member:`tp_as_mapping
+<PyTypeObject.tp_as_mapping>`, and :c:member:`tp_as_buffer
+<PyTypeObject.tp_as_buffer>` interfaces. The :c:type:`rich comparison
+<richcmpfunc>` is also used along with new-style attribute lookup for
+members (:c:member:`tp_members <PyTypeObject.tp_members>`) and properties
+(:c:member:`tp_getset <PyTypeObject.tp_getset>`).
+The :c:data:`PyArray_Type` can also be sub-typed.
+
+.. tip::
+
+ The ``tp_as_number`` methods use a generic approach to call whatever
+ function has been registered for handling the operation. When the
+ ``_multiarray_umath`` module is imported, it sets the numeric operations
+ for all arrays to the corresponding ufuncs. This choice can be changed with
+ :c:func:`PyUFunc_ReplaceLoopBySignature`. The ``tp_str`` and ``tp_repr``
+ methods can also be altered using :c:func:`PyArray_SetStringFunction`.
+
+
+PyUFunc_Type and PyUFuncObject
+------------------------------
+
+.. c:var:: PyUFunc_Type
+
+ The ufunc object is implemented by creation of the
+ :c:data:`PyUFunc_Type`. It is a very simple type that implements only
+ basic getattribute behavior, printing behavior, and has call
+ behavior which allows these objects to act like functions. The
+ basic idea behind the ufunc is to hold a reference to fast
+ 1-dimensional (vector) loops for each data type that supports the
+ operation. These one-dimensional loops all have the same signature
+ and are the key to creating a new ufunc. They are called by the
+ generic looping code as appropriate to implement the N-dimensional
+ function. There are also some generic 1-d loops defined for
+ floating and complexfloating arrays that allow you to define a
+ ufunc using a single scalar function (*e.g.* atanh).
+
+
+.. c:type:: PyUFuncObject
+
+ The core of the ufunc is the :c:type:`PyUFuncObject` which contains all
+ the information needed to call the underlying C-code loops that
+ perform the actual work. While it is described here for completeness, it
+ should be considered internal to NumPy and manipulated via ``PyUFunc_*``
+ functions. The size of this structure is subject to change across versions
+ of NumPy. To ensure compatibility:
+
+ - Never declare a non-pointer instance of the struct
+ - Never perform pointer arithmetic
+ - Never use ``sizeof(PyUFuncObject)``
+
+ It has the following structure:
+
+ .. code-block:: c
+
+ typedef struct {
+ PyObject_HEAD
+ int nin;
+ int nout;
+ int nargs;
+ int identity;
+ PyUFuncGenericFunction *functions;
+ void **data;
+ int ntypes;
+ int reserved1;
+ const char *name;
+ char *types;
+ const char *doc;
+ void *ptr;
+ PyObject *obj;
+ PyObject *userloops;
+ int core_enabled;
+ int core_num_dim_ix;
+ int *core_num_dims;
+ int *core_dim_ixs;
+ int *core_offsets;
+ char *core_signature;
+ PyUFunc_TypeResolutionFunc *type_resolver;
+ PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
+ PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector;
+ npy_uint32 *op_flags;
+ npy_uint32 iter_flags;
+ /* new in API version 0x0000000D */
+ npy_intp *core_dim_sizes;
+ npy_uint32 *core_dim_flags;
+
+ } PyUFuncObject;
+
+ .. c:macro:: PyUFuncObject.PyObject_HEAD
+
+ required for all Python objects.
+
+ .. c:member:: int PyUFuncObject.nin
+
+ The number of input arguments.
+
+ .. c:member:: int PyUFuncObject.nout
+
+ The number of output arguments.
+
+ .. c:member:: int PyUFuncObject.nargs
+
+ The total number of arguments (*nin* + *nout*). This must be
+ less than :c:data:`NPY_MAXARGS`.
+
+ .. c:member:: int PyUFuncObject.identity
+
+ Either :c:data:`PyUFunc_One`, :c:data:`PyUFunc_Zero`,
+ :c:data:`PyUFunc_None` or :c:data:`PyUFunc_MinusOne` to indicate
+ the identity for this operation. It is only used for a
+ reduce-like call on an empty array.
+
+ .. c:member:: void PyUFuncObject.functions( \
+ char** args, npy_intp* dims, npy_intp* steps, void* extradata)
+
+ An array of function pointers --- one for each data type
+ supported by the ufunc. This is the vector loop that is called
+ to implement the underlying function *dims* [0] times. The
+ first argument, *args*, is an array of *nargs* pointers to
+ behaved memory. Pointers to the data for the input arguments
+ are first, followed by the pointers to the data for the output
+ arguments. How many bytes must be skipped to get to the next
+ element in the sequence is specified by the corresponding entry
+ in the *steps* array. The last argument allows the loop to
+ receive extra information. This is commonly used so that a
+ single, generic vector loop can be used for multiple
+ functions. In this case, the actual scalar function to call is
+ passed in as *extradata*. The size of this function pointer
+ array is ntypes.
+
+ .. c:member:: void **PyUFuncObject.data
+
+ Extra data to be passed to the 1-d vector loops, or ``NULL`` if
+ no extra data is needed. This C-array must be the same size
+ (*i.e.* ntypes) as the functions array. Several C-API calls for
+ UFuncs are just 1-d vector loops that make use of this extra
+ data to receive a pointer to the actual function to call.
+
+ .. c:member:: int PyUFuncObject.ntypes
+
+ The number of supported data types for the ufunc. This number
+ specifies how many different 1-d loops (of the builtin data
+ types) are available.
+
+ .. c:member:: int PyUFuncObject.reserved1
+
+ Unused.
+
+ .. c:member:: char *PyUFuncObject.name
+
+ A string name for the ufunc. This is used dynamically to build
+ the __doc\__ attribute of ufuncs.
+
+ .. c:member:: char *PyUFuncObject.types
+
+ An array of :math:`nargs \times ntypes` 8-bit type_numbers
+ which contains the type signature for the function for each of
+ the supported (builtin) data types. For each of the *ntypes*
+ functions, the corresponding set of type numbers in this array
+ shows how the *args* argument should be interpreted in the 1-d
+ vector loop. These type numbers do not have to be the same, and
+ mixed-type ufuncs are supported.
+
+ .. c:member:: char *PyUFuncObject.doc
+
+ Documentation for the ufunc. Should not contain the function
+ signature as this is generated dynamically when __doc\__ is
+ retrieved.
+
+ .. c:member:: void *PyUFuncObject.ptr
+
+ Any dynamically allocated memory. Currently, this is used for
+ dynamic ufuncs created from a python function to hold the memory
+ for the types, data, and name members.
+
+ .. c:member:: PyObject *PyUFuncObject.obj
+
+ For ufuncs dynamically created from python functions, this member
+ holds a reference to the underlying Python function.
+
+ .. c:member:: PyObject *PyUFuncObject.userloops
+
+ A dictionary of user-defined 1-d vector loops (stored as CObject
+ ptrs) for user-defined types. A loop may be registered by the
+ user for any user-defined type. It is retrieved by type number.
+ User defined type numbers are always larger than
+ :c:data:`NPY_USERDEF`.
+
+ .. c:member:: int PyUFuncObject.core_enabled
+
+ 0 for scalar ufuncs; 1 for generalized ufuncs
+
+ .. c:member:: int PyUFuncObject.core_num_dim_ix
+
+ Number of distinct core dimension names in the signature
+
+ .. c:member:: int *PyUFuncObject.core_num_dims
+
+ Number of core dimensions of each argument
+
+ .. c:member:: int *PyUFuncObject.core_dim_ixs
+
+ Dimension indices in a flattened form; indices of argument ``k`` are
+ stored in ``core_dim_ixs[core_offsets[k] : core_offsets[k] +
+ core_num_dims[k]]``
+
+ .. c:member:: int *PyUFuncObject.core_offsets
+
+ Position of 1st core dimension of each argument in ``core_dim_ixs``,
+ equivalent to cumsum(``core_num_dims``)
+
+ .. c:member:: char *PyUFuncObject.core_signature
+
+ Core signature string
+
+ .. c:member:: PyUFunc_TypeResolutionFunc *PyUFuncObject.type_resolver
+
+ A function which resolves the types and fills an array with the dtypes
+ for the inputs and outputs
+
+ .. c:member:: PyUFunc_LegacyInnerLoopSelectionFunc *PyUFuncObject.legacy_inner_loop_selector
+
+ A function which returns an inner loop. The ``legacy`` in the name arises
+ because for NumPy 1.6 a better variant had been planned. This variant
+ has not yet come about.
+
+ .. c:member:: void *PyUFuncObject.reserved2
+
+ For a possible future loop selector with a different signature.
+
+ .. c:member:: PyUFunc_MaskedInnerLoopSelectionFunc *PyUFuncObject.masked_inner_loop_selector
+
+ Function which returns a masked inner loop for the ufunc
+
+ .. c:member:: npy_uint32 *PyUFuncObject.op_flags
+
+ Override the default operand flags for each ufunc operand.
+
+ .. c:member:: npy_uint32 PyUFuncObject.iter_flags
+
+ Override the default nditer flags for the ufunc.
+
+ The following members are new in API version 0x0000000D.
+
+ .. c:member:: npy_intp *PyUFuncObject.core_dim_sizes
+
+ For each distinct core dimension, the possible
+ :ref:`frozen <frozen>` size if :c:data:`UFUNC_CORE_DIM_SIZE_INFERRED` is 0
+
+ .. c:member:: npy_uint32 *PyUFuncObject.core_dim_flags
+
+ For each distinct core dimension, a set of ``UFUNC_CORE_DIM*`` flags
+
+ - :c:data:`UFUNC_CORE_DIM_CAN_IGNORE` if the dim name ends in ``?``
+ - :c:data:`UFUNC_CORE_DIM_SIZE_INFERRED` if the dim size will be
+ determined from the operands and not from a :ref:`frozen <frozen>` signature
+
+PyArrayIter_Type and PyArrayIterObject
+--------------------------------------
+
+.. c:var:: PyArrayIter_Type
+
+ This is an iterator object that makes it easy to loop over an
+ N-dimensional array. It is the object returned from the flat
+ attribute of an ndarray. It is also used extensively throughout the
+ implementation internals to loop over an N-dimensional array. The
+ tp_as_mapping interface is implemented so that the iterator object
+ can be indexed (using 1-d indexing), and a few methods are
+ implemented through the tp_methods table. This object implements the
+ next method and can be used anywhere an iterator can be used in
+ Python.
+
+.. c:type:: PyArrayIterObject
+
+ The C-structure corresponding to an object of :c:data:`PyArrayIter_Type` is
+ the :c:type:`PyArrayIterObject`. The :c:type:`PyArrayIterObject` is used to
+ keep track of a pointer into an N-dimensional array. It contains associated
+ information used to quickly march through the array. The pointer can
+ be adjusted in three basic ways: 1) advance to the "next" position in
+ the array in a C-style contiguous fashion, 2) advance to an arbitrary
+ N-dimensional coordinate in the array, and 3) advance to an arbitrary
+ one-dimensional index into the array. The members of the
+ :c:type:`PyArrayIterObject` structure are used in these
+ calculations. Iterator objects keep their own dimension and strides
+ information about an array. This can be adjusted as needed for
+ "broadcasting," or to loop over only specific dimensions.
+
+ .. code-block:: c
+
+ typedef struct {
+ PyObject_HEAD
+ int nd_m1;
+ npy_intp index;
+ npy_intp size;
+ npy_intp coordinates[NPY_MAXDIMS];
+ npy_intp dims_m1[NPY_MAXDIMS];
+ npy_intp strides[NPY_MAXDIMS];
+ npy_intp backstrides[NPY_MAXDIMS];
+ npy_intp factors[NPY_MAXDIMS];
+ PyArrayObject *ao;
+ char *dataptr;
+ Bool contiguous;
+ } PyArrayIterObject;
+
+ .. c:member:: int PyArrayIterObject.nd_m1
+
+ :math:`N-1` where :math:`N` is the number of dimensions in the
+ underlying array.
+
+ .. c:member:: npy_intp PyArrayIterObject.index
+
+ The current 1-d index into the array.
+
+ .. c:member:: npy_intp PyArrayIterObject.size
+
+ The total size of the underlying array.
+
+ .. c:member:: npy_intp *PyArrayIterObject.coordinates
+
+ An :math:`N` -dimensional index into the array.
+
+ .. c:member:: npy_intp *PyArrayIterObject.dims_m1
+
+ The size of the array minus 1 in each dimension.
+
+ .. c:member:: npy_intp *PyArrayIterObject.strides
+
+ The strides of the array. How many bytes are needed to jump to
+ the next element in each dimension.
+
+ .. c:member:: npy_intp *PyArrayIterObject.backstrides
+
+ How many bytes are needed to jump from the end of a dimension back
+ to its beginning. Note that ``backstrides[k] == strides[k] *
+ dims_m1[k]``, but it is stored here as an optimization.
+
+ .. c:member:: npy_intp *PyArrayIterObject.factors
+
+ This array is used in computing an N-d index from a 1-d index. It
+ contains needed products of the dimensions.
+
+ .. c:member:: PyArrayObject *PyArrayIterObject.ao
+
+ A pointer to the underlying ndarray this iterator was created to
+ represent.
+
+ .. c:member:: char *PyArrayIterObject.dataptr
+
+ This member points to an element in the ndarray indicated by the
+ index.
+
+ .. c:member:: Bool PyArrayIterObject.contiguous
+
+ This flag is true if the underlying array is
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS`. It is used to simplify
+ calculations when possible.
+
+
+How to use an array iterator on a C-level is explained more fully in
+later sections. Typically, you do not need to concern yourself with
+the internal structure of the iterator object, and merely interact
+with it through the use of the macros :c:func:`PyArray_ITER_NEXT` (it),
+:c:func:`PyArray_ITER_GOTO` (it, dest), or :c:func:`PyArray_ITER_GOTO1D`
+(it, index). All of these macros require the argument *it* to be a
+:c:type:`PyArrayIterObject *`.
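+
+As a hedged sketch of this pattern (assuming *arr* is a
+``PyArrayObject *`` of type :c:data:`NPY_DOUBLE`), a sum over all of
+its elements could be written as:
+
+.. code-block:: c
+
+ PyObject *it = PyArray_IterNew((PyObject *)arr);
+ double sum = 0.0;
+ if (it == NULL) {
+ return NULL;
+ }
+ while (PyArray_ITER_NOTDONE(it)) {
+ /* the iterator's dataptr points at the current element */
+ sum += *(double *)PyArray_ITER_DATA(it);
+ PyArray_ITER_NEXT(it);
+ }
+ Py_DECREF(it);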
+
+
+PyArrayMultiIter_Type and PyArrayMultiIterObject
+------------------------------------------------
+
+.. c:var:: PyArrayMultiIter_Type
+
+ This type provides an iterator that encapsulates the concept of
+ broadcasting. It allows :math:`N` arrays to be broadcast together
+ so that the loop progresses in C-style contiguous fashion over the
+ broadcasted array. The corresponding C-structure is the
+ :c:type:`PyArrayMultiIterObject`, whose memory layout must be at the
+ start of any object, *obj*, passed in to the :c:func:`PyArray_Broadcast` (obj)
+ function. Broadcasting is performed by adjusting array iterators so
+ that each iterator represents the broadcasted shape and size, but
+ has its strides adjusted so that the correct element from the array
+ is used at each iteration.
+
+
+.. c:type:: PyArrayMultiIterObject
+
+ .. code-block:: c
+
+ typedef struct {
+ PyObject_HEAD
+ int numiter;
+ npy_intp size;
+ npy_intp index;
+ int nd;
+ npy_intp dimensions[NPY_MAXDIMS];
+ PyArrayIterObject *iters[NPY_MAXDIMS];
+ } PyArrayMultiIterObject;
+
+ .. c:macro:: PyArrayMultiIterObject.PyObject_HEAD
+
+ Needed at the start of every Python object (holds reference count
+ and type identification).
+
+ .. c:member:: int PyArrayMultiIterObject.numiter
+
+ The number of arrays that need to be broadcast to the same shape.
+
+ .. c:member:: npy_intp PyArrayMultiIterObject.size
+
+ The total broadcasted size.
+
+ .. c:member:: npy_intp PyArrayMultiIterObject.index
+
+ The current (1-d) index into the broadcasted result.
+
+ .. c:member:: int PyArrayMultiIterObject.nd
+
+ The number of dimensions in the broadcasted result.
+
+ .. c:member:: npy_intp *PyArrayMultiIterObject.dimensions
+
+ The shape of the broadcasted result (only ``nd`` slots are used).
+
+ .. c:member:: PyArrayIterObject **PyArrayMultiIterObject.iters
+
+ An array of iterator objects that holds the iterators for the
+ arrays to be broadcast together. On return, the iterators are
+ adjusted for broadcasting.
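+
+ As a hedged sketch (assuming *a* and *b* are ``PyArrayObject *`` of
+ type :c:data:`NPY_DOUBLE`), two arrays can be broadcast together and
+ walked in lockstep like this:
+
+ .. code-block:: c
+
+ PyArrayMultiIterObject *mit = (PyArrayMultiIterObject *)
+ PyArray_MultiIterNew(2, (PyObject *)a, (PyObject *)b);
+ if (mit == NULL) {
+ return NULL;
+ }
+ while (PyArray_MultiIter_NOTDONE(mit)) {
+ double x = *(double *)PyArray_MultiIter_DATA(mit, 0);
+ double y = *(double *)PyArray_MultiIter_DATA(mit, 1);
+ /* ... use the broadcast pair (x, y) ... */
+ PyArray_MultiIter_NEXT(mit);
+ }
+ Py_DECREF(mit);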
+
+PyArrayNeighborhoodIter_Type and PyArrayNeighborhoodIterObject
+--------------------------------------------------------------
+
+.. c:var:: PyArrayNeighborhoodIter_Type
+
+ This is an iterator object that makes it easy to loop over an
+ N-dimensional neighborhood.
+
+.. c:type:: PyArrayNeighborhoodIterObject
+
+ The C-structure corresponding to an object of
+ :c:data:`PyArrayNeighborhoodIter_Type` is the
+ :c:type:`PyArrayNeighborhoodIterObject`.
+
+ .. code-block:: c
+
+ typedef struct {
+ PyObject_HEAD
+ int nd_m1;
+ npy_intp index, size;
+ npy_intp coordinates[NPY_MAXDIMS];
+ npy_intp dims_m1[NPY_MAXDIMS];
+ npy_intp strides[NPY_MAXDIMS];
+ npy_intp backstrides[NPY_MAXDIMS];
+ npy_intp factors[NPY_MAXDIMS];
+ PyArrayObject *ao;
+ char *dataptr;
+ npy_bool contiguous;
+ npy_intp bounds[NPY_MAXDIMS][2];
+ npy_intp limits[NPY_MAXDIMS][2];
+ npy_intp limits_sizes[NPY_MAXDIMS];
+ npy_iter_get_dataptr_t translate;
+ npy_intp nd;
+ npy_intp dimensions[NPY_MAXDIMS];
+ PyArrayIterObject* _internal_iter;
+ char* constant;
+ int mode;
+ } PyArrayNeighborhoodIterObject;
+
+PyArrayFlags_Type and PyArrayFlagsObject
+----------------------------------------
+
+.. c:var:: PyArrayFlags_Type
+
+ When the flags attribute is retrieved from Python, a special
+ builtin object of this type is constructed. This special type makes
+ it easier to work with the different flags by accessing them as
+ attributes or by accessing them as if the object were a dictionary
+ with the flag names as entries.
+
+.. c:type:: PyArrayFlagsObject
+
+ .. code-block:: c
+
+ typedef struct PyArrayFlagsObject {
+ PyObject_HEAD
+ PyObject *arr;
+ int flags;
+ } PyArrayFlagsObject;
+
+
+ScalarArrayTypes
+----------------
+
+There is a Python type for each of the different built-in data types
+that can be present in the array. Most of these are simple wrappers
+around the corresponding data type in C. The C-names for these types
+are :c:data:`Py{TYPE}ArrType_Type` where ``{TYPE}`` can be
+
+ **Bool**, **Byte**, **Short**, **Int**, **Long**, **LongLong**,
+ **UByte**, **UShort**, **UInt**, **ULong**, **ULongLong**,
+ **Half**, **Float**, **Double**, **LongDouble**, **CFloat**,
+ **CDouble**, **CLongDouble**, **String**, **Unicode**, **Void**, and
+ **Object**.
+
+These type names are part of the C-API and can therefore be created in
+extension C-code. There is also a :c:data:`PyIntpArrType_Type` and a
+:c:data:`PyUIntpArrType_Type` that are simple substitutes for one of the
+integer types that can hold a pointer on the platform. The structure
+of these scalar objects is not exposed to C-code. The function
+:c:func:`PyArray_ScalarAsCtype` can be used to extract the C-type
+value from the array scalar, and the function :c:func:`PyArray_Scalar`
+can be used to construct an array scalar from a C-value.
+
+
+Other C-Structures
+==================
+
+A few new C-structures were found to be useful in the development of
+NumPy. These C-structures are used in at least one C-API call and are
+therefore documented here. The main reason these structures were
+defined is to make it easy to use the Python ParseTuple C-API to
+convert from Python objects to a useful C-Object.
+
+
+PyArray_Dims
+------------
+
+.. c:type:: PyArray_Dims
+
+ This structure is very useful when shape and/or strides information
+ is supposed to be interpreted. The structure is:
+
+ .. code-block:: c
+
+ typedef struct {
+ npy_intp *ptr;
+ int len;
+ } PyArray_Dims;
+
+ The members of this structure are
+
+ .. c:member:: npy_intp *PyArray_Dims.ptr
+
+ A pointer to a list of (:c:type:`npy_intp`) integers which
+ usually represent array shape or array strides.
+
+ .. c:member:: int PyArray_Dims.len
+
+ The length of the list of integers. It is assumed safe to
+ access ``ptr[0]`` to ``ptr[len-1]``.
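+
+ As a hedged sketch, a :c:type:`PyArray_Dims` structure is typically
+ filled via :c:func:`PyArray_IntpConverter` in a
+ :c:func:`PyArg_ParseTuple` call; the function name ``my_func`` is
+ hypothetical:
+
+ .. code-block:: c
+
+ static PyObject *
+ my_func(PyObject *self, PyObject *args)
+ {
+ PyArray_Dims shape = {NULL, 0};
+ if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, &shape)) {
+ return NULL;
+ }
+ /* ... use shape.ptr[0] through shape.ptr[shape.len - 1] ... */
+ PyDimMem_FREE(shape.ptr); /* the converter allocates; we free */
+ Py_RETURN_NONE;
+ }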
+
+
+PyArray_Chunk
+-------------
+
+.. c:type:: PyArray_Chunk
+
+ This is equivalent to the buffer object structure in Python up to
+ the ptr member. On 32-bit platforms (*i.e.* if :c:data:`NPY_SIZEOF_INT`
+ == :c:data:`NPY_SIZEOF_INTP`), the len member also matches an equivalent
+ member of the buffer object. It is useful to represent a generic
+ single-segment chunk of memory.
+
+ .. code-block:: c
+
+ typedef struct {
+ PyObject_HEAD
+ PyObject *base;
+ void *ptr;
+ npy_intp len;
+ int flags;
+ } PyArray_Chunk;
+
+ The members are
+
+ .. c:macro:: PyArray_Chunk.PyObject_HEAD
+
+ Necessary for all Python objects. Included here so that the
+ :c:type:`PyArray_Chunk` structure matches that of the buffer object
+ (at least to the len member).
+
+ .. c:member:: PyObject *PyArray_Chunk.base
+
+ The Python object this chunk of memory comes from. Needed so that
+ memory can be accounted for properly.
+
+ .. c:member:: void *PyArray_Chunk.ptr
+
+ A pointer to the start of the single-segment chunk of memory.
+
+ .. c:member:: npy_intp PyArray_Chunk.len
+
+ The length of the segment in bytes.
+
+ .. c:member:: int PyArray_Chunk.flags
+
+ Any data flags (*e.g.* :c:data:`NPY_ARRAY_WRITEABLE` ) that should
+ be used to interpret the memory.
+
+
+PyArrayInterface
+----------------
+
+.. seealso:: :ref:`arrays.interface`
+
+.. c:type:: PyArrayInterface
+
+ The :c:type:`PyArrayInterface` structure is defined so that NumPy and
+ other extension modules can use the rapid array interface
+ protocol. The :obj:`__array_struct__` method of an object that
+ supports the rapid array interface protocol should return a
+ :c:type:`PyCObject` that contains a pointer to a :c:type:`PyArrayInterface`
+ structure with the relevant details of the array. After the new
+ array is created, the attribute should be ``DECREF``'d which will
+ free the :c:type:`PyArrayInterface` structure. Remember to ``INCREF`` the
+ object (whose :obj:`__array_struct__` attribute was retrieved) and
+ point the base member of the new :c:type:`PyArrayObject` to this same
+ object. In this way the memory for the array will be managed
+ correctly.
+
+ .. code-block:: c
+
+ typedef struct {
+ int two;
+ int nd;
+ char typekind;
+ int itemsize;
+ int flags;
+ npy_intp *shape;
+ npy_intp *strides;
+ void *data;
+ PyObject *descr;
+ } PyArrayInterface;
+
+ .. c:member:: int PyArrayInterface.two
+
+ The integer 2 as a sanity check.
+
+ .. c:member:: int PyArrayInterface.nd
+
+ The number of dimensions in the array.
+
+ .. c:member:: char PyArrayInterface.typekind
+
+ A character indicating what kind of array is present according to the
+ typestring convention with 't' -> bitfield, 'b' -> Boolean, 'i' ->
+ signed integer, 'u' -> unsigned integer, 'f' -> floating point, 'c' ->
+ complex floating point, 'O' -> object, 'S' -> (byte-)string, 'U' ->
+ unicode, 'V' -> void.
+
+ .. c:member:: int PyArrayInterface.itemsize
+
+ The number of bytes each item in the array requires.
+
+ .. c:member:: int PyArrayInterface.flags
+
+ Any of the bits :c:data:`NPY_ARRAY_C_CONTIGUOUS` (1),
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS` (2), :c:data:`NPY_ARRAY_ALIGNED` (0x100),
+ :c:data:`NPY_ARRAY_NOTSWAPPED` (0x200), or :c:data:`NPY_ARRAY_WRITEABLE`
+ (0x400) to indicate something about the data. The
+ :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_C_CONTIGUOUS`, and
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS` flags can actually be determined from
+ the other parameters. The flag :c:data:`NPY_ARR_HAS_DESCR`
+ (0x800) can also be set to indicate to objects consuming the
+ version 3 array interface that the descr member of the
+ structure is present (it will be ignored by objects consuming
+ version 2 of the array interface).
+
+ .. c:member:: npy_intp *PyArrayInterface.shape
+
+ An array containing the size of the array in each dimension.
+
+ .. c:member:: npy_intp *PyArrayInterface.strides
+
+ An array containing the number of bytes to jump to get to the next
+ element in each dimension.
+
+ .. c:member:: void *PyArrayInterface.data
+
+ A pointer to the first element of the array.
+
+ .. c:member:: PyObject *PyArrayInterface.descr
+
+ A Python object describing the data-type in more detail (same
+ as the *descr* key in :obj:`__array_interface__`). This can be
+ ``NULL`` if *typekind* and *itemsize* provide enough
+ information. This field is also ignored unless the
+ :c:data:`NPY_ARR_HAS_DESCR` flag is set in *flags*.
+
+
+Internally used structures
+--------------------------
+
+Internally, the code uses some additional Python objects primarily for
+memory management. These types are not accessible directly from
+Python, and are not exposed to the C-API. They are included here only
+for completeness and assistance in understanding the code.
+
+
+.. c:type:: PyUFuncLoopObject
+
+ A loose wrapper for a C-structure that contains the information
+ needed for looping. This is useful if you are trying to understand
+ the ufunc looping code. The :c:type:`PyUFuncLoopObject` is the associated
+ C-structure. It is defined in the ``ufuncobject.h`` header.
+
+.. c:type:: PyUFuncReduceObject
+
+ A loose wrapper for the C-structure that contains the information
+ needed for reduce-like methods of ufuncs. This is useful if you are
+ trying to understand the reduce, accumulate, and reduce-at
+ code. The :c:type:`PyUFuncReduceObject` is the associated C-structure. It
+ is defined in the ``ufuncobject.h`` header.
+
+.. c:type:: PyUFunc_Loop1d
+
+ A simple linked-list of C-structures containing the information needed
+ to define a 1-d loop for a ufunc for every defined signature of a
+ user-defined data-type.
+
+.. c:var:: PyArrayMapIter_Type
+
+ Advanced indexing is handled with this Python type. It is simply a
+ loose wrapper around the C-structure containing the variables
+ needed for advanced array indexing. The associated C-structure,
+ :c:type:`PyArrayMapIterObject`, is useful if you are trying to
+ understand the advanced-index mapping code. It is defined in the
+ ``arrayobject.h`` header. This type is not exposed to Python and
+ could be replaced with a C-structure. As a Python type it takes
+ advantage of reference-counted memory management.
--- /dev/null
+UFunc API
+=========
+
+.. sectionauthor:: Travis E. Oliphant
+
+.. index::
+ pair: ufunc; C-API
+
+
+Constants
+---------
+
+.. c:var:: UFUNC_ERR_{HANDLER}
+
+ ``{HANDLER}`` can be **IGNORE**, **WARN**, **RAISE**, or **CALL**
+
+.. c:var:: UFUNC_{THING}_{ERR}
+
+ ``{THING}`` can be **MASK**, **SHIFT**, or **FPE**, and ``{ERR}`` can
+ be **DIVIDEBYZERO**, **OVERFLOW**, **UNDERFLOW**, or **INVALID**.
+
+.. c:var:: PyUFunc_{VALUE}
+
+ .. c:var:: PyUFunc_One
+
+ .. c:var:: PyUFunc_Zero
+
+ .. c:var:: PyUFunc_MinusOne
+
+ .. c:var:: PyUFunc_ReorderableNone
+
+ .. c:var:: PyUFunc_None
+
+ .. c:var:: PyUFunc_IdentityValue
+
+
+Macros
+------
+
+.. c:macro:: NPY_LOOP_BEGIN_THREADS
+
+ Used in universal function code to release the Python GIL only if
+ ``loop->obj`` is not true (*i.e.* this is not an OBJECT array
+ loop). Requires use of :c:macro:`NPY_BEGIN_THREADS_DEF` in the variable
+ declaration area.
+
+.. c:macro:: NPY_LOOP_END_THREADS
+
+ Used in universal function code to re-acquire the Python GIL if it
+ was released (because ``loop->obj`` was not true).
+
+
+Functions
+---------
+
+.. c:function:: PyObject* PyUFunc_FromFuncAndData( \
+ PyUFuncGenericFunction* func, void** data, char* types, int ntypes, \
+ int nin, int nout, int identity, char* name, char* doc, int unused)
+
+ Create a new broadcasting universal function from required variables.
+ Each ufunc builds around the notion of an element-by-element
+ operation. Each ufunc object contains pointers to 1-d loops
+ implementing the basic functionality for each supported type.
+
+ .. note::
+
+ The *func*, *data*, *types*, *name*, and *doc* arguments are not
+ copied by :c:func:`PyUFunc_FromFuncAndData`. The caller must ensure
+ that the memory used by these arrays is not freed as long as the
+ ufunc object is alive.
+
+ :param func:
+ Must point to an array of length *ntypes* containing
+ :c:type:`PyUFuncGenericFunction` items. These items are pointers to
+ functions that actually implement the underlying
+ (element-by-element) function :math:`N` times with the following
+ signature:
+
+ .. c:function:: void loopfunc(
+ char** args, npy_intp* dimensions, npy_intp* steps, void* data)
+
+ *args*
+
+ An array of pointers to the actual data for the input and output
+ arrays. The input arguments are given first followed by the output
+ arguments.
+
+ *dimensions*
+
+ A pointer to the size of the dimension over which this function is
+ looping.
+
+ *steps*
+
+ A pointer to the number of bytes to jump to get to the
+ next element in this dimension for each of the input and
+ output arguments.
+
+ *data*
+
+ Arbitrary data (extra arguments, function names, *etc.* )
+ that can be stored with the ufunc and will be passed in
+ when it is called.
+
+ This is an example of a func specialized for addition of doubles
+ returning doubles.
+
+ .. code-block:: c
+
+ static void
+ double_add(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *extra)
+ {
+ npy_intp i;
+ npy_intp is1 = steps[0], is2 = steps[1];
+ npy_intp os = steps[2], n = dimensions[0];
+ char *i1 = args[0], *i2 = args[1], *op = args[2];
+ for (i = 0; i < n; i++) {
+ *((double *)op) = *((double *)i1) +
+ *((double *)i2);
+ i1 += is1;
+ i2 += is2;
+ op += os;
+ }
+ }
+
+ :param data:
+ Should be ``NULL`` or a pointer to an array of size *ntypes*.
+ This array may contain arbitrary extra-data to be passed to
+ the corresponding loop function in the func array.
+
+ :param types:
+ Length ``(nin + nout) * ntypes`` array of ``char`` encoding the
+ `numpy.dtype.num` (built-in only) that the corresponding
+ function in the ``func`` array accepts. For instance, for a comparison
+ ufunc with three ``ntypes``, two ``nin`` and one ``nout``, where the
+ first function accepts `numpy.int32` and the second
+ `numpy.int64`, with both returning `numpy.bool_`, ``types`` would
+ be ``(char[]) {5, 5, 0, 7, 7, 0}`` since ``NPY_INT32`` is 5,
+ ``NPY_INT64`` is 7, and ``NPY_BOOL`` is 0.
+
+ The bit-width names can also be used (e.g. :c:data:`NPY_INT32`,
+ :c:data:`NPY_COMPLEX128`) if desired.
+
+ :ref:`ufuncs.casting` will be used at runtime to find the first
+ ``func`` callable by the input/output provided.
+
+ :param ntypes:
+ How many different data-type-specific functions the ufunc has implemented.
+
+ :param nin:
+ The number of inputs to this operation.
+
+ :param nout:
+ The number of outputs.
+
+ :param identity:
+
+ Either :c:data:`PyUFunc_One`, :c:data:`PyUFunc_Zero`,
+ :c:data:`PyUFunc_MinusOne`, or :c:data:`PyUFunc_None`.
+ This specifies what should be returned when
+ an empty array is passed to the reduce method of the ufunc.
+ The special value :c:data:`PyUFunc_IdentityValue` may only be used with
+ the :c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity` method, to
+ allow an arbitrary python object to be used as the identity.
+
+ :param name:
+ The name for the ufunc as a ``NULL`` terminated string. Specifying
+ a name of 'add' or 'multiply' enables a special behavior for
+ integer-typed reductions when no dtype is given. If the input type is an
+ integer (or boolean) data type smaller than the size of the `numpy.int_`
+ data type, it will be internally upcast to the `numpy.int_` (or
+ `numpy.uint`) data type.
+
+ :param doc:
+ Allows passing in a documentation string to be stored with the
+ ufunc. The documentation string should not contain the name
+ of the function or the calling signature as that will be
+ dynamically determined from the object and available when
+ accessing the **__doc__** attribute of the ufunc.
+
+ :param unused:
+ Unused and present for backwards compatibility of the C-API.
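+
+ As a hedged sketch, the ``double_add`` loop shown above could be
+ wired into a one-type ufunc as follows (the names ``add_functions``,
+ ``add_data``, ``add_types`` and ``"my_add"`` are hypothetical; the
+ static arrays must outlive the ufunc object):
+
+ .. code-block:: c
+
+ static PyUFuncGenericFunction add_functions[] = {&double_add};
+ static void *add_data[] = {NULL};
+ static char add_types[] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE};
+
+ /* in the module initialization function: */
+ PyObject *my_add = PyUFunc_FromFuncAndData(
+ add_functions, add_data, add_types,
+ 1, /* ntypes */
+ 2, 1, /* nin, nout */
+ PyUFunc_Zero, /* identity */
+ "my_add", "element-wise addition of doubles",
+ 0); /* unused */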
+
+.. c:function:: PyObject* PyUFunc_FromFuncAndDataAndSignature( \
+ PyUFuncGenericFunction* func, void** data, char* types, int ntypes, \
+ int nin, int nout, int identity, char* name, char* doc, int unused, char *signature)
+
+ This function is very similar to PyUFunc_FromFuncAndData above, but has
+ an extra *signature* argument, to define a
+ :ref:`generalized universal functions <c-api.generalized-ufuncs>`.
+ Similarly to how ufuncs are built around an element-by-element operation,
+ gufuncs are built around subarray-by-subarray operations, the
+ :ref:`signature <details-of-signature>` defining the subarrays to operate on.
+
+ :param signature:
+ The signature for the new gufunc. Setting it to NULL is equivalent
+ to calling PyUFunc_FromFuncAndData. A copy of the string is made,
+ so the passed in buffer can be freed.
+
+.. c:function:: PyObject* PyUFunc_FromFuncAndDataAndSignatureAndIdentity( \
+ PyUFuncGenericFunction *func, void **data, char *types, int ntypes, \
+ int nin, int nout, int identity, char *name, char *doc, int unused, \
+ char *signature, PyObject *identity_value)
+
+ This function is very similar to `PyUFunc_FromFuncAndDataAndSignature` above,
+ but has an extra *identity_value* argument, to define an arbitrary identity
+ for the ufunc when ``identity`` is passed as ``PyUFunc_IdentityValue``.
+
+ :param identity_value:
+ The identity for the new gufunc. Must be passed as ``NULL`` unless the
+ ``identity`` argument is ``PyUFunc_IdentityValue``. Setting it to NULL
+ is equivalent to calling PyUFunc_FromFuncAndDataAndSignature.
+
+
+.. c:function:: int PyUFunc_RegisterLoopForType( \
+ PyUFuncObject* ufunc, int usertype, PyUFuncGenericFunction function, \
+ int* arg_types, void* data)
+
+ This function allows the user to register a 1-d loop with an
+ already-created ufunc, to be used whenever the ufunc is called
+ with any of its input arguments as the user-defined
+ data-type. This is needed in order to make ufuncs work with
+ user-defined data-types. The data-type must have been previously
+ registered with the numpy system. The loop is passed in as
+ *function*. This loop can take arbitrary data which should be
+ passed in as *data*. The data-types the loop requires are passed
+ in as *arg_types*, which must be a pointer to an array of at
+ least ufunc->nargs integers.
+
+.. c:function:: int PyUFunc_RegisterLoopForDescr( \
+ PyUFuncObject* ufunc, PyArray_Descr* userdtype, \
+ PyUFuncGenericFunction function, PyArray_Descr** arg_dtypes, void* data)
+
+ This function behaves like :c:func:`PyUFunc_RegisterLoopForType` above,
+ except that it allows the user to register a 1-d loop using
+ :c:type:`PyArray_Descr` objects instead of dtype type num values. This
+ allows a 1-d loop to be registered for structured array data-types and
+ custom data-types instead of scalar data-types.
+
+.. c:function:: int PyUFunc_ReplaceLoopBySignature( \
+ PyUFuncObject* ufunc, PyUFuncGenericFunction newfunc, int* signature, \
+ PyUFuncGenericFunction* oldfunc)
+
+ Replace a 1-d loop matching the given *signature* in the
+ already-created *ufunc* with the new 1-d loop *newfunc*. Return the
+ old 1-d loop function in *oldfunc*. Return 0 on success and -1 on
+ failure. This function works only with built-in types (use
+ :c:func:`PyUFunc_RegisterLoopForType` for user-defined types). A
+ signature is an array of data-type numbers indicating the inputs
+ followed by the outputs assumed by the 1-d loop.
+
+.. c:function:: int PyUFunc_GenericFunction( \
+ PyUFuncObject* self, PyObject* args, PyObject* kwds, PyArrayObject** mps)
+
+ A generic ufunc call. The ufunc is passed in as *self*, the arguments
+ to the ufunc as *args* and *kwds*. The *mps* argument is an array of
+ :c:type:`PyArrayObject` pointers whose values are discarded and which
+ receive the converted input arguments as well as the ufunc outputs
+ when success is returned. The user is responsible for managing this
+ array and receives a new reference for each array in *mps*. The total
+ number of arrays in *mps* is given by *self* ->nin + *self* ->nout.
+
+ Returns 0 on success, -1 on error.
+
+.. c:function:: int PyUFunc_checkfperr(int errmask, PyObject* errobj)
+
+ A simple interface to the IEEE error-flag checking support. The
+ *errmask* argument is a mask of :c:data:`UFUNC_MASK_{ERR}` bitmasks
+ indicating which errors to check for (and how to check for
+ them). The *errobj* must be a Python tuple with two elements: a
+ string containing the name which will be used in any communication
+ of error and either a callable Python object (call-back function)
+ or :c:data:`Py_None`. The callable object will only be used if
+ :c:data:`UFUNC_ERR_CALL` is set as the desired error checking
+ method. This routine manages the GIL and is safe to call even
+ after releasing the GIL. If an error in the IEEE-compatible
+ hardware is detected, -1 is returned; otherwise 0 is
+ returned.
+
+.. c:function:: void PyUFunc_clearfperr()
+
+ Clear the IEEE error flags.
+
+.. c:function:: void PyUFunc_GetPyValues( \
+ char* name, int* bufsize, int* errmask, PyObject** errobj)
+
+ Get the Python values used for ufunc processing from the
+ thread-local storage area, unless the defaults have been set, in
+ which case the name lookup is bypassed. The name is placed as a
+ string in the first element of *\*errobj*. The second element is
+ the looked-up function to call on error callback. The value of the
+ looked-up buffer-size to use is passed into *bufsize*, and the
+ value of the error mask is placed into *errmask*.
+
+
+Generic functions
+-----------------
+
+At the core of every ufunc is a collection of type-specific functions
+that define the basic functionality for each of the supported types.
+These functions must evaluate the underlying function :math:`N\geq1`
+times. Extra-data may be passed in that may be used during the
+calculation. This feature allows some general functions to be used as
+these basic looping functions. The general function has all the code
+needed to point variables to the right place and set up a function
+call. The general function assumes that the actual function to call is
+passed in as the extra data and calls it with the correct values. All
+of these functions are suitable for placing directly in the array of
+functions stored in the functions member of the PyUFuncObject
+structure.
+
+.. c:function:: void PyUFunc_f_f_As_d_d( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_d_d( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_f_f( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_g_g( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_F_F_As_D_D( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_F_F( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_D_D( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_G_G( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_e_e( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_e_e_As_f_f( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_e_e_As_d_d( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+ Type specific, core 1-d functions for ufuncs where each
+ calculation is obtained by calling a function taking one input
+ argument and returning one output. This function is passed in
+ ``func``. The letters correspond to dtype characters of the
+ supported data types (``e`` - half, ``f`` - float, ``d`` - double,
+ ``g`` - long double, ``F`` - cfloat, ``D`` - cdouble,
+ ``G`` - clongdouble). The argument *func* must support the same
+ signature. The ``_As_X_X`` variants assume ndarrays of one data
+ type but cast the values to use an underlying function that takes
+ a different data type. Thus, :c:func:`PyUFunc_f_f_As_d_d` uses
+ ndarrays of data type :c:data:`NPY_FLOAT` but calls out to a
+ C-function that takes double and returns double.
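+
+ As a hedged sketch, these generic loops let a plain C function be
+ exposed as a ufunc without writing a custom loop: the function is
+ stored in the *data* array and :c:func:`PyUFunc_d_d` calls it for
+ each element (the names ``cos_functions``, ``cos_data`` and
+ ``cos_types`` are hypothetical):
+
+ .. code-block:: c
+
+ #include <math.h>
+
+ static PyUFuncGenericFunction cos_functions[] = {&PyUFunc_d_d};
+ static void *cos_data[] = {(void *)cos}; /* C library cos() */
+ static char cos_types[] = {NPY_DOUBLE, NPY_DOUBLE};
+
+ /* then, in the module initialization function:
+ PyUFunc_FromFuncAndData(cos_functions, cos_data, cos_types,
+ 1, 1, 1, PyUFunc_None, "cos_d",
+ "cosine evaluated as a ufunc", 0); */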
+
+.. c:function:: void PyUFunc_ff_f_As_dd_d( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_ff_f( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_dd_d( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_gg_g( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_FF_F_As_DD_D( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_DD_D( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_FF_F( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_GG_G( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_ee_e( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_ee_e_As_ff_f( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_ee_e_As_dd_d( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+ Type specific, core 1-d functions for ufuncs where each
+ calculation is obtained by calling a function taking two input
+ arguments and returning one output. The underlying function to
+ call is passed in as *func*. The letters correspond to dtype
+ characters of the specific data type supported by the
+ general-purpose function. The argument ``func`` must support the
+ corresponding signature. The ``_As_XX_X`` variants assume ndarrays
+ of one data type but cast the values at each iteration of the loop
+ to use the underlying function that takes a different data type.
+
+.. c:function:: void PyUFunc_O_O( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+.. c:function:: void PyUFunc_OO_O( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+ One-input, one-output, and two-input, one-output core 1-d functions
+ for the :c:data:`NPY_OBJECT` data type. These functions handle reference
+ count issues and return early on error. The actual function to call is
+ *func* and it must accept calls with the signature ``(PyObject*)
+ (PyObject*)`` for :c:func:`PyUFunc_O_O` or ``(PyObject*)(PyObject *,
+ PyObject *)`` for :c:func:`PyUFunc_OO_O`.
+
+.. c:function:: void PyUFunc_O_O_method( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+ This general purpose 1-d core function assumes that *func* is a string
+ representing a method of the input object. For each
+ iteration of the loop, the Python object is extracted from the array
+ and its *func* method is called, returning the result to the output array.
+
+.. c:function:: void PyUFunc_OO_O_method( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+ This general purpose 1-d core function assumes that *func* is a
+ string representing a method of the input object that takes one
+ argument. The first argument in *args* is the object whose method
+ is called, the second argument in *args* is the argument passed to
+ the method. The output of the method is stored in the third entry
+ of *args*.
+
+.. c:function:: void PyUFunc_On_Om( \
+ char** args, npy_intp* dimensions, npy_intp* steps, void* func)
+
+ This is the 1-d core function used by the dynamic ufuncs created
+ by umath.frompyfunc(function, nin, nout). In this case *func* is a
+ pointer to a :c:type:`PyUFunc_PyFuncData` structure which has definition
+
+ .. c:type:: PyUFunc_PyFuncData
+
+ .. code-block:: c
+
+ typedef struct {
+ int nin;
+ int nout;
+ PyObject *callable;
+ } PyUFunc_PyFuncData;
+
+ At each iteration of the loop, the *nin* input objects are extracted
+ from their object arrays and placed into an argument tuple, the Python
+ *callable* is called with the input arguments, and the *nout*
+ outputs are placed into their object arrays.
+
+
+Importing the API
+-----------------
+
+.. c:var:: PY_UFUNC_UNIQUE_SYMBOL
+
+.. c:var:: NO_IMPORT_UFUNC
+
+.. c:function:: void import_ufunc(void)
+
+ These are the constants and functions for accessing the ufunc
+ C-API from extension modules in precisely the same way as the
+ array C-API can be accessed. The ``import_ufunc()`` function must
+ always be called (in the initialization subroutine of the
+ extension module). If your extension module is in one file then
+ that is all that is required. The other two constants are useful
+ if your extension module makes use of multiple files. In that
+ case, define :c:data:`PY_UFUNC_UNIQUE_SYMBOL` to something unique to
+ your code and then in source files that do not contain the module
+ initialization function but still need access to the UFUNC API,
+ define :c:data:`PY_UFUNC_UNIQUE_SYMBOL` to the same name used previously
+ and also define :c:data:`NO_IMPORT_UFUNC`.
+
+ The C-API is actually an array of function pointers. This array is
+ created (and pointed to by a global variable) by ``import_ufunc``. The
+ global variable is either statically defined or allowed to be seen
+ by other files depending on the state of
+ :c:data:`PY_UFUNC_UNIQUE_SYMBOL` and :c:data:`NO_IMPORT_UFUNC`.
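+
+ As a hedged sketch of the multi-file pattern (the symbol name
+ ``MyExt_UFUNC_API`` is hypothetical):
+
+ .. code-block:: c
+
+ /* In the file containing the module initialization function: */
+ #define PY_UFUNC_UNIQUE_SYMBOL MyExt_UFUNC_API
+ #include "numpy/ufuncobject.h"
+ /* ... and call import_ufunc(); inside the init function. */
+
+ /* In every other file that needs the ufunc C-API: */
+ #define PY_UFUNC_UNIQUE_SYMBOL MyExt_UFUNC_API
+ #define NO_IMPORT_UFUNC
+ #include "numpy/ufuncobject.h"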
+
+.. index::
+ pair: ufunc; C-API
Modules in :mod:`numpy.distutils`
=================================
+.. toctree::
+ :maxdepth: 2
-misc_util
----------
+ distutils/misc_util
-.. module:: numpy.distutils.misc_util
+
+.. currentmodule:: numpy.distutils
.. autosummary::
:toctree: generated/
- get_numpy_include_dirs
- dict_append
- appendpath
- allpath
- dot_join
- generate_config_py
- get_cmd
- terminal_has_colors
- red_text
- green_text
- yellow_text
- blue_text
- cyan_text
- cyg2win32
- all_strings
- has_f_sources
- has_cxx_sources
- filter_sources
- get_dependencies
- is_local_src_dir
- get_ext_source_files
- get_script_files
+ ccompiler
+ cpuinfo.cpu
+ core.Extension
+ exec_command
+ log.set_verbosity
+ system_info.get_info
+ system_info.get_standard_file
+
+
+Configuration class
+===================
+.. currentmodule:: numpy.distutils.misc_util
.. class:: Configuration(package_name=None, parent_name=None, top_path=None, package_path=None, **attrs)
.. automethod:: get_info
-Other modules
--------------
-
-.. currentmodule:: numpy.distutils
-
-.. autosummary::
- :toctree: generated/
-
- system_info.get_info
- system_info.get_standard_file
- cpuinfo.cpu
- log.set_verbosity
- exec_command
-
Building Installable C libraries
================================
--- /dev/null
+distutils.misc_util
+===================
+
+.. automodule:: numpy.distutils.misc_util
+ :members:
+ :undoc-members:
+ :exclude-members: Configuration
routines
distutils
distutils_guide
- c-api
+ c-api/index
internals
swig
Item selection and manipulation
-------------------------------
-For array methods that take an *axis* keyword, it defaults to `None`.
-If axis is *None*, then the array is treated as a 1-D array.
-Any other value for *axis* represents the dimension along which
+For array methods that take an ``axis`` keyword, it defaults to None.
+If ``axis`` is None, then the array is treated as a 1-D array.
+Any other value for ``axis`` represents the dimension along which
the operation should proceed.
.. autosummary::
To create a masked array where all values close to 1.e20 are invalid, we would
do::
- >>> z = masked_values([1.0, 1.e20, 3.0, 4.0], 1.e20)
+ >>> z = ma.masked_values([1.0, 1.e20, 3.0, 4.0], 1.e20)
For a complete discussion of creation methods for masked arrays please see
section :ref:`Constructing masked arrays <maskedarray.generic.constructing>`.
>>> x = np.array([1, 2, 3])
>>> x.view(ma.MaskedArray)
- masked_array(data = [1 2 3],
- mask = False,
- fill_value = 999999)
+ masked_array(data=[1, 2, 3],
+ mask=False,
+ fill_value=999999)
>>> x = np.array([(1, 1.), (2, 2.)], dtype=[('a',int), ('b', float)])
>>> x.view(ma.MaskedArray)
- masked_array(data = [(1, 1.0) (2, 2.0)],
- mask = [(False, False) (False, False)],
- fill_value = (999999, 1e+20),
- dtype = [('a', '<i4'), ('b', '<f8')])
+ masked_array(data=[(1, 1.0), (2, 2.0)],
+ mask=[(False, False), (False, False)],
+ fill_value=(999999, 1.e+20),
+ dtype=[('a', '<i8'), ('b', '<f8')])
* Yet another possibility is to use any of the following functions:
>>> x = ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
>>> x[~x.mask]
- masked_array(data = [1 4],
- mask = [False False],
- fill_value = 999999)
+ masked_array(data=[1, 4],
+ mask=[False, False],
+ fill_value=999999)
Another way to retrieve the valid data is to use the :meth:`compressed`
method, which returns a one-dimensional :class:`~numpy.ndarray` (or one of its
>>> x = ma.array([1, 2, 3])
>>> x[0] = ma.masked
>>> x
- masked_array(data = [-- 2 3],
- mask = [ True False False],
- fill_value = 999999)
+ masked_array(data=[--, 2, 3],
+ mask=[ True, False, False],
+ fill_value=999999)
>>> y = ma.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> y[(0, 1, 2), (1, 2, 0)] = ma.masked
>>> y
- masked_array(data =
- [[1 -- 3]
- [4 5 --]
- [-- 8 9]],
- mask =
- [[False True False]
- [False False True]
- [ True False False]],
- fill_value = 999999)
+ masked_array(
+ data=[[1, --, 3],
+ [4, 5, --],
+ [--, 8, 9]],
+ mask=[[False, True, False],
+ [False, False, True],
+ [ True, False, False]],
+ fill_value=999999)
>>> z = ma.array([1, 2, 3, 4])
>>> z[:-2] = ma.masked
>>> z
- masked_array(data = [-- -- 3 4],
- mask = [ True True False False],
- fill_value = 999999)
+ masked_array(data=[--, --, 3, 4],
+ mask=[ True, True, False, False],
+ fill_value=999999)
A second possibility is to modify the :attr:`~MaskedArray.mask` directly,
>>> x = ma.array([1, 2, 3], mask=[0, 0, 1])
>>> x.mask = True
>>> x
- masked_array(data = [-- -- --],
- mask = [ True True True],
- fill_value = 999999)
+ masked_array(data=[--, --, --],
+ mask=[ True, True, True],
+ fill_value=999999,
+ dtype=int64)
Finally, specific entries can be masked and/or unmasked by assigning to the
mask a sequence of booleans::
>>> x = ma.array([1, 2, 3])
>>> x.mask = [0, 1, 0]
>>> x
- masked_array(data = [1 -- 3],
- mask = [False True False],
- fill_value = 999999)
+ masked_array(data=[1, --, 3],
+ mask=[False, True, False],
+ fill_value=999999)
Unmasking an entry
~~~~~~~~~~~~~~~~~~
>>> x = ma.array([1, 2, 3], mask=[0, 0, 1])
>>> x
- masked_array(data = [1 2 --],
- mask = [False False True],
- fill_value = 999999)
+ masked_array(data=[1, 2, --],
+ mask=[False, False, True],
+ fill_value=999999)
>>> x[-1] = 5
>>> x
- masked_array(data = [1 2 5],
- mask = [False False False],
- fill_value = 999999)
+ masked_array(data=[1, 2, 5],
+ mask=[False, False, False],
+ fill_value=999999)
.. note::
Unmasking an entry by direct assignment will silently fail if the masked
>>> x = ma.array([1, 2, 3], mask=[0, 0, 1], hard_mask=True)
>>> x
- masked_array(data = [1 2 --],
- mask = [False False True],
- fill_value = 999999)
+ masked_array(data=[1, 2, --],
+ mask=[False, False, True],
+ fill_value=999999)
>>> x[-1] = 5
>>> x
- masked_array(data = [1 2 --],
- mask = [False False True],
- fill_value = 999999)
+ masked_array(data=[1, 2, --],
+ mask=[False, False, True],
+ fill_value=999999)
>>> x.soften_mask()
+ masked_array(data=[1, 2, --],
+ mask=[False, False, True],
+ fill_value=999999)
>>> x[-1] = 5
>>> x
- masked_array(data = [1 2 5],
- mask = [False False False],
- fill_value = 999999)
+ masked_array(data=[1, 2, 5],
+ mask=[False, False, False],
+ fill_value=999999)
>>> x.harden_mask()
+ masked_array(data=[1, 2, 5],
+ mask=[False, False, False],
+ fill_value=999999)
To unmask all masked entries of a masked array (provided the mask isn't a hard
>>> x = ma.array([1, 2, 3], mask=[0, 0, 1])
>>> x
- masked_array(data = [1 2 --],
- mask = [False False True],
- fill_value = 999999)
+ masked_array(data=[1, 2, --],
+ mask=[False, False, True],
+ fill_value=999999)
>>> x.mask = ma.nomask
>>> x
- masked_array(data = [1 2 3],
- mask = [False False False],
- fill_value = 999999)
-
+ masked_array(data=[1, 2, 3],
+ mask=[False, False, False],
+ fill_value=999999)
Indexing and slicing
>>> x[0]
1
>>> x[-1]
- masked_array(data = --,
- mask = True,
- fill_value = 1e+20)
+ masked
>>> x[-1] is ma.masked
True
>>> y[0]
(1, 2)
>>> y[-1]
- masked_array(data = (3, --),
- mask = (False, True),
- fill_value = (999999, 999999),
- dtype = [('a', '<i4'), ('b', '<i4')])
+ (3, --)
When accessing a slice, the output is a masked array whose
>>> x = ma.array([1, 2, 3, 4, 5], mask=[0, 1, 0, 0, 1])
>>> mx = x[:3]
>>> mx
- masked_array(data = [1 -- 3],
- mask = [False True False],
- fill_value = 999999)
+ masked_array(data=[1, --, 3],
+ mask=[False, True, False],
+ fill_value=999999)
>>> mx[1] = -1
>>> mx
- masked_array(data = [1 -1 3],
- mask = [False False False],
- fill_value = 999999)
+ masked_array(data=[1, -1, 3],
+ mask=[False, False, False],
+ fill_value=999999)
>>> x.mask
- array([False, True, False, False, True])
+ array([False, False, False, False, True])
>>> x.data
array([ 1, -1, 3, 4, 5])
-
Accessing a field of a masked array with structured datatype returns a
:class:`MaskedArray`.
constant whenever the input is masked or falls outside the validity domain::
>>> ma.log([-1, 0, 1, 2])
- masked_array(data = [-- -- 0.0 0.69314718056],
- mask = [ True True False False],
- fill_value = 1e+20)
+ masked_array(data=[--, --, 0.0, 0.6931471805599453],
+ mask=[ True, True, False, False],
+ fill_value=1e+20)
Masked arrays also support standard numpy ufuncs. The output is then a masked
array. The result of a unary ufunc is masked wherever the input is masked. The
>>> x = ma.array([-1, 1, 0, 2, 3], mask=[0, 0, 0, 0, 1])
>>> np.log(x)
- masked_array(data = [-- -- 0.0 0.69314718056 --],
- mask = [ True True False False True],
- fill_value = 1e+20)
-
+ masked_array(data=[--, 0.0, --, 0.6931471805599453, --],
+ mask=[ True, False, True, False, True],
+ fill_value=1e+20)
Examples
>>> import numpy.ma as ma
>>> x = [0.,1.,-9999.,3.,4.]
>>> mx = ma.masked_values (x, -9999.)
- >>> print mx.mean()
+ >>> print(mx.mean())
2.0
- >>> print mx - mx.mean()
+ >>> print(mx - mx.mean())
[-2.0 -1.0 -- 1.0 2.0]
- >>> print mx.anom()
+ >>> print(mx.anom())
[-2.0 -1.0 -- 1.0 2.0]
Suppose now that we wish to print that same data, but with the missing values
replaced by the average value.
- >>> print mx.filled(mx.mean())
+ >>> print(mx.filled(mx.mean()))
[ 0. 1. 2. 3. 4.]
Numerical operations can be easily performed without worrying about missing
values, dividing by zero, square roots of negative numbers, etc.::
- >>> import numpy as np, numpy.ma as ma
+ >>> import numpy.ma as ma
>>> x = ma.array([1., -1., 3., 4., 5., 6.], mask=[0,0,0,0,1,0])
>>> y = ma.array([1., 2., 0., 4., 5., 6.], mask=[0,0,0,0,0,1])
- >>> print np.sqrt(x/y)
+ >>> print(ma.sqrt(x/y))
[1.0 -- -- 1.0 -- --]
Four values of the output are invalid: the first one comes from taking the
Ignoring extreme values
-----------------------
-Let's consider an array ``d`` of random floats between 0 and 1. We wish to
+Let's consider an array ``d`` of floats between 0 and 1. We wish to
compute the average of the values of ``d`` while ignoring any data outside
-the range ``[0.1, 0.9]``::
+the range ``[0.2, 0.9]``::
- >>> print ma.masked_outside(d, 0.1, 0.9).mean()
+ >>> d = np.linspace(0, 1, 20)
+ >>> print(d.mean() - ma.masked_outside(d, 0.2, 0.9).mean())
+ -0.05263157894736836
+++ /dev/null
-:orphan:
-
-BitGenerator
-------------
-
-.. currentmodule:: numpy.random.bit_generator
-
-.. autosummary::
- :toctree: generated/
-
- BitGenerator
-.. _bit_generator:
-
.. currentmodule:: numpy.random
Bit Generators
and can be advanced by an arbitrary amount. See the documentation for
:meth:`~.PCG64.advance`. PCG-64 has a period of :math:`2^{128}`. See the `PCG
author's page`_ for more details about this class of PRNG.
-* MT19937 - The standard Python BitGenerator. Adds a `~mt19937.MT19937.jumped`
+* MT19937 - The standard Python BitGenerator. Adds a `MT19937.jumped`
function that returns a new generator with state as-if :math:`2^{128}` draws have
been made.
* Philox - A counter-based generator capable of being advanced an
.. _`Random123`: https://www.deshawresearch.com/resources_random123.html
.. _`SFC author's page`: http://pracrand.sourceforge.net/RNG_engines.txt
+.. autosummary::
+ :toctree: generated/
+
+ BitGenerator
+
.. toctree::
- :maxdepth: 1
+ :maxdepth: 1
- BitGenerator <bitgenerators>
- MT19937 <mt19937>
- PCG64 <pcg64>
- Philox <philox>
- SFC64 <sfc64>
+ MT19937 <mt19937>
+ PCG64 <pcg64>
+ Philox <philox>
+ SFC64 <sfc64>
Seeding and Entropy
-------------------
non-negative integer, or a list of such integers, as a seed. BitGenerators
need to take those inputs and process them into a high-quality internal state
for the BitGenerator. All of the BitGenerators in numpy delegate that task to
-`~SeedSequence`, which uses hashing techniques to ensure that even low-quality
+`SeedSequence`, which uses hashing techniques to ensure that even low-quality
seeds generate high-quality initial states.
.. code-block:: python
- from numpy.random import PCG64
+ from numpy.random import PCG64
- bg = PCG64(12345678903141592653589793)
+ bg = PCG64(12345678903141592653589793)
.. end_block
.. code-block:: python
- from numpy.random import PCG64, SeedSequence
+ from numpy.random import PCG64, SeedSequence
- # Get the user's seed somehow, maybe through `argparse`.
- # If the user did not provide a seed, it should return `None`.
- seed = get_user_seed()
- ss = SeedSequence(seed)
- print('seed = {}'.format(ss.entropy))
- bg = PCG64(ss)
+ # Get the user's seed somehow, maybe through `argparse`.
+ # If the user did not provide a seed, it should return `None`.
+ seed = get_user_seed()
+ ss = SeedSequence(seed)
+ print('seed = {}'.format(ss.entropy))
+ bg = PCG64(ss)
.. end_block
convenient ways.
.. autosummary::
- :toctree: generated/
+ :toctree: generated/
SeedSequence
- bit_generator.ISeedSequence
- bit_generator.ISpawnableSeedSequence
- bit_generator.SeedlessSeedSequence
-Parallel Congruent Generator (64-bit, PCG64)
---------------------------------------------
+Permuted Congruential Generator (64-bit, PCG64)
+-----------------------------------------------
.. currentmodule:: numpy.random
--- /dev/null
+Cython API for random
+---------------------
+
+.. currentmodule:: numpy.random
+
+Typed versions of many of the `Generator` and `BitGenerator` methods as well as
+the classes themselves can be accessed directly from Cython via
+
+.. code-block:: cython
+
+ cimport numpy.random
+
+C API for random
+----------------
+
+Access to various distributions is available via Cython or C-wrapper libraries
+like CFFI. All the functions accept a :c:type:`bitgen_t` as their first argument.
+
+.. c:type:: bitgen_t
+
+ The :c:type:`bitgen_t` holds the current state of the BitGenerator and
+ pointers to functions that return standard C types while advancing the
+ state.
+
+ .. code-block:: c
+
+      struct bitgen {
+          void *state;
+          npy_uint64 (*next_uint64)(void *st);
+          uint32_t (*next_uint32)(void *st);
+          double (*next_double)(void *st);
+          npy_uint64 (*next_raw)(void *st);
+      };
+
+      typedef struct bitgen bitgen_t;
+
+See :doc:`extending` for examples of using these functions.
+
+The functions are named with the following conventions:
+
+- "standard" refers to the reference values for any parameters. For instance
+ "standard_uniform" means a uniform distribution on the interval ``0.0`` to
+ ``1.0``.
+
+- "fill" functions will fill the provided ``out`` with ``cnt`` values.
+
+- The functions without "standard" in their name require additional parameters
+ to describe the distributions.
+
+- Functions with ``zig`` in the name use a ziggurat lookup algorithm instead
+  of computing the ``log``, which is significantly faster. The non-ziggurat
+  variants are used in corner cases and for legacy compatibility.
+
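+As a rough Python-level illustration (a sketch, not part of the C API
+itself: it relies on the ``ctypes`` interface each BitGenerator exposes),
+the same function pointers can be exercised without writing any C:
+
+.. code-block:: python
+
+    from numpy.random import PCG64
+
+    bg = PCG64(1234)
+    itf = bg.ctypes                # ctypes view of the underlying bitgen_t
+    # draw one double in [0, 1) by calling next_double on the raw state
+    print(itf.next_double(itf.state))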
+
+.. c:function:: double random_standard_uniform(bitgen_t *bitgen_state)
+
+.. c:function:: void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out)
+
+.. c:function:: double random_standard_exponential(bitgen_t *bitgen_state)
+
+.. c:function:: void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out)
+
+.. c:function:: double random_standard_normal(bitgen_t* bitgen_state)
+
+.. c:function:: void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out)
+
+.. c:function:: void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out)
+
+.. c:function:: double random_standard_gamma(bitgen_t *bitgen_state, double shape)
+
+.. c:function:: float random_standard_uniform_f(bitgen_t *bitgen_state)
+
+.. c:function:: void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out)
+
+.. c:function:: float random_standard_exponential_f(bitgen_t *bitgen_state)
+
+.. c:function:: void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out)
+
+.. c:function:: float random_standard_normal_f(bitgen_t* bitgen_state)
+
+.. c:function:: float random_standard_gamma_f(bitgen_t *bitgen_state, float shape)
+
+.. c:function:: double random_normal(bitgen_t *bitgen_state, double loc, double scale)
+
+.. c:function:: double random_gamma(bitgen_t *bitgen_state, double shape, double scale)
+
+.. c:function:: float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale)
+
+.. c:function:: double random_exponential(bitgen_t *bitgen_state, double scale)
+
+.. c:function:: double random_uniform(bitgen_t *bitgen_state, double lower, double range)
+
+.. c:function:: double random_beta(bitgen_t *bitgen_state, double a, double b)
+
+.. c:function:: double random_chisquare(bitgen_t *bitgen_state, double df)
+
+.. c:function:: double random_f(bitgen_t *bitgen_state, double dfnum, double dfden)
+
+.. c:function:: double random_standard_cauchy(bitgen_t *bitgen_state)
+
+.. c:function:: double random_pareto(bitgen_t *bitgen_state, double a)
+
+.. c:function:: double random_weibull(bitgen_t *bitgen_state, double a)
+
+.. c:function:: double random_power(bitgen_t *bitgen_state, double a)
+
+.. c:function:: double random_laplace(bitgen_t *bitgen_state, double loc, double scale)
+
+.. c:function:: double random_gumbel(bitgen_t *bitgen_state, double loc, double scale)
+
+.. c:function:: double random_logistic(bitgen_t *bitgen_state, double loc, double scale)
+
+.. c:function:: double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma)
+
+.. c:function:: double random_rayleigh(bitgen_t *bitgen_state, double mode)
+
+.. c:function:: double random_standard_t(bitgen_t *bitgen_state, double df)
+
+.. c:function:: double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, double nonc)
+.. c:function:: double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, double dfden, double nonc)
+.. c:function:: double random_wald(bitgen_t *bitgen_state, double mean, double scale)
+
+.. c:function:: double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa)
+
+.. c:function:: double random_triangular(bitgen_t *bitgen_state, double left, double mode, double right)
+
+.. c:function:: npy_int64 random_poisson(bitgen_t *bitgen_state, double lam)
+
+.. c:function:: npy_int64 random_negative_binomial(bitgen_t *bitgen_state, double n, double p)
+
+.. c:type:: binomial_t
+
+ .. code-block:: c
+
+ typedef struct s_binomial_t {
+ int has_binomial; /* !=0: following parameters initialized for binomial */
+ double psave;
+ RAND_INT_TYPE nsave;
+ double r;
+ double q;
+ double fm;
+ RAND_INT_TYPE m;
+ double p1;
+ double xm;
+ double xl;
+ double xr;
+ double c;
+ double laml;
+ double lamr;
+ double p2;
+ double p3;
+ double p4;
+ } binomial_t;
+
+
+.. c:function:: npy_int64 random_binomial(bitgen_t *bitgen_state, double p, npy_int64 n, binomial_t *binomial)
+
+.. c:function:: npy_int64 random_logseries(bitgen_t *bitgen_state, double p)
+
+.. c:function:: npy_int64 random_geometric_search(bitgen_t *bitgen_state, double p)
+
+.. c:function:: npy_int64 random_geometric_inversion(bitgen_t *bitgen_state, double p)
+
+.. c:function:: npy_int64 random_geometric(bitgen_t *bitgen_state, double p)
+
+.. c:function:: npy_int64 random_zipf(bitgen_t *bitgen_state, double a)
+
+.. c:function:: npy_int64 random_hypergeometric(bitgen_t *bitgen_state, npy_int64 good, npy_int64 bad, npy_int64 sample)
+
+.. c:function:: npy_uint64 random_interval(bitgen_t *bitgen_state, npy_uint64 max)
+
+.. c:function:: void random_multinomial(bitgen_t *bitgen_state, npy_int64 n, npy_int64 *mnix, double *pix, npy_intp d, binomial_t *binomial)
+
+.. c:function:: int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, npy_int64 total, size_t num_colors, npy_int64 *colors, npy_int64 nsample, size_t num_variates, npy_int64 *variates)
+
+.. c:function:: void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, npy_int64 total, size_t num_colors, npy_int64 *colors, npy_int64 nsample, size_t num_variates, npy_int64 *variates)
+
+Generate a single integer
+
+.. c:function:: npy_int64 random_positive_int64(bitgen_t *bitgen_state)
+
+.. c:function:: npy_int32 random_positive_int32(bitgen_t *bitgen_state)
+
+.. c:function:: npy_int64 random_positive_int(bitgen_t *bitgen_state)
+
+.. c:function:: npy_uint64 random_uint(bitgen_t *bitgen_state)
+
+
+Generate random uint64 numbers in closed interval [off, off + rng].
+
+.. c:function:: npy_uint64 random_bounded_uint64(bitgen_t *bitgen_state, npy_uint64 off, npy_uint64 rng, npy_uint64 mask, bool use_masked)
+
+
--- /dev/null
+Extending via CFFI
+------------------
+
+.. literalinclude:: ../../../../../numpy/random/_examples/cffi/extending.py
+ :language: python
--- /dev/null
+extending.pyx
+-------------
+
+.. literalinclude:: ../../../../../../numpy/random/_examples/cython/extending.pyx
+ :language: cython
--- /dev/null
+extending_distributions.pyx
+---------------------------
+
+.. literalinclude:: ../../../../../../numpy/random/_examples/cython/extending_distributions.pyx
+ :language: cython
--- /dev/null
+
+.. _extending_cython_example:
+
+Extending `numpy.random` via Cython
+-----------------------------------
+
+
+.. toctree::
+ setup.py.rst
+ extending.pyx
+ extending_distributions.pyx
--- /dev/null
+setup.py
+--------
+
+.. literalinclude:: ../../../../../../numpy/random/_examples/cython/setup.py
+ :language: python
--- /dev/null
+Extending via Numba
+-------------------
+
+.. literalinclude:: ../../../../../numpy/random/_examples/numba/extending.py
+ :language: python
--- /dev/null
+Extending via Numba and CFFI
+----------------------------
+
+.. literalinclude:: ../../../../../numpy/random/_examples/numba/extending_distributions.py
+ :language: python
--- /dev/null
+.. currentmodule:: numpy.random
+
+.. _extending:
+
+Extending
+---------
+The BitGenerators have been designed to be extendable using standard tools for
+high-performance Python -- numba and Cython. The `~Generator` object can also
+be used with user-provided BitGenerators as long as these export a small set of
+required functions.
+
+Numba
+=====
+Numba can be used with either CTypes or CFFI. The current iteration of the
+BitGenerators all export a small set of functions through both interfaces.
+
+This example shows how numba can be used to produce Gaussian samples using
+a pure Python implementation which is then compiled. The random numbers are
+provided by ``ctypes.next_double``.
+
+.. literalinclude:: ../../../../numpy/random/_examples/numba/extending.py
+ :language: python
+ :end-before: example 2
+
+Both CTypes and CFFI allow the more complicated distributions to be used
+directly in Numba after compiling the file distributions.c into a ``DLL`` or
+``so``. An example showing the use of a more complicated distribution is in
+the `examples` section below.
+
+.. _random_cython:
+
+Cython
+======
+
+Cython can be used to unpack the ``PyCapsule`` provided by a BitGenerator.
+This example uses `PCG64` and the example from above. The usual caveats
+for writing high-performance code using Cython -- removing bounds checks and
+wrap around, providing array alignment information -- still apply.
+
+.. literalinclude:: ../../../../numpy/random/_examples/cython/extending_distributions.pyx
+ :language: cython
+ :end-before: example 2
+
+The BitGenerator can also be directly accessed using the members of the basic
+RNG structure.
+
+.. literalinclude:: ../../../../numpy/random/_examples/cython/extending_distributions.pyx
+ :language: cython
+ :start-after: example 2
+
+See :ref:`extending_cython_example` for a complete working example including a
+minimal setup and cython files.
+
+CFFI
+====
+
+CFFI can be used to directly access the functions in
+``include/numpy/random/distributions.h``. Some "massaging" of the header
+file is required:
+
+.. literalinclude:: ../../../../numpy/random/_examples/cffi/extending.py
+ :language: python
+ :end-before: dlopen
+
+Once the header is parsed by ``ffi.cdef``, the functions can be accessed
+directly from the ``_generator`` shared object, using the `BitGenerator.cffi` interface.
+
+.. literalinclude:: ../../../../numpy/random/_examples/cffi/extending.py
+ :language: python
+ :start-after: dlopen
+
+
+New Basic RNGs
+==============
+`~Generator` can be used with other user-provided BitGenerators. The simplest
+way to write a new BitGenerator is to examine the pyx file of one of the
+existing BitGenerators. The key structure that must be provided is the
+``capsule`` which contains a ``PyCapsule`` to a struct pointer of type
+``bitgen_t``,
+
+.. code-block:: c
+
+ typedef struct bitgen {
+ void *state;
+ uint64_t (*next_uint64)(void *st);
+ uint32_t (*next_uint32)(void *st);
+ double (*next_double)(void *st);
+ uint64_t (*next_raw)(void *st);
+ } bitgen_t;
+
+which provides 5 pointers. The first is an opaque pointer to the data structure
+used by the BitGenerators. The next three are function pointers which return
+the next 64- and 32-bit unsigned integers, the next random double and the next
+raw value. This final function is used for testing and so can be set to
+the next 64-bit unsigned integer function if not needed. Functions inside
+``Generator`` use this structure as in
+
+.. code-block:: c
+
+ bitgen_state->next_uint64(bitgen_state->state)
+
+Examples
+========
+
+.. toctree::
+ Numba <examples/numba>
+ CFFI + Numba <examples/numba_cffi>
+ Cython <examples/cython/index>
+ CFFI <examples/cffi>
.. autosummary::
:toctree: generated/
- ~Generator.bit_generator
+ ~numpy.random.Generator.bit_generator
Simple random data
==================
.. autosummary::
:toctree: generated/
- ~Generator.integers
- ~Generator.random
- ~Generator.choice
- ~Generator.bytes
+ ~numpy.random.Generator.integers
+ ~numpy.random.Generator.random
+ ~numpy.random.Generator.choice
+ ~numpy.random.Generator.bytes
Permutations
============
.. autosummary::
:toctree: generated/
- ~Generator.shuffle
- ~Generator.permutation
+ ~numpy.random.Generator.shuffle
+ ~numpy.random.Generator.permutation
Distributions
=============
.. autosummary::
:toctree: generated/
- ~Generator.beta
- ~Generator.binomial
- ~Generator.chisquare
- ~Generator.dirichlet
- ~Generator.exponential
- ~Generator.f
- ~Generator.gamma
- ~Generator.geometric
- ~Generator.gumbel
- ~Generator.hypergeometric
- ~Generator.laplace
- ~Generator.logistic
- ~Generator.lognormal
- ~Generator.logseries
- ~Generator.multinomial
- ~Generator.multivariate_normal
- ~Generator.negative_binomial
- ~Generator.noncentral_chisquare
- ~Generator.noncentral_f
- ~Generator.normal
- ~Generator.pareto
- ~Generator.poisson
- ~Generator.power
- ~Generator.rayleigh
- ~Generator.standard_cauchy
- ~Generator.standard_exponential
- ~Generator.standard_gamma
- ~Generator.standard_normal
- ~Generator.standard_t
- ~Generator.triangular
- ~Generator.uniform
- ~Generator.vonmises
- ~Generator.wald
- ~Generator.weibull
- ~Generator.zipf
+ ~numpy.random.Generator.beta
+ ~numpy.random.Generator.binomial
+ ~numpy.random.Generator.chisquare
+ ~numpy.random.Generator.dirichlet
+ ~numpy.random.Generator.exponential
+ ~numpy.random.Generator.f
+ ~numpy.random.Generator.gamma
+ ~numpy.random.Generator.geometric
+ ~numpy.random.Generator.gumbel
+ ~numpy.random.Generator.hypergeometric
+ ~numpy.random.Generator.laplace
+ ~numpy.random.Generator.logistic
+ ~numpy.random.Generator.lognormal
+ ~numpy.random.Generator.logseries
+ ~numpy.random.Generator.multinomial
+ ~numpy.random.Generator.multivariate_hypergeometric
+ ~numpy.random.Generator.multivariate_normal
+ ~numpy.random.Generator.negative_binomial
+ ~numpy.random.Generator.noncentral_chisquare
+ ~numpy.random.Generator.noncentral_f
+ ~numpy.random.Generator.normal
+ ~numpy.random.Generator.pareto
+ ~numpy.random.Generator.poisson
+ ~numpy.random.Generator.power
+ ~numpy.random.Generator.rayleigh
+ ~numpy.random.Generator.standard_cauchy
+ ~numpy.random.Generator.standard_exponential
+ ~numpy.random.Generator.standard_gamma
+ ~numpy.random.Generator.standard_normal
+ ~numpy.random.Generator.standard_t
+ ~numpy.random.Generator.triangular
+ ~numpy.random.Generator.uniform
+ ~numpy.random.Generator.vonmises
+ ~numpy.random.Generator.wald
+ ~numpy.random.Generator.weibull
+ ~numpy.random.Generator.zipf
number of different BitGenerators. It exposes many different probability
distributions. See `NEP 19 <https://www.numpy.org/neps/
nep-0019-rng-policy.html>`_ for context on the updated NumPy random number
-routines. The legacy `.RandomState` random number routines are still
+routines. The legacy `RandomState` random number routines are still
available, but limited to a single BitGenerator.
-For convenience and backward compatibility, a single `~.RandomState`
+For convenience and backward compatibility, a single `RandomState`
instance's methods are imported into the numpy.random namespace, see
:ref:`legacy` for the complete list.
+.. _random-quick-start:
+
Quick Start
-----------
-By default, `~Generator` uses bits provided by `~pcg64.PCG64` which
-has better statistical properties than the legacy mt19937 random
-number generator in `~.RandomState`.
+Call `default_rng` to get a new instance of a `Generator`, then call its
+methods to obtain samples from different distributions. By default,
+`Generator` uses bits provided by `PCG64` which has better statistical
+properties than the legacy `MT19937` used in `RandomState`.
.. code-block:: python
- # Uses the old numpy.random.RandomState
+ # Do this
+ from numpy.random import default_rng
+ rng = default_rng()
+ vals = rng.standard_normal(10)
+ more_vals = rng.standard_normal(10)
+
+ # instead of this
from numpy import random
- random.standard_normal()
+ vals = random.standard_normal(10)
+ more_vals = random.standard_normal(10)
-`~Generator` can be used as a replacement for `~.RandomState`. Both class
-instances now hold a internal `BitGenerator` instance to provide the bit
+`Generator` can be used as a replacement for `RandomState`. Both class
+instances hold an internal `BitGenerator` instance to provide the bit
stream, which is accessible as ``gen.bit_generator``. Some long-overdue API
cleanup means that legacy and compatibility methods have been removed from
-`~.Generator`
+`Generator`
=================== ============== ============
-`~.RandomState` `~.Generator` Notes
+`RandomState` `Generator` Notes
------------------- -------------- ------------
``random_sample``, ``random`` Compatible with `random.random`
``rand``
``randint``, ``integers`` Add an ``endpoint`` kwarg
``random_integers``
------------------- -------------- ------------
-``tomaxint`` removed Use ``integers(0, np.iinfo(np.int).max,``
- ``endpoint=False)``
+``tomaxint`` removed Use ``integers(0, np.iinfo(np.int_).max,``
+ ``endpoint=False)``
------------------- -------------- ------------
-``seed`` removed Use `~.SeedSequence.spawn`
+``seed`` removed Use `SeedSequence.spawn`
=================== ============== ============
-See `new-or-different` for more information
-
-.. code-block:: python
-
- # As replacement for RandomState(); default_rng() instantiates Generator with
- # the default PCG64 BitGenerator.
- from numpy.random import default_rng
- rg = default_rng()
- rg.standard_normal()
- rg.bit_generator
+See :ref:`new-or-different` for more information.
Something like the following code can be used to support both ``RandomState``
and ``Generator``, with the understanding that the interfaces are slightly
a = rg_integers(1000)
Seeds can be passed to any of the BitGenerators. The provided value is mixed
-via `~.SeedSequence` to spread a possible sequence of seeds across a wider
-range of initialization states for the BitGenerator. Here `~.PCG64` is used and
-is wrapped with a `~.Generator`.
+via `SeedSequence` to spread a possible sequence of seeds across a wider
+range of initialization states for the BitGenerator. Here `PCG64` is used and
+is wrapped with a `Generator`.
.. code-block:: python
Introduction
------------
The new infrastructure takes a different approach to producing random numbers
-from the `~.RandomState` object. Random number generation is separated into
+from the `RandomState` object. Random number generation is separated into
two components, a bit generator and a random generator.
The `BitGenerator` has a limited set of responsibilities. It manages state
alternative bit generators to be used with little code duplication.
The `Generator` is the user-facing object that is nearly identical to
-`.RandomState`. The canonical method to initialize a generator passes a
-`~.PCG64` bit generator as the sole argument.
+`RandomState`. The canonical method to initialize a generator passes a
+`PCG64` bit generator as the sole argument.
.. code-block:: python
rg.random()
One can also instantiate `Generator` directly with a `BitGenerator` instance.
-To use the older `~mt19937.MT19937` algorithm, one can instantiate it directly
+To use the older `MT19937` algorithm, one can instantiate it directly
and pass it to `Generator`.
.. code-block:: python
The Box-Muller method used to produce NumPy's normals is no longer available
in `Generator`. It is not possible to reproduce the exact random
values using Generator for the normal distribution or any other
- distribution that relies on the normal such as the `.RandomState.gamma` or
- `.RandomState.standard_t`. If you require bitwise backward compatible
- streams, use `.RandomState`.
+ distribution that relies on the normal such as the `RandomState.gamma` or
+ `RandomState.standard_t`. If you require bitwise backward compatible
+ streams, use `RandomState`.
* The Generator's normal, exponential and gamma functions use 256-step Ziggurat
methods which are 2-10 times faster than NumPy's Box-Muller or inverse CDF
* Optional ``out`` argument that allows existing arrays to be filled for
select distributions
* All BitGenerators can produce doubles, uint64s and uint32s via CTypes
-* `~.Generator.integers` is now the canonical way to generate integer
+ (`PCG64.ctypes`) and CFFI (`PCG64.cffi`). This allows the bit generators
+ to be used in numba.
+* The bit generators can be used in downstream projects via
+ :ref:`Cython <random_cython>`.
+* `Generator.integers` is now the canonical way to generate integer
random numbers from a discrete uniform distribution. The ``rand`` and
- ``randn`` methods are only available through the legacy `~.RandomState`.
+ ``randn`` methods are only available through the legacy `RandomState`.
The ``endpoint`` keyword can be used to specify open or closed intervals.
This replaces both ``randint`` and the deprecated ``random_integers``.
-* `~.Generator.random` is now the canonical way to generate floating-point
- random numbers, which replaces `.RandomState.random_sample`,
- `.RandomState.sample`, and `.RandomState.ranf`. This is consistent with
+* `Generator.random` is now the canonical way to generate floating-point
+ random numbers, which replaces `RandomState.random_sample`,
+ `RandomState.sample`, and `RandomState.ranf`. This is consistent with
Python's `random.random`.
-* All BitGenerators in numpy use `~SeedSequence` to convert seeds into
+* All BitGenerators in numpy use `SeedSequence` to convert seeds into
initialized states.
See :ref:`new-or-different` for a complete list of improvements and
Multithreaded Generation <multithreading>
new-or-different
Comparing Performance <performance>
+ c-api
+ Examples of using Numba, Cython, CFFI <extending>
-Original Source
-~~~~~~~~~~~~~~~
+Original Source of the Generator and BitGenerators
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This package was developed independently of NumPy and was integrated in version
1.17.0. The original repo is at https://github.com/bashtage/randomgen.
+
~RandomState.wald
~RandomState.weibull
~RandomState.zipf
+
+Functions in `numpy.random`
+===========================
+Many of the `RandomState` methods above are exported as functions in
+`numpy.random`. This usage is discouraged, as it is implemented via a global
+`RandomState` instance, which is inadvisable on two counts:
+
+- It uses global state, which means results depend on call order and will
+  change as the surrounding code changes
+
+- It uses a `RandomState` rather than the more modern `Generator`.
+
+For backward compatibility we cannot change this. See
+:ref:`random-quick-start`.
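+
+For example, the two styles compare as follows (a sketch; the seed value is
+arbitrary):
+
+.. code-block:: python
+
+    import numpy as np
+    from numpy.random import default_rng
+
+    # discouraged: draws from the hidden global RandomState
+    np.random.seed(12345)
+    np.random.standard_normal(2)
+
+    # preferred: state is local to the Generator instance
+    rng = default_rng(12345)
+    rng.standard_normal(2)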
+
+.. autosummary::
+ :toctree: generated/
+
+ beta
+ binomial
+ bytes
+ chisquare
+ choice
+ dirichlet
+ exponential
+ f
+ gamma
+ geometric
+ get_state
+ gumbel
+ hypergeometric
+ laplace
+ logistic
+ lognormal
+ logseries
+ multinomial
+ multivariate_normal
+ negative_binomial
+ noncentral_chisquare
+ noncentral_f
+ normal
+ pareto
+ permutation
+ poisson
+ power
+ rand
+ randint
+ randn
+ random
+ random_integers
+ random_sample
+ ranf
+ rayleigh
+ sample
+ seed
+ set_state
+ shuffle
+ standard_cauchy
+ standard_exponential
+ standard_gamma
+ standard_normal
+ standard_t
+ triangular
+ uniform
+ vonmises
+ wald
+ weibull
+ zipf
+
self.n = n
self.executor = concurrent.futures.ThreadPoolExecutor(threads)
self.values = np.empty(n)
- self.step = np.ceil(n / threads).astype(np.int)
+ self.step = np.ceil(n / threads).astype(np.int_)
def fill(self):
def _fill(random_state, out, first, last):
The Box-Muller method used to produce NumPy's normals is no longer available
in `Generator`. It is not possible to reproduce the exact random
values using ``Generator`` for the normal distribution or any other
- distribution that relies on the normal such as the `gamma` or
- `standard_t`. If you require bitwise backward compatible
- streams, use `RandomState`.
+ distribution that relies on the normal such as the `Generator.gamma` or
+ `Generator.standard_t`. If you require bitwise backward compatible
+ streams, use `RandomState`, i.e., `RandomState.gamma` or
+ `RandomState.standard_t`.
Quick comparison of legacy `mtrand <legacy>`_ to the new `Generator`
Feature Older Equivalent Notes
------------------ -------------------- -------------
`~.Generator` `~.RandomState` ``Generator`` requires a stream
- source, called a `BitGenerator
- <bit_generators>` A number of these
- are provided. ``RandomState`` uses
+ source, called a `BitGenerator`
+ A number of these are provided.
+ ``RandomState`` uses
the Mersenne Twister `~.MT19937` by
default, but can also be instantiated
with any BitGenerator.
``randn`` methods are only available through the legacy `~.RandomState`.
This replaces both ``randint`` and the deprecated ``random_integers``.
* The Box-Muller method used to produce NumPy's normals is no longer available.
-* All bit generators can produce doubles, uint64s and uint32s via CTypes
- (`~PCG64.ctypes`) and CFFI (`~PCG64.cffi`). This allows these bit generators
- to be used in numba.
+* All bit generators can produce doubles, uint64s and
+ uint32s via CTypes (`~PCG64.ctypes`) and CFFI (`~PCG64.cffi`).
+ This allows these bit generators to be used in numba.
* The bit generators can be used in downstream projects via
Cython.
+
.. ipython:: python
from numpy.random import Generator, PCG64
are turned into high quality initial states (at least, with very high
probability).
-For example, `~mt19937.MT19937` has a state consisting of 624
+For example, `MT19937` has a state consisting of 624
`uint32` integers. A naive way to take a 32-bit integer seed would be to just set
the last element of the state to the 32-bit seed and leave the rest 0s. This is
-a valid state for `~mt19937.MT19937`, but not a good one. The Mersenne Twister
+a valid state for `MT19937`, but not a good one. The Mersenne Twister
algorithm `suffers if there are too many 0s`_. Similarly, two adjacent 32-bit
integer seeds (i.e. ``12345`` and ``12346``) would produce very similar
streams.
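+`SeedSequence` avoids this: even adjacent integer seeds are hashed into
+thoroughly different initial state words, e.g. (a sketch):
+
+.. code-block:: python
+
+    from numpy.random import SeedSequence
+
+    SeedSequence(12345).generate_state(4)   # 4 uint32 state words
+    SeedSequence(12346).generate_state(4)   # unrelated to the line above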
.. [2] In this calculation, we can ignore the amount of numbers drawn from each
stream. Each of the PRNGs we provide has some extra protection built in
that avoids overlaps if the `~SeedSequence` pools differ in the
- slightest bit. `~pcg64.PCG64` has :math:`2^{127}` separate cycles
+ slightest bit. `PCG64` has :math:`2^{127}` separate cycles
determined by the seed in addition to the position in the
:math:`2^{128}` long period for each cycle, so one has to both get on or
near the same cycle *and* seed a nearby position in the cycle.
- `~philox.Philox` has completely independent cycles determined by the seed.
- `~sfc64.SFC64` incorporates a 64-bit counter so every unique seed is at
+ `Philox` has completely independent cycles determined by the seed.
+ `SFC64` incorporates a 64-bit counter so every unique seed is at
least :math:`2^{64}` iterations away from any other seed. And
- finally, `~mt19937.MT19937` has just an unimaginably huge period. Getting
- a collision internal to `~SeedSequence` is the way a failure would be
+ finally, `MT19937` has just an unimaginably huge period. Getting
+ a collision internal to `SeedSequence` is the way a failure would be
observed.
.. _`implements an algorithm`: http://www.pcg-random.org/posts/developing-a-seed_seq-alternative.html
Independent Streams
-------------------
-:class:`~philox.Philox` is a counter-based RNG based which generates values by
+`Philox` is a counter-based RNG which generates values by
encrypting an incrementing counter using weak cryptographic primitives. The
seed determines the key that is used for the encryption. Unique keys create
-unique, independent streams. :class:`~philox.Philox` lets you bypass the
+unique, independent streams. `Philox` lets you bypass the
seeding algorithm to directly set the 128-bit key. Similar, but different, keys
will still create independent streams.
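+
+For instance (a sketch; the key values are arbitrary):
+
+.. code-block:: python
+
+    from numpy.random import Generator, Philox
+
+    # distinct 128-bit keys yield independent streams
+    streams = [Generator(Philox(key=key)) for key in range(10)]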
Recommendation
**************
-The recommended generator for general use is :class:`~pcg64.PCG64`. It is
+The recommended generator for general use is `PCG64`. It is
statistically high quality, full-featured, and fast on most platforms, but
somewhat slow when compiled for 32-bit processes.
-:class:`~philox.Philox` is fairly slow, but its statistical properties have
+`Philox` is fairly slow, but its statistical properties have
very high quality, and it is easy to get assuredly-independent stream by using
unique keys. If that is the style you wish to use for parallel streams, or you
are porting from another system that uses that style, then
-:class:`~philox.Philox` is your choice.
+`Philox` is your choice.
-:class:`~sfc64.SFC64` is statistically high quality and very fast. However, it
+`SFC64` is statistically high quality and very fast. However, it
lacks jumpability. If you are not using that capability and want lots of speed,
even on 32-bit processes, this is your choice.
-:class:`~mt19937.MT19937` `fails some statistical tests`_ and is not especially
+`MT19937` `fails some statistical tests`_ and is not especially
fast compared to modern PRNGs. For these reasons, we mostly do not recommend
using it on its own, only through the legacy `~.RandomState` for
reproducing old results. That said, it has a very long history as a default in
*******
The timings below are the time in ns to produce 1 random value from a
-specific distribution. The original :class:`~mt19937.MT19937` generator is
+specific distribution. The original `MT19937` generator is
much slower since it requires two 32-bit values to equal the output of the
faster generators.
Integer performance has a similar ordering.
The pattern is similar for other, more complex generators. The normal
-performance of the legacy :class:`~.RandomState` generator is much
+performance of the legacy `RandomState` generator is much
lower than the other since it uses the Box-Muller transformation rather
than the Ziggurat generator. The performance gap for Exponentials is also
large due to the cost of computing the log function to invert the CDF.
The column labeled MT19937 uses the same 32-bit generator as
-:class:`~.RandomState` but produces random values using
-:class:`~Generator`.
+`RandomState` but produces random values using
+`Generator`.
.. csv-table::
:header: ,MT19937,PCG64,Philox,SFC64,RandomState
Poissons,67.6,52.4,69.2,46.4,78.1
The next table presents the performance in percentage relative to values
-generated by the legacy generator, `RandomState(MT19937())`. The overall
+generated by the legacy generator, ``RandomState(MT19937())``. The overall
performance was computed using a geometric mean.
.. csv-table::
:toctree: generated/
copyto
+ shape
Changing array shape
====================
less_equal
greater
less
+ compare_chararrays
String information
------------------
ma.MaskedArray.tobytes
-Pickling and unpickling
-~~~~~~~~~~~~~~~~~~~~~~~
-.. autosummary::
- :toctree: generated/
-
- ma.dump
- ma.dumps
- ma.load
- ma.loads
-
-
Filling a masked array
~~~~~~~~~~~~~~~~~~~~~~
.. autosummary::
shares_memory
may_share_memory
+ byte_bounds
Array mixins
------------
:toctree: generated/
lib.NumpyVersion
+
+Utility
+-------
+
+.. autosummary::
+ :toctree: generated/
+
+ get_include
+ deprecate
+ deprecate_with_doc
+
+Matlab-like Functions
+---------------------
+.. autosummary::
+ :toctree: generated/
+
+ who
+ disp
\ No newline at end of file
.. autosummary::
:toctree: generated/
- decorators.deprecated
- decorators.knownfailureif
- decorators.setastest
- decorators.skipif
- decorators.slow
+ dec.deprecated
+ dec.knownfailureif
+ dec.setastest
+ dec.skipif
+ dec.slow
decorate_methods
Test Running
- *d* acts like a (5,6) array where the single value is repeated.
-.. _ufuncs.output-type:
+.. _ufuncs-output-type:
Output type determination
=========================
.. admonition:: Figure
- Code segment showing the "can cast safely" table for a 32-bit system.
+ Code segment showing the "can cast safely" table for a 64-bit system.
+ Generally the output depends on the system; your system might result in
+ a different table.
+ >>> mark = {False: ' -', True: ' Y'}
>>> def print_table(ntypes):
- ... print 'X',
- ... for char in ntypes: print char,
- ... print
+ ... print('X ' + ' '.join(ntypes))
... for row in ntypes:
- ... print row,
+ ... print(row, end='')
... for col in ntypes:
- ... print int(np.can_cast(row, col)),
- ... print
+ ... print(mark[np.can_cast(row, col)], end='')
+ ... print()
+ ...
>>> print_table(np.typecodes['All'])
X ? b h i l q p B H I L Q P e f d g F D G S U V O M m
- ? 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
- b 0 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0
- h 0 0 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0
- i 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- l 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- q 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- p 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- B 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0
- H 0 0 0 1 1 1 1 0 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 0
- I 0 0 0 0 1 1 1 0 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- L 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- Q 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- P 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- e 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0
- f 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0
- d 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- g 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 1 1 1 1 0 0
- F 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 0 0
- D 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0
- G 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0
- S 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0
- U 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0
- V 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0
- O 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0
- M 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
- m 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
-
+ ? Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y
+ b - Y Y Y Y Y Y - - - - - - Y Y Y Y Y Y Y Y Y Y Y - Y
+ h - - Y Y Y Y Y - - - - - - - Y Y Y Y Y Y Y Y Y Y - Y
+ i - - - Y Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ l - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ q - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ p - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ B - - Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y
+ H - - - Y Y Y Y - Y Y Y Y Y - Y Y Y Y Y Y Y Y Y Y - Y
+ I - - - - Y Y Y - - Y Y Y Y - - Y Y - Y Y Y Y Y Y - Y
+ L - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - -
+ Q - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - -
+ P - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - -
+ e - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y Y - -
+ f - - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y - -
+ d - - - - - - - - - - - - - - - Y Y - Y Y Y Y Y Y - -
+ g - - - - - - - - - - - - - - - - Y - - Y Y Y Y Y - -
+ F - - - - - - - - - - - - - - - - - Y Y Y Y Y Y Y - -
+ D - - - - - - - - - - - - - - - - - - Y Y Y Y Y Y - -
+ G - - - - - - - - - - - - - - - - - - - Y Y Y Y Y - -
+ S - - - - - - - - - - - - - - - - - - - - Y Y Y Y - -
+ U - - - - - - - - - - - - - - - - - - - - - Y Y Y - -
+ V - - - - - - - - - - - - - - - - - - - - - - Y Y - -
+ O - - - - - - - - - - - - - - - - - - - - - - Y Y - -
+ M - - - - - - - - - - - - - - - - - - - - - - Y Y Y -
+ m - - - - - - - - - - - - - - - - - - - - - - Y Y - Y
You should note that, while included in the table for completeness,
the 'S', 'U', and 'V' types cannot be operated on by ufuncs. Also,
.. versionadded:: 1.10
The 'out' keyword argument is expected to be a tuple with one entry per
- output (which can be `None` for arrays to be allocated by the ufunc).
+ output (which can be None for arrays to be allocated by the ufunc).
For ufuncs with a single output, passing a single array (instead of a
tuple holding a single array) is also valid.
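+
+For example (a sketch of both accepted forms):
+
+.. code-block:: python
+
+    import numpy as np
+
+    x = np.arange(5.0)
+    out = np.empty_like(x)
+    np.sin(x, out=(out,))   # tuple form, one entry per output
+    np.sin(x, out=out)      # single array, valid for one-output ufuncs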
The *axis* keyword specifies the axis of the array over which the reduction
will take place (with negative values counting backwards). Generally, it is an
integer, though for :meth:`ufunc.reduce`, it can also be a tuple of `int` to
-reduce over several axes at once, or `None`, to reduce over all axes.
+reduce over several axes at once, or None, to reduce over all axes.
The *dtype* keyword allows you to manage a very common problem that arises
when naively using :meth:`ufunc.reduce`. Sometimes you may
have an array of a certain data type and wish to add up all of its
Release Notes
*************
-.. include:: ../release/1.17.5-notes.rst
-.. include:: ../release/1.17.4-notes.rst
-.. include:: ../release/1.17.3-notes.rst
-.. include:: ../release/1.17.2-notes.rst
-.. include:: ../release/1.17.1-notes.rst
-.. include:: ../release/1.17.0-notes.rst
-.. include:: ../release/1.16.6-notes.rst
-.. include:: ../release/1.16.5-notes.rst
-.. include:: ../release/1.16.4-notes.rst
-.. include:: ../release/1.16.3-notes.rst
-.. include:: ../release/1.16.2-notes.rst
-.. include:: ../release/1.16.1-notes.rst
-.. include:: ../release/1.16.0-notes.rst
-.. include:: ../release/1.15.4-notes.rst
-.. include:: ../release/1.15.3-notes.rst
-.. include:: ../release/1.15.2-notes.rst
-.. include:: ../release/1.15.1-notes.rst
-.. include:: ../release/1.15.0-notes.rst
-.. include:: ../release/1.14.6-notes.rst
-.. include:: ../release/1.14.5-notes.rst
-.. include:: ../release/1.14.4-notes.rst
-.. include:: ../release/1.14.3-notes.rst
-.. include:: ../release/1.14.2-notes.rst
-.. include:: ../release/1.14.1-notes.rst
-.. include:: ../release/1.14.0-notes.rst
-.. include:: ../release/1.13.3-notes.rst
-.. include:: ../release/1.13.2-notes.rst
-.. include:: ../release/1.13.1-notes.rst
-.. include:: ../release/1.13.0-notes.rst
-.. include:: ../release/1.12.1-notes.rst
-.. include:: ../release/1.12.0-notes.rst
-.. include:: ../release/1.11.3-notes.rst
-.. include:: ../release/1.11.2-notes.rst
-.. include:: ../release/1.11.1-notes.rst
-.. include:: ../release/1.11.0-notes.rst
-.. include:: ../release/1.10.4-notes.rst
-.. include:: ../release/1.10.3-notes.rst
-.. include:: ../release/1.10.2-notes.rst
-.. include:: ../release/1.10.1-notes.rst
-.. include:: ../release/1.10.0-notes.rst
-.. include:: ../release/1.9.2-notes.rst
-.. include:: ../release/1.9.1-notes.rst
-.. include:: ../release/1.9.0-notes.rst
-.. include:: ../release/1.8.2-notes.rst
-.. include:: ../release/1.8.1-notes.rst
-.. include:: ../release/1.8.0-notes.rst
-.. include:: ../release/1.7.2-notes.rst
-.. include:: ../release/1.7.1-notes.rst
-.. include:: ../release/1.7.0-notes.rst
-.. include:: ../release/1.6.2-notes.rst
-.. include:: ../release/1.6.1-notes.rst
-.. include:: ../release/1.6.0-notes.rst
-.. include:: ../release/1.5.0-notes.rst
-.. include:: ../release/1.4.0-notes.rst
-.. include:: ../release/1.3.0-notes.rst
+.. toctree::
+ :maxdepth: 3
+
+ 1.18.0 <release/1.18.0-notes>
+ 1.17.4 <release/1.17.4-notes>
+ 1.17.3 <release/1.17.3-notes>
+ 1.17.2 <release/1.17.2-notes>
+ 1.17.1 <release/1.17.1-notes>
+ 1.17.0 <release/1.17.0-notes>
+ 1.16.5 <release/1.16.5-notes>
+ 1.16.4 <release/1.16.4-notes>
+ 1.16.3 <release/1.16.3-notes>
+ 1.16.2 <release/1.16.2-notes>
+ 1.16.1 <release/1.16.1-notes>
+ 1.16.0 <release/1.16.0-notes>
+ 1.15.4 <release/1.15.4-notes>
+ 1.15.3 <release/1.15.3-notes>
+ 1.15.2 <release/1.15.2-notes>
+ 1.15.1 <release/1.15.1-notes>
+ 1.15.0 <release/1.15.0-notes>
+ 1.14.6 <release/1.14.6-notes>
+ 1.14.5 <release/1.14.5-notes>
+ 1.14.4 <release/1.14.4-notes>
+ 1.14.3 <release/1.14.3-notes>
+ 1.14.2 <release/1.14.2-notes>
+ 1.14.1 <release/1.14.1-notes>
+ 1.14.0 <release/1.14.0-notes>
+ 1.13.3 <release/1.13.3-notes>
+ 1.13.2 <release/1.13.2-notes>
+ 1.13.1 <release/1.13.1-notes>
+ 1.13.0 <release/1.13.0-notes>
+ 1.12.1 <release/1.12.1-notes>
+ 1.12.0 <release/1.12.0-notes>
+ 1.11.3 <release/1.11.3-notes>
+ 1.11.2 <release/1.11.2-notes>
+ 1.11.1 <release/1.11.1-notes>
+ 1.11.0 <release/1.11.0-notes>
+ 1.10.4 <release/1.10.4-notes>
+ 1.10.3 <release/1.10.3-notes>
+ 1.10.2 <release/1.10.2-notes>
+ 1.10.1 <release/1.10.1-notes>
+ 1.10.0 <release/1.10.0-notes>
+ 1.9.2 <release/1.9.2-notes>
+ 1.9.1 <release/1.9.1-notes>
+ 1.9.0 <release/1.9.0-notes>
+ 1.8.2 <release/1.8.2-notes>
+ 1.8.1 <release/1.8.1-notes>
+ 1.8.0 <release/1.8.0-notes>
+ 1.7.2 <release/1.7.2-notes>
+ 1.7.1 <release/1.7.1-notes>
+ 1.7.0 <release/1.7.0-notes>
+ 1.6.2 <release/1.6.2-notes>
+ 1.6.1 <release/1.6.1-notes>
+ 1.6.0 <release/1.6.0-notes>
+ 1.5.0 <release/1.5.0-notes>
+ 1.4.0 <release/1.4.0-notes>
+ 1.3.0 <release/1.3.0-notes>
--- /dev/null
+==========================
+NumPy 1.10.0 Release Notes
+==========================
+
+This release supports Python 2.6 - 2.7 and 3.2 - 3.5.
+
+
+Highlights
+==========
+* numpy.distutils now supports parallel compilation via the --parallel/-j
+ argument passed to setup.py build
+* numpy.distutils now supports additional customization via site.cfg to
+ control compilation parameters, i.e. runtime libraries, extra
+ linking/compilation flags.
+* Addition of *np.linalg.multi_dot*: compute the dot product of two or more
+ arrays in a single function call, while automatically selecting the fastest
+ evaluation order.
+* The new function `np.stack` provides a general interface for joining a
+ sequence of arrays along a new axis, complementing `np.concatenate` for
+ joining along an existing axis.
+* Addition of `nanprod` to the set of nanfunctions.
+* Support for the '@' operator in Python 3.5.
+
+Dropped Support
+===============
+
+* The _dotblas module has been removed. CBLAS Support is now in
+ Multiarray.
+* The testcalcs.py file has been removed.
+* The polytemplate.py file has been removed.
+* npy_PyFile_Dup and npy_PyFile_DupClose have been removed from
+ npy_3kcompat.h.
+* splitcmdline has been removed from numpy/distutils/exec_command.py.
+* try_run and get_output have been removed from
+ numpy/distutils/command/config.py
+* The a._format attribute is no longer supported for array printing.
+* Keywords ``skiprows`` and ``missing`` removed from np.genfromtxt.
+* Keyword ``old_behavior`` removed from np.correlate.
+
+Future Changes
+==============
+
+* In array comparisons like ``arr1 == arr2``, many corner cases
+ involving strings or structured dtypes that used to return scalars
+ now issue ``FutureWarning`` or ``DeprecationWarning``, and in the
+  future will be changed to either perform elementwise comparisons or
+ raise an error.
+* In ``np.lib.split`` an empty array in the result always had dimension
+ ``(0,)`` no matter the dimensions of the array being split. In Numpy 1.11
+ that behavior will be changed so that the dimensions will be preserved. A
+ ``FutureWarning`` for this change has been in place since Numpy 1.9 but,
+ due to a bug, sometimes no warning was raised and the dimensions were
+ already preserved.
+* The SafeEval class will be removed in Numpy 1.11.
+* The alterdot and restoredot functions will be removed in Numpy 1.11.
+
+See below for more details on these changes.
+
+Compatibility notes
+===================
+
+Default casting rule change
+---------------------------
+Default casting for inplace operations has changed to ``'same_kind'``. For
+instance, if n is an array of integers, and f is an array of floats, then
+``n += f`` will result in a ``TypeError``, whereas in previous Numpy
+versions the floats would be silently cast to ints. In the unlikely case
+that the example code is not an actual bug, it can be updated in a backward
+compatible way by rewriting it as ``np.add(n, f, out=n, casting='unsafe')``.
+The old ``'unsafe'`` default has been deprecated since Numpy 1.7.
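+
+For example (a sketch of the new rule and the explicit opt-out):
+
+.. code-block:: python
+
+    import numpy as np
+
+    n = np.arange(3)              # integer array
+    f = np.full(3, 0.5)           # float array
+    # n += f                      # now raises TypeError under 'same_kind'
+    np.add(n, f, out=n, casting='unsafe')   # old behavior, made explicit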
+
+numpy version string
+--------------------
+The numpy version string for development builds has been changed from
+``x.y.z.dev-githash`` to ``x.y.z.dev0+githash`` (note the +) in order to comply
+with PEP 440.
+
+relaxed stride checking
+-----------------------
+NPY_RELAXED_STRIDE_CHECKING is now true by default.
+
+UPDATE: In 1.10.2 the default value of NPY_RELAXED_STRIDE_CHECKING was
+changed to false for back compatibility reasons. More time is needed before
+it can be made the default. As part of the roadmap a deprecation of
+dimension changing views of f_contiguous not c_contiguous arrays was also
+added.
+
+Concatenation of 1d arrays along any but ``axis=0`` raises ``IndexError``
+-------------------------------------------------------------------------
+Using axis != 0 has raised a DeprecationWarning since NumPy 1.7; it now
+raises an error.
+
+*np.ravel*, *np.diagonal* and *np.diag* now preserve subtypes
+-------------------------------------------------------------
+There was inconsistent behavior between *x.ravel()* and *np.ravel(x)*, as
+well as between *x.diagonal()* and *np.diagonal(x)*, with the methods
+preserving subtypes while the functions did not. This has been fixed and
+the functions now behave like the methods, preserving subtypes except in
+the case of matrices. Matrices are special cased for backward
+compatibility and still return 1-D arrays as before. If you need to
+preserve the matrix subtype, use the methods instead of the functions.
+
+*rollaxis* and *swapaxes* always return a view
+----------------------------------------------
+Previously, a view was returned except when no change was made in the order
+of the axes, in which case the input array was returned. A view is now
+returned in all cases.
+
+*nonzero* now returns base ndarrays
+-----------------------------------
+Previously, an inconsistency existed between 1-D inputs (returning a
+base ndarray) and higher dimensional ones (which preserved subclasses).
+Behavior has been unified, and the return will now be a base ndarray.
+Subclasses can still override this behavior by providing their own
+*nonzero* method.
+
+C API
+-----
+The changes to *swapaxes* also apply to the *PyArray_SwapAxes* C function,
+which now returns a view in all cases.
+
+The changes to *nonzero* also apply to the *PyArray_Nonzero* C function,
+which now returns a base ndarray in all cases.
+
+The dtype structure (PyArray_Descr) has a new member at the end to cache
+its hash value. This shouldn't affect any well-written applications.
+
+The change to the concatenation DeprecationWarning described above also
+affects *PyArray_ConcatenateArrays*.
+
+recarray field return types
+---------------------------
+Previously the returned types for recarray fields accessed by attribute and by
+index were inconsistent, and fields of string type were returned as chararrays.
+Now, fields accessed by either attribute or indexing will return an ndarray for
+fields of non-structured type, and a recarray for fields of structured type.
+Notably, this affects recarrays containing strings with whitespace, as trailing
+whitespace is trimmed from chararrays but kept in ndarrays of string type.
+Also, the dtype.type of nested structured fields is now inherited.
+
+recarray views
+--------------
+Viewing an ndarray as a recarray now automatically converts the dtype to
+np.record. See new record array documentation. Additionally, viewing a recarray
+with a non-structured dtype no longer converts the result's type to ndarray -
+the result will remain a recarray.
+
+'out' keyword argument of ufuncs now accepts tuples of arrays
+-------------------------------------------------------------
+When using the 'out' keyword argument of a ufunc, a tuple of arrays, one per
+ufunc output, can be provided. For ufuncs with a single output a single array
+is also a valid 'out' keyword argument. The previous behavior, in which a
+single array provided in the 'out' keyword argument was used as the first
+output for ufuncs with multiple outputs, is deprecated and will result in a
+`DeprecationWarning` now and an error in the future.
+
+byte-array indices now raise an IndexError
+-------------------------------------------
+Indexing an ndarray using a byte-string in Python 3 now raises an IndexError
+instead of a ValueError.
+
+Masked arrays containing objects with arrays
+--------------------------------------------
+For such (rare) masked arrays, getting a single masked item no longer returns a
+corrupted masked array, but a fully masked version of the item.
+
+Median warns and returns nan when invalid values are encountered
+----------------------------------------------------------------
+Similar to mean, median and percentile now emit a ``RuntimeWarning`` and
+return `NaN` in slices where a `NaN` is present.
+To compute the median or percentile while ignoring invalid values use the
+new `nanmedian` or `nanpercentile` functions.
+
+Functions available from numpy.ma.testutils have changed
+--------------------------------------------------------
+All functions from numpy.testing were once available from
+numpy.ma.testutils but not all of them were redefined to work with masked
+arrays. Most of those functions have now been removed from
+numpy.ma.testutils with a small subset retained in order to preserve
+backward compatibility. In the long run this should help avoid mistaken use
+of the wrong functions, but it may cause import problems for some.
+
+
+New Features
+============
+
+Reading extra flags from site.cfg
+---------------------------------
+Previously, customization of the compilation of dependency libraries and
+numpy itself was only achievable via code changes in the distutils package.
+Now numpy.distutils reads in the following extra flags from each group of the
+*site.cfg*:
+
+* ``runtime_library_dirs/rpath``, sets runtime library directories to override
+ ``LD_LIBRARY_PATH``
+* ``extra_compile_args``, add extra flags to the compilation of sources
+* ``extra_link_args``, add extra flags when linking libraries
+
+This should, at least partially, complete user customization.
+
+*np.cbrt* to compute cube root for real floats
+----------------------------------------------
+*np.cbrt* wraps the C99 cube root function *cbrt*.
+Compared to *np.power(x, 1./3.)* it is well defined for negative real floats
+and a bit faster.
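+
+For example (a sketch):
+
+.. code-block:: python
+
+    import numpy as np
+
+    np.cbrt([-8.0, 27.0])        # array([-2.,  3.])
+    np.power(-8.0, 1./3.)        # nan: not defined for a negative base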
+
+numpy.distutils now allows parallel compilation
+-----------------------------------------------
+By passing *--parallel=n* or *-j n* to *setup.py build* the compilation of
+extensions is now performed in *n* parallel processes.
+The parallelization is limited to files within one extension so projects using
+Cython will not profit because it builds extensions from single files.
+
+*genfromtxt* has a new ``max_rows`` argument
+--------------------------------------------
+A ``max_rows`` argument has been added to *genfromtxt* to limit the
+number of rows read in a single call. Using this functionality, it is
+possible to read in multiple arrays stored in a single file by making
+repeated calls to the function.
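+
+For example (a sketch; ``StringIO`` stands in for a real file):
+
+.. code-block:: python
+
+    from io import StringIO
+    import numpy as np
+
+    data = StringIO("1 2\n3 4\n5 6\n7 8\n")
+    first = np.genfromtxt(data, max_rows=2)   # rows 1-2
+    rest = np.genfromtxt(data, max_rows=2)    # rows 3-4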
+
+New function *np.broadcast_to* for invoking array broadcasting
+--------------------------------------------------------------
+*np.broadcast_to* manually broadcasts an array to a given shape according to
+numpy's broadcasting rules. The functionality is similar to broadcast_arrays,
+which in fact has been rewritten to use broadcast_to internally, but only a
+single array is necessary.
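+
+For example (a sketch):
+
+.. code-block:: python
+
+    import numpy as np
+
+    x = np.arange(3)              # shape (3,)
+    np.broadcast_to(x, (2, 3))    # read-only view with shape (2, 3)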
+
+New context manager *clear_and_catch_warnings* for testing warnings
+-------------------------------------------------------------------
+When Python emits a warning, it records that this warning has been emitted in
+the module that caused the warning, in a module attribute
+``__warningregistry__``. Once this has happened, it is not possible to emit
+the warning again, unless you clear the relevant entry in
+``__warningregistry__``. This makes it hard and fragile to test warnings,
+because if your test comes after another that has already caused the warning,
+you will not be able to emit the warning or test it. The context manager
+``clear_and_catch_warnings`` clears warnings from the module registry on entry
+and resets them on exit, meaning that warnings can be re-raised.
+
+*cov* has new ``fweights`` and ``aweights`` arguments
+-----------------------------------------------------
+The ``fweights`` and ``aweights`` arguments add new functionality to
+covariance calculations by applying two types of weighting to observation
+vectors. An array of ``fweights`` indicates the number of repeats of each
+observation vector, and an array of ``aweights`` provides their relative
+importance or probability.
+
+Support for the '@' operator in Python 3.5+
+-------------------------------------------
+Python 3.5 adds support for a matrix multiplication operator '@' proposed
+in PEP465. Preliminary support for that has been implemented, and an
+equivalent function ``matmul`` has also been added for testing purposes and
+use in earlier Python versions. The function is preliminary and the order
+and number of its optional arguments can be expected to change.
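+
+For example, using the function form, which also works before Python 3.5::
+
+ >>> a = np.ones((2, 3)); b = np.ones((3, 4))
+ >>> np.matmul(a, b).shape            # same as (a @ b).shape on Python 3.5+
+ (2, 4)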
+
+New argument ``norm`` to fft functions
+--------------------------------------
+The default normalization leaves the direct transforms unscaled and scales
+the inverse transforms by :math:`1/n`. It is possible to obtain unitary
+transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
+`None`) so that both direct and inverse transforms will be scaled by
+:math:`1/\\sqrt{n}`.
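+
+For example, transforming a unit impulse of length 4::
+
+ >>> a = np.zeros(4); a[0] = 1.0
+ >>> np.fft.fft(a, norm="ortho")      # scaled by 1/sqrt(4)
+ array([ 0.5+0.j,  0.5+0.j,  0.5+0.j,  0.5+0.j])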
+
+
+Improvements
+============
+
+*np.digitize* using binary search
+---------------------------------
+*np.digitize* is now implemented in terms of *np.searchsorted*. This means
+that a binary search is used to bin the values, which scales much better
+for larger number of bins than the previous linear search. It also removes
+the requirement for the input array to be 1-dimensional.
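+
+For example::
+
+ >>> np.digitize([0.2, 6.4, 3.0], bins=[0.0, 1.0, 2.5, 4.0, 10.0])
+ array([1, 4, 3])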
+
+*np.poly* now casts integer inputs to float
+-------------------------------------------
+*np.poly* will now cast 1-dimensional input arrays of integer type to double
+precision floating point, to prevent integer overflow when computing the monic
+polynomial. It is still possible to obtain higher precision results by
+passing in an array of object type, filled e.g. with Python ints.
+
+*np.interp* can now be used with periodic functions
+---------------------------------------------------
+*np.interp* now has a new parameter *period* that supplies the period of the
+input data *xp*. In that case, the input data is properly normalized to the
+given period and one end point is added to each extremity of *xp* in order to
+close the previous and the next period cycles, resulting in the correct
+interpolation behavior.
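+
+A small sketch with a period of 1 (values are illustrative)::
+
+ >>> xp = [0.0, 0.25, 0.5, 0.75]
+ >>> fp = [0.0, 1.0, 0.0, -1.0]
+ >>> np.interp(1.25, xp, fp, period=1.0)   # 1.25 wraps around to 0.25
+ 1.0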
+
+*np.pad* supports more input types for ``pad_width`` and ``constant_values``
+----------------------------------------------------------------------------
+The ``constant_values`` parameter now accepts NumPy arrays and float values.
+NumPy arrays are supported as input for ``pad_width``, and an exception is
+raised if its values are not of integral type.
+
+*np.argmax* and *np.argmin* now support an ``out`` argument
+-----------------------------------------------------------
+The ``out`` parameter was added to *np.argmax* and *np.argmin* for consistency
+with *ndarray.argmax* and *ndarray.argmin*. The new parameter behaves exactly
+as it does in those methods.
+
+More system C99 complex functions detected and used
+---------------------------------------------------
+All of the functions in ``complex.h`` are now detected. There are new
+fallback implementations of the following functions:
+
+* npy_ctan
+* npy_cacos, npy_casin, npy_catan
+* npy_ccosh, npy_csinh, npy_ctanh
+* npy_cacosh, npy_casinh, npy_catanh
+
+As a result of these improvements, there will be some small changes in
+returned values, especially for corner cases.
+
+*np.loadtxt* support for the strings produced by the ``float.hex`` method
+-------------------------------------------------------------------------
+The strings produced by ``float.hex`` look like ``0x1.921fb54442d18p+1``
+and can now be read by *np.loadtxt*; note that this is not the hex notation
+used to represent unsigned integer types.
+
+*np.isclose* properly handles minimal values of integer dtypes
+--------------------------------------------------------------
+In order to properly handle minimal values of integer types, *np.isclose* will
+now cast to the float dtype during comparisons. This aligns its behavior with
+what was provided by *np.allclose*.
+
+*np.allclose* uses *np.isclose* internally
+-------------------------------------------
+*np.allclose* now uses *np.isclose* internally and inherits the ability to
+compare NaNs as equal by setting ``equal_nan=True``. Subclasses, such as
+*np.ma.MaskedArray*, are also preserved now.
+
+*np.genfromtxt* now handles large integers correctly
+----------------------------------------------------
+*np.genfromtxt* now correctly handles integers larger than ``2**31-1`` on
+32-bit systems and larger than ``2**63-1`` on 64-bit systems (it previously
+crashed with an ``OverflowError`` in these cases). Integers larger than
+``2**63-1`` are converted to floating-point values.
+
+*np.load*, *np.save* have pickle backward compatibility flags
+-------------------------------------------------------------
+
+The functions *np.load* and *np.save* have additional keyword
+arguments for controlling backward compatibility of pickled Python
+objects. This enables Numpy on Python 3 to load npy files containing
+object arrays that were generated on Python 2.
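+
+For instance, an object-array file written by Python 2 (the file name is
+hypothetical) can be loaded on Python 3 with::
+
+ >>> data = np.load('legacy_py2.npy', encoding='latin1')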
+
+MaskedArray support for more complicated base classes
+-----------------------------------------------------
+Built-in assumptions that the baseclass behaved like a plain array are being
+removed. In particular, setting and getting elements and ranges will respect
+baseclass overrides of ``__setitem__`` and ``__getitem__``, and arithmetic
+will respect overrides of ``__add__``, ``__sub__``, etc.
+
+Changes
+=======
+
+dotblas functionality moved to multiarray
+-----------------------------------------
+The cblas versions of dot, inner, and vdot have been integrated into
+the multiarray module. In particular, vdot is now a multiarray function,
+which it was not before.
+
+stricter check of gufunc signature compliance
+---------------------------------------------
+Inputs to generalized universal functions are now more strictly checked
+against the function's signature: all core dimensions are now required to
+be present in input arrays; core dimensions with the same label must have
+the exact same size; and output core dimensions must be specified, either
+by an input core dimension with the same label or by a passed-in output
+array.
+
+views returned from *np.einsum* are writeable
+---------------------------------------------
+Views returned by *np.einsum* will now be writeable whenever the input
+array is writeable.
+
+*np.argmin* skips NaT values
+----------------------------
+
+*np.argmin* now skips NaT values in datetime64 and timedelta64 arrays,
+making it consistent with *np.min*, *np.argmax* and *np.max*.
+
+
+Deprecations
+============
+
+Array comparisons involving strings or structured dtypes
+--------------------------------------------------------
+
+Normally, comparison operations on arrays perform elementwise
+comparisons and return arrays of booleans. But in some corner cases,
+especially those involving strings or structured dtypes, NumPy has
+historically returned a scalar instead. For example::
+
+ ### Current behaviour
+
+ np.arange(2) == "foo"
+ # -> False
+
+ np.arange(2) < "foo"
+ # -> True on Python 2, error on Python 3
+
+ np.ones(2, dtype="i4,i4") == np.ones(2, dtype="i4,i4,i4")
+ # -> False
+
+Continuing work started in 1.9, in 1.10 these comparisons will now
+raise ``FutureWarning`` or ``DeprecationWarning``, and in the future
+they will be modified to behave more consistently with other
+comparison operations, e.g.::
+
+ ### Future behaviour
+
+ np.arange(2) == "foo"
+ # -> array([False, False])
+
+ np.arange(2) < "foo"
+ # -> error, strings and numbers are not orderable
+
+ np.ones(2, dtype="i4,i4") == np.ones(2, dtype="i4,i4,i4")
+ # -> [False, False]
+
+SafeEval
+--------
+The SafeEval class in numpy/lib/utils.py is deprecated and will be removed
+in the next release.
+
+alterdot, restoredot
+--------------------
+The alterdot and restoredot functions no longer do anything, and are
+deprecated.
+
+pkgload, PackageLoader
+----------------------
+These ways of loading packages are now deprecated.
+
+bias, ddof arguments to corrcoef
+--------------------------------
+
+The values for the ``bias`` and ``ddof`` arguments to the ``corrcoef``
+function canceled in the division implied by the correlation coefficient and
+so had no effect on the returned values.
+
+We now deprecate these arguments to ``corrcoef`` and the masked array version
+``ma.corrcoef``.
+
+Because we are deprecating the ``bias`` argument to ``ma.corrcoef``, we also
+deprecate the use of the ``allow_masked`` argument as a positional argument,
+as its position will change with the removal of ``bias``. ``allow_masked``
+will in due course become a keyword-only argument.
+
+dtype string representation changes
+-----------------------------------
+Since 1.6, creating a dtype object from its string representation, e.g.
+``'f4'``, would issue a deprecation warning if the size did not correspond
+to an existing type, and default to creating a dtype of the default size
+for the type. Starting with this release, this will now raise a ``TypeError``.
+
+The only exception is object dtypes, where both ``'O4'`` and ``'O8'`` will
+still issue a deprecation warning. This platform-dependent representation
+will raise an error in the next release.
+
+In preparation for this upcoming change, the string representation of an
+object dtype, i.e. ``np.dtype(object).str``, no longer includes the item
+size, i.e. will return ``'|O'`` instead of ``'|O4'`` or ``'|O8'`` as
+before.
--- /dev/null
+==========================
+NumPy 1.10.1 Release Notes
+==========================
+
+This release deals with a few build problems that showed up in 1.10.0. Most
+users would not have seen these problems. The differences are:
+
+* Compiling with msvc9 or msvc10 for 32 bit Windows now requires SSE2.
+ This was the easiest fix for what looked to be some miscompiled code when
+ SSE2 was not used. If you need to compile for 32 bit Windows systems
+ without SSE2 support, mingw32 should still work.
+
+* Make compiling with VS2008 python2.7 SDK easier
+
+* Change Intel compiler options so that code will also be generated to
+ support systems without SSE4.2.
+
+* Some _config test functions needed an explicit integer return in
+ order to avoid the openSUSE rpmlinter erring out.
+
+* We ran into a problem with PyPI not allowing reuse of filenames and a
+ resulting proliferation of *.*.*.postN releases. Not only were the names
+ getting out of hand, some packages were unable to work with the postN
+ suffix.
+
+
+Numpy 1.10.1 supports Python 2.6 - 2.7 and 3.2 - 3.5.
+
+
+Commits:
+
+45a3d84 DEP: Remove warning for `full` when dtype is set.
+0c1a5df BLD: import setuptools to allow compile with VS2008 python2.7 sdk
+04211c6 BUG: mask nan to 1 in ordered compare
+826716f DOC: Document the reason msvc requires SSE2 on 32 bit platforms.
+49fa187 BLD: enable SSE2 for 32-bit msvc 9 and 10 compilers
+dcbc4cc MAINT: remove Wreturn-type warnings from config checks
+d6564cb BLD: do not build exclusively for SSE4.2 processors
+15cb66f BLD: do not build exclusively for SSE4.2 processors
+c38bc08 DOC: fix var. reference in percentile docstring
+78497f4 DOC: Sync 1.10.0-notes.rst in 1.10.x branch with master.
+
--- /dev/null
+==========================
+NumPy 1.10.2 Release Notes
+==========================
+
+This release deals with a number of bugs that turned up in 1.10.1 and
+adds various build and release improvements.
+
+Numpy 1.10.2 supports Python 2.6 - 2.7 and 3.2 - 3.5.
+
+
+Compatibility notes
+===================
+
+Relaxed stride checking is no longer the default
+------------------------------------------------
+There were back compatibility problems involving views changing the dtype of
+multidimensional Fortran arrays that need to be dealt with over a longer
+timeframe.
+
+Fix swig bug in ``numpy.i``
+---------------------------
+Relaxed stride checking revealed a bug in ``array_is_fortran(a)``, which was
+using PyArray_ISFORTRAN to check for Fortran contiguity instead of
+PyArray_IS_F_CONTIGUOUS. You may want to regenerate swigged files using the
+updated numpy.i.
+
+Deprecate views changing dimensions in fortran order
+----------------------------------------------------
+This deprecates assignment of a new descriptor to the dtype attribute of
+a non-C-contiguous array if it results in changing the shape. This
+effectively bars viewing a multidimensional Fortran array using a dtype
+that changes the element size along the first axis.
+
+The reason for the deprecation is that, when relaxed strides checking is
+enabled, arrays that are both C and Fortran contiguous are always treated
+as C contiguous, which breaks some code that depended on the two being mutually
+exclusive for non-scalar arrays of ndim > 1. This deprecation prepares the
+way to always enable relaxed stride checking.
+
+
+Issues Fixed
+============
+
+* gh-6019 Masked array repr fails for structured array with multi-dimensional column.
+* gh-6462 Median of empty array produces IndexError.
+* gh-6467 Performance regression for record array access.
+* gh-6468 numpy.interp uses 'left' value even when x[0]==xp[0].
+* gh-6475 np.allclose returns a memmap when one of its arguments is a memmap.
+* gh-6491 Error in broadcasting stride_tricks array.
+* gh-6495 Unrecognized command line option '-ffpe-summary' in gfortran.
+* gh-6497 Failure of reduce operation on recarrays.
+* gh-6498 Mention change in default casting rule in 1.10 release notes.
+* gh-6530 The partition function errors out on empty input.
+* gh-6532 numpy.inner return wrong inaccurate value sometimes.
+* gh-6563 Intent(out) broken in recent versions of f2py.
+* gh-6569 Cannot run tests after 'python setup.py build_ext -i'
+* gh-6572 Error in broadcasting stride_tricks array component.
+* gh-6575 BUG: Split produces empty arrays with wrong number of dimensions
+* gh-6590 Fortran Array problem in numpy 1.10.
+* gh-6602 Random __all__ missing choice and dirichlet.
+* gh-6611 ma.dot no longer always returns a masked array in 1.10.
+* gh-6618 NPY_FORTRANORDER in make_fortran() in numpy.i
+* gh-6636 Memory leak in nested dtypes in numpy.recarray
+* gh-6641 Subsetting recarray by fields yields a structured array.
+* gh-6667 ma.make_mask handles ma.nomask input incorrectly.
+* gh-6675 Optimized blas detection broken in master and 1.10.
+* gh-6678 Getting unexpected error from: X.dtype = complex (or Y = X.view(complex))
+* gh-6718 f2py test fail in pip installed numpy-1.10.1 in virtualenv.
+* gh-6719 Error compiling Cython file: Pythonic division not allowed without gil.
+* gh-6771 Numpy.rec.fromarrays losing dtype metadata between versions 1.9.2 and 1.10.1
+* gh-6781 The travis-ci script in maintenance/1.10.x needs fixing.
+* gh-6807 Windows testing errors for 1.10.2
+
+
+Merged PRs
+==========
+
+The following PRs have been merged into 1.10.2. When the PR is a backport,
+the PR number for the original PR against master is listed.
+
+* gh-5773 MAINT: Hide testing helper tracebacks when using them with pytest.
+* gh-6094 BUG: Fixed a bug with string representation of masked structured arrays.
+* gh-6208 MAINT: Speedup field access by removing unneeded safety checks.
+* gh-6460 BUG: Replacing the os.environ.clear by less invasive procedure.
+* gh-6470 BUG: Fix AttributeError in numpy distutils.
+* gh-6472 MAINT: Use Python 3.5 instead of 3.5-dev for travis 3.5 testing.
+* gh-6474 REL: Update Paver script for sdist and auto-switch test warnings.
+* gh-6478 BUG: Fix Intel compiler flags for OS X build.
+* gh-6481 MAINT: LIBPATH with spaces is now supported Python 2.7+ and Win32.
+* gh-6487 BUG: Allow nested use of parameters in definition of arrays in f2py.
+* gh-6488 BUG: Extend common blocks rather than overwriting in f2py.
+* gh-6499 DOC: Mention that default casting for inplace operations has changed.
+* gh-6500 BUG: Recarrays viewed as subarrays don't convert to np.record type.
+* gh-6501 REL: Add "make upload" command for built docs, update "make dist".
+* gh-6526 BUG: Fix use of __doc__ in setup.py for -OO mode.
+* gh-6527 BUG: Fix the IndexError when taking the median of an empty array.
+* gh-6537 BUG: Make ma.atleast_* with scalar argument return arrays.
+* gh-6538 BUG: Fix ma.masked_values does not shrink mask if requested.
+* gh-6546 BUG: Fix inner product regression for non-contiguous arrays.
+* gh-6553 BUG: Fix partition and argpartition error for empty input.
+* gh-6556 BUG: Error in broadcast_arrays with as_strided array.
+* gh-6558 MAINT: Minor update to "make upload" doc build command.
+* gh-6562 BUG: Disable view safety checks in recarray.
+* gh-6567 BUG: Revert some import * fixes in f2py.
+* gh-6574 DOC: Release notes for Numpy 1.10.2.
+* gh-6577 BUG: Fix for #6569, allowing build_ext --inplace
+* gh-6579 MAINT: Fix mistake in doc upload rule.
+* gh-6596 BUG: Fix swig for relaxed stride checking.
+* gh-6606 DOC: Update 1.10.2 release notes.
+* gh-6614 BUG: Add choice and dirichlet to numpy.random.__all__.
+* gh-6621 BUG: Fix swig make_fortran function.
+* gh-6628 BUG: Make allclose return python bool.
+* gh-6642 BUG: Fix memleak in _convert_from_dict.
+* gh-6643 ENH: make recarray.getitem return a recarray.
+* gh-6653 BUG: Fix ma dot to always return masked array.
+* gh-6668 BUG: ma.make_mask should always return nomask for nomask argument.
+* gh-6686 BUG: Fix a bug in assert_string_equal.
+* gh-6695 BUG: Fix removing tempdirs created during build.
+* gh-6697 MAINT: Fix spurious semicolon in macro definition of PyArray_FROM_OT.
+* gh-6698 TST: test np.rint bug for large integers.
+* gh-6717 BUG: Readd fallback CBLAS detection on linux.
+* gh-6721 BUG: Fix for #6719.
+* gh-6726 BUG: Fix bugs exposed by relaxed stride rollback.
+* gh-6757 BUG: link cblas library if cblas is detected.
+* gh-6756 TST: only test f2py, not f2py2.7 etc, fixes #6718.
+* gh-6747 DEP: Deprecate changing shape of non-C-contiguous array via descr.
+* gh-6775 MAINT: Include from __future__ boilerplate in some files missing it.
+* gh-6780 BUG: metadata is not copied to base_dtype.
+* gh-6783 BUG: Fix travis ci testing for new google infrastructure.
+* gh-6785 BUG: Quick and dirty fix for interp.
+* gh-6813 TST,BUG: Make test_mvoid_multidim_print work for 32 bit systems.
+* gh-6817 BUG: Disable 32-bit msvc9 compiler optimizations for npy_rint.
+* gh-6819 TST: Fix test_mvoid_multidim_print failures on Python 2.x for Windows.
+
+Initial support for mingwpy was reverted as it was causing problems for
+non-windows builds.
+
+* gh-6536 BUG: Revert gh-5614 to fix non-windows build problems
+
+A fix for np.lib.split was reverted because it resulted in "fixing"
+behavior that will be present in Numpy 1.11 and that was already
+present in Numpy 1.9. See the discussion of the issue at gh-6575 for
+clarification.
+
+* gh-6576 BUG: Revert gh-6376 to fix split behavior for empty arrays.
+
+Relaxed stride checking was reverted. There were back compatibility
+problems involving views changing the dtype of multidimensional Fortran
+arrays that need to be dealt with over a longer timeframe.
+
+* gh-6735 MAINT: Make no relaxed stride checking the default for 1.10.
+
+
+Notes
+=====
+A bug in the Numpy 1.10.1 release resulted in exceptions being raised for
+``RuntimeWarning`` and ``DeprecationWarning`` in projects depending on Numpy.
+That has been fixed.
--- /dev/null
+==========================
+NumPy 1.10.3 Release Notes
+==========================
+
+N/A: this release did not happen due to various screwups involving PyPI.
--- /dev/null
+==========================
+NumPy 1.10.4 Release Notes
+==========================
+
+This release is a bugfix source release motivated by a segfault regression.
+No windows binaries are provided for this release, as there appear to be
+bugs in the toolchain we use to generate those files. Hopefully that
+problem will be fixed for the next release. In the meantime, we suggest
+using one of the providers of windows binaries.
+
+Compatibility notes
+===================
+
+* The trace function now calls the trace method on subclasses of ndarray,
+ except for matrix, for which the current behavior is preserved. This is
+ to help with the units package of AstroPy and hopefully will not cause
+ problems.
+
+Issues Fixed
+============
+
+* gh-6922 BUG: numpy.recarray.sort segfaults on Windows.
+* gh-6937 BUG: busday_offset does the wrong thing with modifiedpreceding roll.
+* gh-6949 BUG: Type is lost when slicing a subclass of recarray.
+
+Merged PRs
+==========
+
+The following PRs have been merged into 1.10.4. When the PR is a backport,
+the PR number for the original PR against master is listed.
+
+* gh-6840 TST: Update travis testing script in 1.10.x
+* gh-6843 BUG: Fix use of python 3 only FileNotFoundError in test_f2py.
+* gh-6884 REL: Update pavement.py and setup.py to reflect current version.
+* gh-6916 BUG: Fix test_f2py so it runs correctly in runtests.py.
+* gh-6924 BUG: Fix segfault gh-6922.
+* gh-6942 Fix datetime roll='modifiedpreceding' bug.
+* gh-6943 DOC,BUG: Fix some latex generation problems.
+* gh-6950 BUG trace is not subclass aware, np.trace(ma) != ma.trace().
+* gh-6952 BUG recarray slices should preserve subclass.
--- /dev/null
+==========================
+NumPy 1.11.0 Release Notes
+==========================
+
+This release supports Python 2.6 - 2.7 and 3.2 - 3.5 and contains a number
+of enhancements and improvements. Note also the build system changes listed
+below as they may have subtle effects.
+
+No Windows (TM) binaries are provided for this release due to a broken
+toolchain. One of the providers of Python packages for Windows (TM) is your
+best bet.
+
+
+Highlights
+==========
+
+Details of these improvements can be found below.
+
+* The datetime64 type is now timezone naive.
+* A dtype parameter has been added to ``randint``.
+* Improved detection of two arrays possibly sharing memory.
+* Automatic bin size estimation for ``np.histogram``.
+* Speed optimization of A @ A.T and dot(A, A.T).
+* New function ``np.moveaxis`` for reordering array axes.
+
+
+Build System Changes
+====================
+
+* Numpy now uses ``setuptools`` for its builds instead of plain distutils.
+ This fixes usage of ``install_requires='numpy'`` in the ``setup.py`` files of
+ projects that depend on Numpy (see gh-6551). It potentially affects the way
+ that build/install methods for Numpy itself behave though. Please report any
+ unexpected behavior on the Numpy issue tracker.
+* Bento build support and related files have been removed.
+* Single file build support and related files have been removed.
+
+
+Future Changes
+==============
+
+The following changes are scheduled for Numpy 1.12.0.
+
+* Support for Python 2.6, 3.2, and 3.3 will be dropped.
+* Relaxed stride checking will become the default. See the 1.8.0 release
+ notes for a more extended discussion of what this change implies.
+* The behavior of the datetime64 "not a time" (NaT) value will be changed
+ to match that of floating point "not a number" (NaN) values: all
+ comparisons involving NaT will return False, except for NaT != NaT which
+ will return True.
+* Indexing with floats will raise ``IndexError``,
+ e.g., ``a[0, 0.0]``.
+* Indexing with non-integer array_like will raise ``IndexError``,
+ e.g., ``a['1', '2']``.
+* Indexing with multiple ellipsis will raise ``IndexError``,
+ e.g., ``a[..., ...]``.
+* Non-integers used as index values will raise ``TypeError``,
+ e.g., in ``reshape``, ``take``, and specifying reduce axis.
+
+
+In a future release the following changes will be made.
+
+* The ``rand`` function exposed in ``numpy.testing`` will be removed. That
+ function is left over from early Numpy and was implemented using the
+ Python random module. The random number generators from ``numpy.random``
+ should be used instead.
+* The ``ndarray.view`` method will only allow c_contiguous arrays to be
+ viewed using a dtype of different size causing the last dimension to
+ change. That differs from the current behavior where arrays that are
+ f_contiguous but not c_contiguous can be viewed using a dtype of
+ different size causing the first dimension to change.
+* Slicing a ``MaskedArray`` will return views of both data **and** mask.
+ Currently the mask is copy-on-write and changes to the mask in the slice do
+ not propagate to the original mask. See the FutureWarnings section below for
+ details.
+
+
+Compatibility notes
+===================
+
+datetime64 changes
+------------------
+In prior versions of NumPy the experimental datetime64 type always stored
+times in UTC. By default, creating a datetime64 object from a string or
+printing it would convert from or to local time::
+
+ # old behavior
+ >>> np.datetime64('2000-01-01T00:00:00')
+ numpy.datetime64('2000-01-01T00:00:00-0800') # note the timezone offset -08:00
+
+
+A consensus of datetime64 users agreed that this behavior is undesirable
+and at odds with how datetime64 is usually used (e.g., by `pandas
+<http://pandas.pydata.org>`__). For most use cases, a timezone naive datetime
+type is preferred, similar to the ``datetime.datetime`` type in the Python
+standard library. Accordingly, datetime64 no longer assumes that input is in
+local time, nor does it print local times::
+
+ >>> np.datetime64('2000-01-01T00:00:00')
+ numpy.datetime64('2000-01-01T00:00:00')
+
+For backwards compatibility, datetime64 still parses timezone offsets, which
+it handles by converting to UTC. However, the resulting datetime is timezone
+naive::
+
+ >>> np.datetime64('2000-01-01T00:00:00-08')
+ DeprecationWarning: parsing timezone aware datetimes is deprecated;
+ this will raise an error in the future
+ numpy.datetime64('2000-01-01T08:00:00')
+
+As a corollary to this change, we no longer prohibit casting between datetimes
+with date units and datetimes with time units. With timezone naive datetimes,
+the rule for casting from dates to times is no longer ambiguous.
+
+``linalg.norm`` return type changes
+-----------------------------------
+The return type of the ``linalg.norm`` function is now floating point without
+exception. Some of the norm types previously returned integers.
+
+polynomial fit changes
+----------------------
+The various fit functions in the numpy polynomial package no longer accept
+non-integers for degree specification.
+
+*np.dot* now raises ``TypeError`` instead of ``ValueError``
+-----------------------------------------------------------
+This behaviour mimics that of other functions such as ``np.inner``. Previously,
+if the two arguments could not be cast to a common type, either a ``TypeError``
+or a ``ValueError`` could be raised, depending on their order. Now, ``np.dot``
+will always raise a ``TypeError``.
+
+FutureWarning to changed behavior
+---------------------------------
+
+* In ``np.lib.split`` an empty array in the result always had dimension
+ ``(0,)`` no matter the dimensions of the array being split. This
+ has been changed so that the dimensions will be preserved. A
+ ``FutureWarning`` for this change has been in place since Numpy 1.9 but,
+ due to a bug, sometimes no warning was raised and the dimensions were
+ already preserved.
+
+``%`` and ``//`` operators
+--------------------------
+These operators are implemented with the ``remainder`` and ``floor_divide``
+functions respectively. Those functions are now based around ``fmod`` and are
+computed together so as to be compatible with each other and with the Python
+versions for float types. The results should be marginally more accurate, or
+in some cases outright bug fixes, compared to the previous results, but they may
+differ significantly in cases where roundoff makes a difference in the integer
+returned by ``floor_divide``. Some corner cases also change, for instance, NaN
+is always returned for both functions when the divisor is zero,
+``divmod(1.0, inf)`` returns ``(0.0, 1.0)`` except on MSVC 2008, and
+``divmod(-1.0, inf)`` returns ``(-1.0, inf)``.
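+
+For example::
+
+ >>> np.floor_divide(1.0, np.inf), np.remainder(1.0, np.inf)
+ (0.0, 1.0)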
+
+C API
+-----
+
+Removed the ``check_return`` and ``inner_loop_selector`` members of
+the ``PyUFuncObject`` struct (replacing them with ``reserved`` slots
+to preserve struct layout). These were never used for anything, so
+it's unlikely that any third-party code is using them either, but we
+mention it here for completeness.
+
+
+object dtype detection for old-style classes
+--------------------------------------------
+
+In python 2, objects which are instances of old-style user-defined classes no
+longer automatically count as 'object' type in the dtype-detection handler.
+Instead, as in python 3, they may potentially count as sequences, but only if
+they define both a `__len__` and a `__getitem__` method. This fixes a segfault
+and inconsistency between python 2 and 3.
+
+New Features
+============
+
+* ``np.histogram`` now provides plugin estimators for automatically
+ estimating the optimal number of bins. Passing one of ['auto', 'fd',
+ 'scott', 'rice', 'sturges'] as the argument to 'bins' results in the
+ corresponding estimator being used.
+
+* A benchmark suite using `Airspeed Velocity
+ <https://asv.readthedocs.io/>`__ has been added, converting the
+ previous vbench-based one. You can run the suite locally via ``python
+ runtests.py --bench``. For more details, see ``benchmarks/README.rst``.
+
+* A new function ``np.shares_memory`` that can check exactly whether two
+ arrays have memory overlap is added. ``np.may_share_memory`` also now has
+ an option to spend more effort to reduce false positives.
+
+* ``SkipTest`` and ``KnownFailureException`` exception classes are exposed
+ in the ``numpy.testing`` namespace. Raise them in a test function to mark
+ the test to be skipped or mark it as a known failure, respectively.
+
+* ``f2py.compile`` has a new ``extension`` keyword parameter that allows the
+ fortran extension to be specified for generated temp files. For instance,
+ the files can be specified to be ``*.f90``. The ``verbose`` argument now
+ also takes effect; it was previously ignored.
+
+* A ``dtype`` parameter has been added to ``np.random.randint`` (see the
+ sketch after this list). Random ndarrays of the following types can now
+ be generated:
+
+ - ``np.bool_``,
+ - ``np.int8``, ``np.uint8``,
+ - ``np.int16``, ``np.uint16``,
+ - ``np.int32``, ``np.uint32``,
+ - ``np.int64``, ``np.uint64``,
+ - ``np.int_``, ``np.intp``
+
+ The specification is by precision rather than by C type. Hence, on some
+ platforms ``np.int64`` may be a ``long`` instead of ``long long`` even if
+ the specified dtype is ``long long`` because the two may have the same
+ precision. The resulting type depends on which C type numpy uses for the
+ given precision. The byteorder specification is also ignored, the
+ generated arrays are always in native byte order.
+
+* A new ``np.moveaxis`` function allows for moving one or more array axes
+ to a new position by explicitly providing source and destination axes.
+ This function should be easier to use than the current ``rollaxis``
+ function as well as providing more functionality.
+
+* The ``deg`` parameter of the various ``numpy.polynomial`` fits has been
+ extended to accept a list of the degrees of the terms to be included in
+ the fit, the coefficients of all other terms being constrained to zero.
+ The change is backward compatible, passing a scalar ``deg`` will behave
+ as before.
+
+* A divmod function for float types modeled after the Python version has
+ been added to the npy_math library.
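+
+As a sketch of the new ``randint`` ``dtype`` parameter mentioned above::
+
+ >>> r = np.random.randint(0, 256, size=4, dtype=np.uint8)
+ >>> r.dtype
+ dtype('uint8')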
+
+
+Improvements
+============
+
+``np.gradient`` now supports an ``axis`` argument
+-------------------------------------------------
+The ``axis`` parameter was added to ``np.gradient`` for consistency. It
+allows specifying the axes over which the gradient is calculated.
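+
+For example::
+
+ >>> a = np.array([[1., 2., 6.], [3., 4., 5.]])
+ >>> np.gradient(a, axis=1)
+ array([[ 1. ,  2.5,  4. ],
+        [ 1. ,  1. ,  1. ]])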
+
+``np.lexsort`` now supports arrays with object data-type
+--------------------------------------------------------
+The function now internally calls the generic ``npy_amergesort`` when the
+type does not implement a merge-sort kind of ``argsort`` method.
+
+``np.ma.core.MaskedArray`` now supports an ``order`` argument
+-------------------------------------------------------------
+When constructing a new ``MaskedArray`` instance, it can be configured with
+an ``order`` argument analogous to the one when calling ``np.ndarray``. The
+addition of this argument allows for the proper processing of an ``order``
+argument in several MaskedArray-related utility functions such as
+``np.ma.core.array`` and ``np.ma.core.asarray``.
+
+Memory and speed improvements for masked arrays
+-----------------------------------------------
+Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses
+``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and
+avoids a large memory peak. Another optimization was done to avoid a memory
+peak and useless computations when printing a masked array.
+
+``ndarray.tofile`` now uses fallocate on linux
+----------------------------------------------
+The function now uses the fallocate system call to reserve sufficient
+disk space on file systems that support it.
+
+Optimizations for operations of the form ``A.T @ A`` and ``A @ A.T``
+--------------------------------------------------------------------
+Previously, ``gemm`` BLAS operations were used for all matrix products. Now,
+if the matrix product is between a matrix and its transpose, it will use
+``syrk`` BLAS operations for a performance boost. This optimization has been
+extended to ``@``, ``numpy.dot``, ``numpy.inner``, and ``numpy.matmul``.
+
+**Note:** Requires the transposed and non-transposed matrices to share data.
+
+``np.testing.assert_warns`` can now be used as a context manager
+----------------------------------------------------------------
+This matches the behavior of ``assert_raises``.
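+
+For example::
+
+ >>> import warnings
+ >>> from numpy.testing import assert_warns
+ >>> with assert_warns(DeprecationWarning):
+ ...     warnings.warn("deprecated", DeprecationWarning)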
+
+Speed improvement for np.random.shuffle
+---------------------------------------
+``np.random.shuffle`` is now much faster for 1d ndarrays.
+
+
+Changes
+=======
+
+Pyrex support was removed from ``numpy.distutils``
+--------------------------------------------------
+The method ``build_src.generate_a_pyrex_source`` will remain available; it
+has been monkeypatched by users to support Cython instead of Pyrex. It's
+recommended to switch to a better supported method of building Cython
+extensions though.
+
+``np.broadcast`` can now be called with a single argument
+---------------------------------------------------------
+The resulting object in that case will simply mimic iteration over
+a single array. This change obsoletes distinctions like::
+
+ if len(x) == 1:
+ shape = x[0].shape
+ else:
+ shape = np.broadcast(\*x).shape
+
+Instead, ``np.broadcast`` can be used in all cases.
+
+``np.trace`` now respects array subclasses
+------------------------------------------
+This behaviour mimics that of other functions such as ``np.diagonal`` and
+ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give
+the same result.
+
+``np.dot`` now raises ``TypeError`` instead of ``ValueError``
+-------------------------------------------------------------
+This behaviour mimics that of other functions such as ``np.inner``. Previously,
+if the two arguments could not be cast to a common type, either a ``TypeError``
+or a ``ValueError`` could be raised, depending on their order. Now, ``np.dot``
+will always raise a ``TypeError``.
+
+``linalg.norm`` return type changes
+-----------------------------------
+The ``linalg.norm`` function now does all its computations in floating point
+and returns floating results. This change fixes bugs due to integer overflow
+and the failure of abs with signed integers of minimum value, e.g., int8(-128).
+For consistency, floats are used even where an integer might work.
+
+
+Deprecations
+============
+
+Views of arrays in Fortran order
+--------------------------------
+The F_CONTIGUOUS flag was used to signal that views using a dtype that
+changed the element size would change the first index. This was always
+problematic for arrays that were both F_CONTIGUOUS and C_CONTIGUOUS
+because C_CONTIGUOUS took precedence. Relaxed stride checking results in
+more such dual contiguous arrays and breaks some existing code as a result.
+Note that this also affects changing the dtype by assigning to the dtype
+attribute of an array. The aim of this deprecation is to restrict views to
+C_CONTIGUOUS arrays at some future time. A workaround that is backward
+compatible is to use ``a.T.view(...).T`` instead. A parameter may also be
+added to the view method to explicitly ask for Fortran order views, but
+that will not be backward compatible.
+
+Invalid arguments for array ordering
+------------------------------------
+It is currently possible to pass in arguments for the ``order``
+parameter in methods like ``array.flatten`` or ``array.ravel``
+that are not one of the following: 'C', 'F', 'A', 'K' (note that
+all of these possible values are both unicode and case insensitive).
+Such behavior will not be allowed in future releases.
+
+Random number generator in the ``testing`` namespace
+----------------------------------------------------
+The Python standard library random number generator was previously exposed
+in the ``testing`` namespace as ``testing.rand``. Using this generator is
+not recommended and it will be removed in a future release. Use generators
+from ``numpy.random`` namespace instead.
+
+Random integer generation on a closed interval
+----------------------------------------------
+In accordance with the Python C API, which gives preference to the half-open
+interval over the closed one, ``np.random.random_integers`` is being
+deprecated in favor of calling ``np.random.randint``, which has been
+enhanced with the ``dtype`` parameter as described under "New Features".
+However, ``np.random.random_integers`` will not be removed anytime soon.
+
+
+FutureWarnings
+==============
+
+Assigning to slices/views of ``MaskedArray``
+--------------------------------------------
+Currently a slice of a masked array contains a view of the original data and a
+copy-on-write view of the mask. Consequently, any changes to the slice's mask
+will result in a copy of the original mask being made and that new mask being
+changed rather than the original. For example, if we make a slice of the
+original like so, ``view = original[:]``, then modifications to the data in one
+array will affect the data of the other but, because the mask will be copied
+during assignment operations, changes to the mask will remain local. A similar
+situation occurs when explicitly constructing a masked array using
+``MaskedArray(data, mask)``, the returned array will contain a view of ``data``
+but the mask will be a copy-on-write view of ``mask``.
+
+In the future, these cases will be normalized so that the data and mask arrays
+are treated the same way and modifications to either will propagate between
+views. In 1.11, numpy will issue a ``MaskedArrayFutureWarning`` warning
+whenever user code modifies the mask of a view that in the future may cause
+values to propagate back to the original. To silence these warnings and make
+your code robust against the upcoming changes, you have two options: if you
+want to keep the current behavior, call ``masked_view.unshare_mask()`` before
+modifying the mask. If you want to get the future behavior early, use
+``masked_view._sharedmask = False``. However, note that setting the
+``_sharedmask`` attribute will break subsequent explicit calls to
+``masked_view.unshare_mask()``.
--- /dev/null
+==========================
+NumPy 1.11.1 Release Notes
+==========================
+
+Numpy 1.11.1 supports Python 2.6 - 2.7 and 3.2 - 3.5. It fixes bugs and
+regressions found in Numpy 1.11.0 and includes several build related
+improvements. Wheels for Linux, Windows, and OSX can be found on pypi.
+
+Fixes Merged
+============
+
+- #7506 BUG: Make sure numpy imports on python 2.6 when nose is unavailable.
+- #7530 BUG: Floating exception with invalid axis in np.lexsort.
+- #7535 BUG: Extend glibc complex trig functions blacklist to glibc < 2.18.
+- #7551 BUG: Allow graceful recovery for no compiler.
+- #7558 BUG: Constant padding expected wrong type in constant_values.
+- #7578 BUG: Fix OverflowError in Python 3.x. in swig interface.
+- #7590 BLD: Fix configparser.InterpolationSyntaxError.
+- #7597 BUG: Make np.ma.take work on scalars.
+- #7608 BUG: linalg.norm(): Don't convert object arrays to float.
+- #7638 BLD: Correct C compiler customization in system_info.py.
+- #7654 BUG: ma.median of 1d array should return a scalar.
+- #7656 BLD: Remove hardcoded Intel compiler flag -xSSE4.2.
+- #7660 BUG: Temporary fix for str(mvoid) for object field types.
+- #7665 BUG: Fix incorrect printing of 1D masked arrays.
+- #7670 BUG: Correct initial index estimate in histogram.
+- #7671 BUG: Boolean assignment no GIL release when transfer needs API.
+- #7676 BUG: Fix handling of right edge of final histogram bin.
+- #7680 BUG: Fix np.clip bug NaN handling for Visual Studio 2015.
+- #7724 BUG: Fix segfaults in np.random.shuffle.
+- #7731 MAINT: Change mkl_info.dir_env_var from MKL to MKLROOT.
+- #7737 BUG: Fix issue on OS X with Python 3.x, npymath.ini not installed.
--- /dev/null
+==========================
+NumPy 1.11.2 Release Notes
+==========================
+
+Numpy 1.11.2 supports Python 2.6 - 2.7 and 3.2 - 3.5. It fixes bugs and
+regressions found in Numpy 1.11.1 and includes several build related
+improvements. Wheels for Linux, Windows, and OS X can be found on PyPI.
+
+Pull Requests Merged
+====================
+
+Fixes overridden by later merges and release notes updates are omitted.
+
+- #7736 BUG: Many functions silently drop 'keepdims' kwarg.
+- #7738 ENH: Add extra kwargs and update doc of many MA methods.
+- #7778 DOC: Update Numpy 1.11.1 release notes.
+- #7793 BUG: MaskedArray.count treats negative axes incorrectly.
+- #7816 BUG: Fix array too big error for wide dtypes.
+- #7821 BUG: Make sure npy_mul_with_overflow_<type> detects overflow.
+- #7824 MAINT: Allocate fewer bytes for empty arrays.
+- #7847 MAINT,DOC: Fix some imp module uses and update f2py.compile docstring.
+- #7849 MAINT: Fix remaining uses of deprecated Python imp module.
+- #7851 BLD: Fix ATLAS version detection.
+- #7896 BUG: Construct ma.array from np.array which contains padding.
+- #7904 BUG: Fix float16 type not being called due to wrong ordering.
+- #7917 BUG: Production install of numpy should not require nose.
+- #7919 BLD: Fixed MKL detection for recent versions of this library.
+- #7920 BUG: Fix for issue #7835 (ma.median of 1d).
+- #7932 BUG: Monkey-patch _msvccompile.gen_lib_option like other compilers.
+- #7939 BUG: Check for HAVE_LDOUBLE_DOUBLE_DOUBLE_LE in npy_math_complex.
+- #7953 BUG: Guard against buggy comparisons in generic quicksort.
+- #7954 BUG: Use keyword arguments to initialize Extension base class.
+- #7955 BUG: Make sure numpy globals keep identity after reload.
+- #7972 BUG: MSVCCompiler grows 'lib' & 'include' env strings exponentially.
+- #8005 BLD: Remove __NUMPY_SETUP__ from builtins at end of setup.py.
+- #8010 MAINT: Remove leftover imp module imports.
+- #8020 BUG: Fix return of np.ma.count if keepdims is True and axis is None.
+- #8024 BUG: Fix numpy.ma.median.
+- #8031 BUG: Fix np.ma.median with only one non-masked value.
+- #8044 BUG: Fix bug in NpyIter buffering with discontinuous arrays.
--- /dev/null
+==========================
+NumPy 1.11.3 Release Notes
+==========================
+
+Numpy 1.11.3 fixes a bug that leads to file corruption when very large files
+opened in append mode are used in ``ndarray.tofile``. It supports Python
+versions 2.6 - 2.7 and 3.2 - 3.5. Wheels for Linux, Windows, and OS X can be
+found on PyPI.
+
+
+Contributors to maintenance/1.11.3
+==================================
+
+A total of 2 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+- Charles Harris
+- Pavel Potocek +
+
+Pull Requests Merged
+====================
+
+- `#8341 <https://github.com/numpy/numpy/pull/8341>`__: BUG: Fix ndarray.tofile large file corruption in append mode.
+- `#8346 <https://github.com/numpy/numpy/pull/8346>`__: TST: Fix tests in PR #8341 for NumPy 1.11.x
+
--- /dev/null
+==========================
+NumPy 1.12.0 Release Notes
+==========================
+
+This release supports Python 2.7 and 3.4 - 3.6.
+
+Highlights
+==========
+The NumPy 1.12.0 release contains a large number of fixes and improvements, but
+few that stand out above all others. That makes picking out the highlights
+somewhat arbitrary but the following may be of particular interest or indicate
+areas likely to have future consequences.
+
+* Order of operations in ``np.einsum`` can now be optimized for large speed improvements.
+* New ``signature`` argument to ``np.vectorize`` for vectorizing with core dimensions.
+* The ``keepdims`` argument was added to many functions.
+* New context manager for testing warnings.
+* Support for BLIS in numpy.distutils.
+* Much improved support for PyPy (not yet finished).
+
+Dropped Support
+===============
+
+* Support for Python 2.6, 3.2, and 3.3 has been dropped.
+
+
+Added Support
+=============
+
+* Support for PyPy 2.7 v5.6.0 has been added. While not complete (nditer
+ ``updateifcopy`` is not supported yet), this is a milestone for PyPy's
+ C-API compatibility layer.
+
+
+Build System Changes
+====================
+
+* Library order is preserved, instead of being reordered to match that of
+ the directories.
+
+
+Deprecations
+============
+
+Assignment of ndarray object's ``data`` attribute
+-------------------------------------------------
+Assigning the 'data' attribute is an inherently unsafe operation as pointed
+out in gh-7083. Such a capability will be removed in the future.
+
+Unsafe int casting of the num attribute in ``linspace``
+-------------------------------------------------------
+``np.linspace`` now raises a ``DeprecationWarning`` when ``num`` cannot be
+safely interpreted as an integer.
+
+Insufficient bit width parameter to ``binary_repr``
+---------------------------------------------------
+If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
+represent the number in base 2 (positive) or 2's complement (negative) form,
+the function used to silently ignore the parameter and return a representation
+using the minimal number of bits needed for the form in question. Such behavior
+is now considered unsafe from a user perspective and will raise an error in the
+future.
+
+
+Future Changes
+==============
+
+* In 1.13 NAT will always compare False except for ``NAT != NAT``,
+ which will be True. In short, NAT will behave like NaN.
+* In 1.13 ``np.average`` will preserve subclasses, to match the behavior of most
+ other numpy functions such as np.mean. In particular, this means calls which
+ returned a scalar may return a 0-d subclass object instead.
+
+Multiple-field manipulation of structured arrays
+------------------------------------------------
+In 1.13 the behavior of structured arrays involving multiple fields will change
+in two ways:
+
+First, indexing a structured array with multiple fields (eg,
+``arr[['f1', 'f3']]``) will return a view into the original array in 1.13,
+instead of a copy. Note the returned view will have extra padding bytes
+corresponding to intervening fields in the original array, unlike the copy in
+1.12, which will affect code such as ``arr[['f1', 'f3']].view(newdtype)``.
+
+Second, for numpy versions 1.6 to 1.12 assignment between structured arrays
+occurs "by field name": Fields in the destination array are set to the
+identically-named field in the source array or to 0 if the source does not have
+a field::
+
+ >>> a = np.array([(1,2),(3,4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ >>> b = np.ones(2, dtype=[('z', 'i4'), ('y', 'i4'), ('x', 'i4')])
+ >>> b[:] = a
+ >>> b
+ array([(0, 2, 1), (0, 4, 3)],
+ dtype=[('z', '<i4'), ('y', '<i4'), ('x', '<i4')])
+
+In 1.13 assignment will instead occur "by position": The Nth field of the
+destination will be set to the Nth field of the source regardless of field
+name. The old behavior can be obtained by using indexing to reorder the
+fields before assignment, e.g., ``b[['x', 'y']] = a[['y', 'x']]``.
+
+
+Compatibility notes
+===================
+
+DeprecationWarning to error
+---------------------------
+
+* Indexing with floats raises ``IndexError``,
+ e.g., ``a[0, 0.0]``.
+* Indexing with non-integer array_like raises ``IndexError``,
+ e.g., ``a['1', '2']``.
+* Indexing with multiple ellipsis raises ``IndexError``,
+ e.g., ``a[..., ...]``.
+* Non-integers used as index values raise ``TypeError``,
+ e.g., in ``reshape``, ``take``, and specifying reduce axis.
+
+FutureWarning to changed behavior
+---------------------------------
+
+* ``np.full`` now returns an array of the fill-value's dtype if no dtype is
+ given, instead of defaulting to float.
+* ``np.average`` will emit a warning if the argument is a subclass of ndarray,
+ as the subclass will be preserved starting in 1.13. (see Future Changes)
+
+``power`` and ``**`` raise errors for integer to negative integer powers
+------------------------------------------------------------------------
+The previous behavior depended on whether numpy scalar integers or numpy
+integer arrays were involved.
+
+For arrays
+
+* Zero to negative integer powers returned the least integral value.
+* Both 1, -1 to negative integer powers returned correct values.
+* The remaining integers returned zero when raised to negative integer powers.
+
+For scalars
+
+* Zero to negative integer powers returned the least integral value.
+* Both 1, -1 to negative integer powers returned correct values.
+* The remaining integers sometimes returned zero, sometimes the
+ correct float depending on the integer type combination.
+
+All of these cases now raise a ``ValueError`` except for those integer
+combinations whose common type is float, for instance uint64 and int8. It was
+felt that a simple rule was the best way to go rather than have special
+exceptions for the unit integers ``1`` and ``-1``. If you need negative
+powers, use an inexact type.
+
+Relaxed stride checking is the default
+--------------------------------------
+This will have some impact on code that assumed that ``F_CONTIGUOUS`` and
+``C_CONTIGUOUS`` were mutually exclusive and could be set to determine the
+default order for arrays that are now both.
+
+The ``np.percentile`` 'midpoint' interpolation method fixed for exact indices
+-----------------------------------------------------------------------------
+The 'midpoint' interpolator now gives the same result as 'lower' and 'higher'
+when the two coincide. The previous behavior of returning 'lower' + 0.5 in
+that case has been fixed.
+
+``keepdims`` kwarg is passed through to user-class methods
+----------------------------------------------------------
+numpy functions that take a ``keepdims`` kwarg now pass the value
+through to the corresponding methods on ndarray sub-classes. Previously the
+``keepdims`` keyword would be silently dropped. These functions now have
+the following behavior:
+
+1. If user does not provide ``keepdims``, no keyword is passed to the underlying
+ method.
+2. Any user-provided value of ``keepdims`` is passed through as a keyword
+ argument to the method.
+
+This will raise in the case where the method does not support a
+``keepdims`` kwarg and the user explicitly passes in ``keepdims``.
+
+The following functions are changed: ``sum``, ``product``,
+``sometrue``, ``alltrue``, ``any``, ``all``, ``amax``, ``amin``,
+``prod``, ``mean``, ``std``, ``var``, ``nanmin``, ``nanmax``,
+``nansum``, ``nanprod``, ``nanmean``, ``nanmedian``, ``nanvar``,
+``nanstd``
+
+``bitwise_and`` identity changed
+--------------------------------
+The previous identity was 1, it is now -1. See entry in Improvements for
+more explanation.
+
+ma.median warns and returns nan when unmasked invalid values are encountered
+----------------------------------------------------------------------------
+Similar to the unmasked median, the masked median `ma.median` now emits a
+`RuntimeWarning` and returns `NaN` in slices where an unmasked `NaN` is
+present.
+
+Greater consistency in ``assert_almost_equal``
+----------------------------------------------
+The precision check for scalars has been changed to match that for arrays. It
+is now::
+
+ abs(actual - desired) < 1.5 * 10**(-decimal)
+
+Note that this is looser than previously documented, but agrees with the
+previous implementation used in ``assert_array_almost_equal``. Due to the
+change in implementation some very delicate tests may fail that did not
+fail before.
+
+``NoseTester`` behaviour of warnings during testing
+---------------------------------------------------
+When ``raise_warnings="develop"`` is given, all uncaught warnings will now
+be considered a test failure. Previously only selected ones were raised.
+Warnings which are not caught or raised (mostly when in release mode)
+will be shown once during the test cycle similar to the default python
+settings.
+
+``assert_warns`` and ``deprecated`` decorator more specific
+-----------------------------------------------------------
+The ``assert_warns`` function and context manager are now more specific
+to the given warning category. This increased specificity leads to them
+being handled according to the outer warning settings. This means that
+no warning may be raised in cases where a wrong category warning is given
+and ignored outside the context. Alternatively the increased specificity
+may mean that warnings that were incorrectly ignored will now be shown
+or raised. See also the new ``suppress_warnings`` context manager.
+The same is true for the ``deprecated`` decorator.
+
+C API
+-----
+No changes.
+
+
+New Features
+============
+
+Writeable keyword argument for ``as_strided``
+---------------------------------------------
+``np.lib.stride_tricks.as_strided`` now has a ``writeable``
+keyword argument. It can be set to False when no write operation
+to the returned array is expected to avoid accidental
+unpredictable writes.
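+
+For example::
+
+ >>> from numpy.lib.stride_tricks import as_strided
+ >>> v = as_strided(np.zeros(3), writeable=False)
+ >>> v[0] = 1.0
+ Traceback (most recent call last):
+     ...
+ ValueError: assignment destination is read-only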
+
+``axes`` keyword argument for ``rot90``
+---------------------------------------
+The ``axes`` keyword argument in ``rot90`` determines the plane in which the
+array is rotated. It defaults to ``axes=(0,1)`` as in the original function.
+
+Generalized ``flip``
+--------------------
+``flipud`` and ``fliplr`` reverse the elements of an array along axis=0 and
+axis=1 respectively. The newly added ``flip`` function reverses the elements of
+an array along any given axis.
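+
+For example::
+
+ >>> m = np.arange(8).reshape(2, 2, 2)
+ >>> np.all(np.flip(m, 2) == m[:, :, ::-1])
+ True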
+
+``np.count_nonzero`` now has an ``axis`` parameter
+--------------------------------------------------
+The new ``axis`` parameter allows non-zero counts to be generated on more
+than just a flattened array object.
+
+BLIS support in ``numpy.distutils``
+-----------------------------------
+Building against the BLAS implementation provided by the BLIS library is now
+supported. See the ``[blis]`` section in ``site.cfg.example`` (in the root of
+the numpy repo or source distribution).
+
+Hook in ``numpy/__init__.py`` to run distribution-specific checks
+-----------------------------------------------------------------
+Binary distributions of numpy may need to run specific hardware checks or load
+specific libraries during numpy initialization. For example, if we are
+distributing numpy with a BLAS library that requires SSE2 instructions, we
+would like to check the machine on which numpy is running does have SSE2 in
+order to give an informative error.
+
+A hook has been added in ``numpy/__init__.py`` to import a
+``numpy/_distributor_init.py`` file that will remain empty (bar a docstring)
+in the standard numpy source, but that can be overwritten by people making
+binary distributions of numpy.
+
+New nanfunctions ``nancumsum`` and ``nancumprod`` added
+-------------------------------------------------------
+Nan-functions ``nancumsum`` and ``nancumprod`` have been added to
+compute ``cumsum`` and ``cumprod`` by ignoring nans.
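+
+For example::
+
+ >>> np.nancumsum([1.0, np.nan, 2.0])
+ array([ 1.,  1.,  3.])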
+
+``np.interp`` can now interpolate complex values
+------------------------------------------------
+``np.lib.interp(x, xp, fp)`` now allows the interpolated array ``fp``
+to be complex and will interpolate at ``complex128`` precision.
+
+New polynomial evaluation function ``polyvalfromroots`` added
+-------------------------------------------------------------
+The new function ``polyvalfromroots`` evaluates a polynomial at given points
+from the roots of the polynomial. This is useful for higher order polynomials,
+where expansion into polynomial coefficients is inaccurate at machine
+precision.
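+
+For example, evaluating :math:`(x - 1)(x - 3)` at ``x = 2``::
+
+ >>> from numpy.polynomial import polynomial as P
+ >>> P.polyvalfromroots(2.0, [1.0, 3.0])
+ -1.0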
+
+New array creation function ``geomspace`` added
+-----------------------------------------------
+The new function ``geomspace`` generates a geometric sequence. It is similar
+to ``logspace``, but with start and stop specified directly:
+``geomspace(start, stop)`` behaves the same as
+``logspace(log10(start), log10(stop))``.
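+
+For example::
+
+ >>> np.geomspace(1, 1000, num=4)
+ array([    1.,    10.,   100.,  1000.])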
+
+New context manager for testing warnings
+----------------------------------------
+A new context manager ``suppress_warnings`` has been added to the testing
+utils. This context manager is designed to help test warnings reliably,
+in particular to filter or ignore them. In Python versions before 3.4,
+ignoring warnings with an "ignore" filter can quickly result in these (or
+similar) warnings not being tested reliably.
+
+The context manager allows filtering (as well as recording) warnings similar
+to the ``catch_warnings`` context, but with easier specificity. Printing
+warnings that have not been filtered, and nesting the context manager, both
+work as expected. Additionally, the context manager can be used as a
+decorator, which is useful when multiple tests need to hide the same warning.
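+
+A minimal sketch of the intended usage (the warning message here is
+hypothetical)::
+
+ import warnings
+ from numpy.testing import suppress_warnings
+
+ with suppress_warnings() as sup:
+     sup.filter(DeprecationWarning, "some deprecated call")
+     # code that emits the matching warning runs here without surfacing it
+     warnings.warn("some deprecated call", DeprecationWarning)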
+
+New masked array functions ``ma.convolve`` and ``ma.correlate`` added
+---------------------------------------------------------------------
+These functions wrap the non-masked versions, but propagate masked values.
+There are two different propagation modes. The default causes masked
+values to contaminate the result with masks, but the other mode only outputs
+masks if there is no alternative.
+
+New ``float_power`` ufunc
+-------------------------
+The new ``float_power`` ufunc is like the ``power`` function except all
+computation is done in a minimum precision of float64. There was a long
+discussion on the numpy mailing list of how to treat integers to negative
+integer powers and a popular proposal was that the ``__pow__`` operator should
+always return results of at least float64 precision. The ``float_power``
+function implements that option. Note that it does not support object arrays.
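+
+For example, integer inputs are computed at (at least) float64 precision::
+
+ >>> np.float_power(2, -2)
+ 0.25
+ >>> np.float_power([2, 4], -1)
+ array([ 0.5 , 0.25])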
+
+``np.loadtxt`` now supports a single integer as ``usecols`` argument
+---------------------------------------------------------------------
+Instead of using ``usecols=(n,)`` to read the nth column of a file
+it is now allowed to use ``usecols=n``. Also the error message is
+more user-friendly when a non-integer is passed as a column index.
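+
+For example, reading only the second column from a small file-like object::
+
+ >>> from io import StringIO
+ >>> np.loadtxt(StringIO(u"0 1\n2 3"), usecols=1)
+ array([ 1., 3.])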
+
+Improved automated bin estimators for ``histogram``
+---------------------------------------------------
+Added 'doane' and 'sqrt' estimators to ``histogram`` via the ``bins``
+argument. Added support for range-restricted histograms with automated
+bin estimation.
+
+``np.roll`` can now roll multiple axes at the same time
+-------------------------------------------------------
+The ``shift`` and ``axis`` arguments to ``roll`` are now broadcast against each
+other, and each specified axis is shifted accordingly.
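+
+For example, shifting both axes of a 2-d array at once::
+
+ >>> x = np.arange(4).reshape(2, 2)
+ >>> np.roll(x, (1, 1), axis=(0, 1))
+ array([[3, 2],
+ [1, 0]])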
+
+The ``__complex__`` method has been implemented for ndarrays
+----------------------------------------------------------------
+Calling ``complex()`` on a size 1 array will now cast to a python
+complex.
+
+``pathlib.Path`` objects now supported
+--------------------------------------
+The standard ``np.load``, ``np.save``, ``np.loadtxt``, ``np.savez``, and similar
+functions can now take ``pathlib.Path`` objects as an argument instead of a
+filename or open file object.
+
+New ``bits`` attribute for ``np.finfo``
+---------------------------------------
+This makes ``np.finfo`` consistent with ``np.iinfo`` which already has that
+attribute.
+
+New ``signature`` argument to ``np.vectorize``
+----------------------------------------------
+This argument allows for vectorizing user defined functions with core
+dimensions, in the style of NumPy's
+:ref:`generalized universal functions<c-api.generalized-ufuncs>`. This allows
+for vectorizing a much broader class of functions. For example, an arbitrary
+distance metric that combines two vectors to produce a scalar could be
+vectorized with ``signature='(n),(n)->()'``. See ``np.vectorize`` for full
+details.
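+
+As a sketch, the distance metric mentioned above could look like this (the
+function itself is illustrative)::
+
+ >>> dist = np.vectorize(lambda a, b: np.sqrt(((a - b) ** 2).sum()),
+ ...                     signature='(n),(n)->()')
+ >>> dist([[0., 0.], [3., 4.]], [0., 0.])
+ array([ 0., 5.])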
+
+Emit py3kwarnings for division of integer arrays
+------------------------------------------------
+To help people migrate their code bases from Python 2 to Python 3, the
+python interpreter has a handy option -3, which issues warnings at runtime.
+One of its warnings is for integer division::
+
+ $ python -3 -c "2/3"
+
+ -c:1: DeprecationWarning: classic int division
+
+In Python 3, the new integer division semantics also apply to numpy arrays.
+With this version, numpy will emit a similar warning::
+
+ $ python -3 -c "import numpy as np; np.array(2)/np.array(3)"
+
+ -c:1: DeprecationWarning: numpy: classic int division
+
+numpy.sctypes now includes bytes on Python3 too
+-----------------------------------------------
+Previously, it included str (bytes) and unicode on Python2, but only str
+(unicode) on Python3.
+
+
+Improvements
+============
+
+``bitwise_and`` identity changed
+--------------------------------
+The previous identity was 1 with the result that all bits except the LSB were
+masked out when the reduce method was used. The new identity is -1, which
+should work properly on two's complement machines as all bits will be set to
+one.
+
+Generalized Ufuncs will now release the GIL
+-------------------------------------------
+Generalized Ufuncs, including most of the linalg module, will now release
+the Python global interpreter lock.
+
+Caches in `np.fft` are now bounded in total size and item count
+---------------------------------------------------------------
+The caches in `np.fft` that speed up successive FFTs of the same length can no
+longer grow without bounds. They have been replaced with LRU (least recently
+used) caches that automatically evict no longer needed items if either the
+memory size or item count limit has been reached.
+
+Improved handling of zero-width string/unicode dtypes
+-----------------------------------------------------
+Fixed several interfaces that explicitly disallowed arrays with zero-width
+string dtypes (i.e. ``dtype('S0')`` or ``dtype('U0')``), and fixed several
+bugs where such dtypes were not handled properly. In particular, changed
+``ndarray.__new__`` to not implicitly convert ``dtype('S0')`` to
+``dtype('S1')`` (and likewise for unicode) when creating new arrays.
+
+Integer ufuncs vectorized with AVX2
+-----------------------------------
+If the CPU supports it at runtime, the basic integer ufuncs now use AVX2
+instructions. This feature is currently only available when compiled with GCC.
+
+Order of operations optimization in ``np.einsum``
+--------------------------------------------------
+``np.einsum`` now supports the ``optimize`` argument which will optimize the
+order of contraction. For example, ``np.einsum`` would complete the chain dot
+example ``np.einsum('ij,jk,kl->il', a, b, c)`` in a single pass which would
+scale like ``N^4``; however, when ``optimize=True`` ``np.einsum`` will create
+an intermediate array to reduce this scaling to ``N^3`` or effectively
+``np.dot(a, b).dot(c)``. Usage of intermediate tensors to reduce scaling has
+been applied to the general einsum summation notation. See ``np.einsum_path``
+for more details.
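+
+For example, with illustrative shapes::
+
+ >>> a = b = c = np.ones((8, 8))
+ >>> result = np.einsum('ij,jk,kl->il', a, b, c, optimize=True)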
+
+quicksort has been changed to an introsort
+------------------------------------------
+The quicksort kind of ``np.sort`` and ``np.argsort`` is now an introsort,
+which is a regular quicksort that switches to heapsort when not enough
+progress is made. This retains the good quicksort performance while changing
+the worst case runtime from ``O(N^2)`` to ``O(N*log(N))``.
+
+``ediff1d`` improved performance and subclass handling
+------------------------------------------------------
+The ``ediff1d`` function uses an array instead of a flat iterator for the
+subtraction. When ``to_begin`` or ``to_end`` is not None, the subtraction is
+performed in place to eliminate a copy operation. A side effect is that certain
+subclasses are handled better, namely ``astropy.Quantity``, since the complete
+array is created, wrapped, and then begin and end values are set, instead of
+using concatenate.
+
+Improved precision of ``ndarray.mean`` for float16 arrays
+---------------------------------------------------------
+The computation of the mean of float16 arrays is now carried out in float32 for
+improved precision. This should be useful in packages such as Theano
+where the precision of float16 is adequate and its smaller footprint is
+desirable.
+
+
+Changes
+=======
+
+All array-like methods are now called with keyword arguments in fromnumeric.py
+------------------------------------------------------------------------------
+Internally, many array-like methods in fromnumeric.py were being called with
+positional arguments instead of the keyword arguments that their external
+signatures use. This caused a compatibility complication in the downstream
+'pandas' library. Now, all array-like
+methods in this module are called with keyword arguments instead.
+
+Operations on np.memmap objects return numpy arrays in most cases
+-----------------------------------------------------------------
+Previously operations on a memmap object would misleadingly return a memmap
+instance even if the result was actually not memmapped. For example,
+``arr + 1`` or ``arr + arr`` would return memmap instances, although no memory
+from the output array is memmapped. Version 1.12 returns ordinary numpy arrays
+from these operations.
+
+Also, reduction of a memmap (e.g. ``.sum(axis=None)``) now returns a numpy
+scalar instead of a 0d memmap.
+
+stacklevel of warnings increased
+--------------------------------
+The stacklevel for python based warnings was increased so that most warnings
+will report the offending line of the user code instead of the line at which
+the warning itself is raised. Passing of stacklevel is now tested to ensure that
+new warnings will receive the ``stacklevel`` argument.
+
+This causes warnings with the "default" or "module" filter to be shown once
+for every offending user code line or user module instead of only once. On
+python versions before 3.4, this can cause warnings to appear that were falsely
+ignored before, which may be surprising, especially in test suites.
--- /dev/null
+==========================
+NumPy 1.12.1 Release Notes
+==========================
+
+NumPy 1.12.1 supports Python 2.7 and 3.4 - 3.6 and fixes bugs and regressions
+found in NumPy 1.12.0. In particular, the regression in f2py constant parsing
+is fixed. Wheels for Linux, Windows, and OSX can be found on PyPI.
+
+Bugs Fixed
+==========
+
+* BUG: Fix wrong future nat warning and equiv type logic error...
+* BUG: Fix wrong masked median for some special cases
+* DOC: Place np.average in inline code
+* TST: Work around isfinite inconsistency on i386
+* BUG: Guard against replacing constants without '_' spec in f2py.
+* BUG: Fix mean for float 16 non-array inputs for 1.12
+* BUG: Fix calling python api with error set and minor leaks for...
+* BUG: Make iscomplexobj compatible with custom dtypes again
+* BUG: Fix undefined behaviour induced by bad __array_wrap__
+* BUG: Fix MaskedArray.__setitem__
+* BUG: PPC64el machines are POWER for Fortran in f2py
+* BUG: Look up methods on MaskedArray in `_frommethod`
+* BUG: Remove extra digit in binary_repr at limit
+* BUG: Fix deepcopy regression for empty arrays.
+* BUG: Fix ma.median for empty ndarrays
--- /dev/null
+==========================
+NumPy 1.13.0 Release Notes
+==========================
+
+This release supports Python 2.7 and 3.4 - 3.6.
+
+
+Highlights
+==========
+
+ * Operations like ``a + b + c`` will reuse temporaries on some platforms,
+ resulting in less memory use and faster execution.
+ * Inplace operations check if inputs overlap outputs and create temporaries
+ to avoid problems.
+ * New ``__array_ufunc__`` attribute provides improved ability for classes to
+ override default ufunc behavior.
+ * New ``np.block`` function for creating blocked arrays.
+
+
+New functions
+=============
+
+* New ``np.positive`` ufunc.
+* New ``np.divmod`` ufunc provides more efficient divmod.
+* New ``np.isnat`` ufunc tests for NaT special values.
+* New ``np.heaviside`` ufunc computes the Heaviside function.
+* New ``np.isin`` function, improves on ``in1d``.
+* New ``np.block`` function for creating blocked arrays.
+* New ``PyArray_MapIterArrayCopyIfOverlap`` added to NumPy C-API.
+
+See below for details.
+
+
+Deprecations
+============
+
+* Calling ``np.fix``, ``np.isposinf``, and ``np.isneginf`` with ``f(x, y=out)``
+ is deprecated - the argument should be passed as ``f(x, out=out)``, which
+ matches other ufunc-like interfaces.
+* Use of the C-API ``NPY_CHAR`` type number deprecated since version 1.7 will
+ now raise deprecation warnings at runtime. Extensions built with older f2py
+ versions need to be recompiled to remove the warning.
+* ``np.ma.argsort``, ``np.ma.minimum.reduce``, and ``np.ma.maximum.reduce``
+ should be called with an explicit `axis` argument when applied to arrays with
+ more than 2 dimensions, as the default value of this argument (``None``) is
+ inconsistent with the rest of numpy (``-1``, ``0``, and ``0``, respectively).
+* ``np.ma.MaskedArray.mini`` is deprecated, as it almost duplicates the
+ functionality of ``np.ma.MaskedArray.min``. Exactly equivalent behaviour
+ can be obtained with ``np.ma.minimum.reduce``.
+* The single-argument form of ``np.ma.minimum`` and ``np.ma.maximum`` is
+ deprecated. ``np.ma.minimum(x)`` should now be spelt
+ ``np.ma.minimum.reduce(x)``, which is consistent with how this would be done
+ with ``np.minimum``.
+* Calling ``ndarray.conjugate`` on non-numeric dtypes is deprecated (it
+ should match the behavior of ``np.conjugate``, which throws an error).
+* Calling ``expand_dims`` when the ``axis`` keyword does not satisfy
+ ``-a.ndim - 1 <= axis <= a.ndim``, where ``a`` is the array being reshaped,
+ is deprecated.
+
+
+Future Changes
+==============
+
+* Assignment between structured arrays with different field names will change
+ in NumPy 1.14. Previously, fields in the dst would be set to the value of the
+ identically-named field in the src. In numpy 1.14 fields will instead be
+ assigned 'by position': The n-th field of the dst will be set to the n-th
+ field of the src array. Note that the ``FutureWarning`` raised in NumPy 1.12
+ incorrectly reported this change as scheduled for NumPy 1.13 rather than
+ NumPy 1.14.
+
+
+Build System Changes
+====================
+
+* ``numpy.distutils`` now automatically determines C-file dependencies with
+ GCC compatible compilers.
+
+
+Compatibility notes
+===================
+
+Error type changes
+------------------
+
+* ``numpy.hstack()`` now throws ``ValueError`` instead of ``IndexError`` when
+ input is empty.
+* Functions taking an axis argument, when that argument is out of range, now
+ throw ``np.AxisError`` instead of a mixture of ``IndexError`` and
+ ``ValueError``. For backwards compatibility, ``AxisError`` subclasses both of
+ these.
+
+Tuple object dtypes
+-------------------
+
+Support has been removed for certain obscure dtypes that were unintentionally
+allowed, of the form ``(old_dtype, new_dtype)``, where either of the dtypes
+is or contains the ``object`` dtype. As an exception, dtypes of the form
+``(object, [('name', object)])`` are still supported due to evidence of
+existing use.
+
+DeprecationWarning to error
+---------------------------
+See Changes section for more detail.
+
+* ``partition``, TypeError when non-integer partition index is used.
+* ``NpyIter_AdvancedNew``, ValueError when ``oa_ndim == 0`` and ``op_axes`` is NULL
+* ``negative(bool_)``, TypeError when negative applied to booleans.
+* ``subtract(bool_, bool_)``, TypeError when subtracting boolean from boolean.
+* ``np.equal, np.not_equal``, object identity doesn't override failed comparison.
+* ``np.equal, np.not_equal``, object identity doesn't override non-boolean comparison.
+* Deprecated boolean indexing behavior dropped. See Changes below for details.
+* Deprecated ``np.alterdot()`` and ``np.restoredot()`` removed.
+
+FutureWarning to changed behavior
+---------------------------------
+See Changes section for more detail.
+
+* ``numpy.average`` preserves subclasses
+* ``array == None`` and ``array != None`` do element-wise comparison.
+* ``np.equal, np.not_equal``, object identity doesn't override comparison result.
+
+dtypes are now always true
+--------------------------
+
+Previously ``bool(dtype)`` would fall back to the default python
+implementation, which checked if ``len(dtype) > 0``. Since ``dtype`` objects
+implement ``__len__`` as the number of record fields, ``bool`` of scalar dtypes
+would evaluate to ``False``, which was unintuitive. Now ``bool(dtype) == True``
+for all dtypes.
+
+``__getslice__`` and ``__setslice__`` are no longer needed in ``ndarray`` subclasses
+------------------------------------------------------------------------------------
+When subclassing np.ndarray in Python 2.7, it is no longer *necessary* to
+implement ``__*slice__`` on the derived class, as ``__*item__`` will intercept
+these calls correctly.
+
+Any code that did implement these will work exactly as before. Code that
+invokes ``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will
+now issue a DeprecationWarning - ``.__getitem__(slice(start, end))`` should be
+used instead.
+
+Indexing MaskedArrays/Constants with ``...`` (ellipsis) now returns MaskedArray
+-------------------------------------------------------------------------------
+This behavior mirrors that of np.ndarray, and accounts for nested arrays in
+MaskedArrays of object dtype, and ellipsis combined with other forms of
+indexing.
+
+C API changes
+=============
+
+GUfuncs on empty arrays and NpyIter axis removal
+------------------------------------------------
+It is now allowed to remove a zero-sized axis from NpyIter, which may mean
+that code removing axes from NpyIter has to add an additional check when
+accessing the removed dimensions later on.
+
+The largest followup change is that gufuncs are now allowed to have zero-sized
+inner dimensions. This means that a gufunc now has to anticipate an empty inner
+dimension, whereas previously this was not possible and an error was raised
+instead.
+
+For most gufuncs no change should be necessary. However, it is now possible
+for gufuncs with a signature such as ``(..., N, M) -> (..., M)`` to return
+a valid result if ``N=0`` without further wrapping code.
+
+``PyArray_MapIterArrayCopyIfOverlap`` added to NumPy C-API
+----------------------------------------------------------
+Similar to ``PyArray_MapIterArray`` but with an additional ``copy_if_overlap``
+argument. If ``copy_if_overlap != 0``, checks if input has memory overlap with
+any of the other arrays and makes copies as appropriate to avoid problems if
+the input is modified during the iteration. See the documentation for more
+details.
+
+
+New Features
+============
+
+``__array_ufunc__`` added
+-------------------------
+This is the renamed and redesigned ``__numpy_ufunc__``. Any class, ndarray
+subclass or not, can define this method or set it to ``None`` in order to
+override the behavior of NumPy's ufuncs. This works quite similarly to Python's
+``__mul__`` and other binary operation routines. See the documentation for a
+more detailed description of the implementation and behavior of this new
+option. The API is provisional; we do not yet guarantee backward compatibility
+as modifications may be made pending feedback. See `NEP 13`_ and
+documentation_ for more details.
+
+.. _`NEP 13`: http://www.numpy.org/neps/nep-0013-ufunc-overrides.html
+.. _documentation: https://github.com/numpy/numpy/blob/master/doc/source/reference/arrays.classes.rst
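+
+As a minimal sketch (class and operator chosen for illustration), setting the
+attribute to ``None`` makes ufuncs return ``NotImplemented``, so Python falls
+back to the other operand's reflected operator::
+
+ class OptsOut(object):
+     __array_ufunc__ = None # opt out of numpy ufuncs entirely
+
+     def __radd__(self, other):
+         return "OptsOut.__radd__ called"
+
+ np.arange(3) + OptsOut() # -> 'OptsOut.__radd__ called'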
+
+New ``positive`` ufunc
+----------------------
+This ufunc corresponds to unary `+`, but unlike `+` on an ndarray it will raise
+an error if array values do not support numeric operations.
+
+New ``divmod`` ufunc
+--------------------
+This ufunc corresponds to the Python builtin `divmod`, and is used to implement
+`divmod` when called on numpy arrays. ``np.divmod(x, y)`` calculates a result
+equivalent to ``(np.floor_divide(x, y), np.remainder(x, y))`` but is
+approximately twice as fast as calling the functions separately.
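+
+For example::
+
+ >>> np.divmod(np.arange(5), 3)
+ (array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))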
+
+``np.isnat`` ufunc tests for NaT special datetime and timedelta values
+----------------------------------------------------------------------
+The new ufunc ``np.isnat`` finds the positions of special NaT values
+within datetime and timedelta arrays. This is analogous to ``np.isnan``.
+
+``np.heaviside`` ufunc computes the Heaviside function
+------------------------------------------------------
+The new function ``np.heaviside(x, h0)`` (a ufunc) computes the Heaviside
+function:
+
+.. code::
+
+ { 0 if x < 0,
+ heaviside(x, h0) = { h0 if x == 0,
+ { 1 if x > 0.
+
+``np.block`` function for creating blocked arrays
+-------------------------------------------------
+Add a new ``block`` function to the current stacking functions ``vstack``,
+``hstack``, and ``stack``. This allows concatenation across multiple axes
+simultaneously, with a similar syntax to array creation, but where elements
+can themselves be arrays. For instance::
+
+ >>> A = np.eye(2) * 2
+ >>> B = np.eye(3) * 3
+ >>> np.block([
+ ... [A, np.zeros((2, 3))],
+ ... [np.ones((3, 2)), B ]
+ ... ])
+ array([[ 2., 0., 0., 0., 0.],
+ [ 0., 2., 0., 0., 0.],
+ [ 1., 1., 3., 0., 0.],
+ [ 1., 1., 0., 3., 0.],
+ [ 1., 1., 0., 0., 3.]])
+
+While primarily useful for block matrices, this works for arbitrary dimensions
+of arrays.
+
+It is similar to Matlab's square bracket notation for creating block matrices.
+
+``isin`` function, improving on ``in1d``
+----------------------------------------
+The new function ``isin`` tests whether each element of an N-dimensional
+array is present anywhere within a second array. It is an enhancement
+of ``in1d`` that preserves the shape of the first array.
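+
+For example, the shape of a 2-d query array is preserved (values are
+illustrative)::
+
+ >>> element = np.array([[0, 2], [4, 6]])
+ >>> np.isin(element, [1, 2, 4, 8])
+ array([[False, True],
+ [ True, False]])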
+
+Temporary elision
+-----------------
+On platforms providing the ``backtrace`` function NumPy will try to avoid
+creating temporaries in expressions involving basic numeric types.
+For example ``d = a + b + c`` is transformed to ``d = a + b; d += c`` which can
+improve performance for large arrays as less memory bandwidth is required to
+perform the operation.
+
+``axis`` argument for ``unique``
+--------------------------------
+In an N-dimensional array, the user can now choose the axis along which to look
+for duplicate N-1-dimensional elements using ``numpy.unique``. The original
+behaviour is recovered if ``axis=None`` (default).
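+
+For example, finding the unique rows of a 2-d array::
+
+ >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
+ >>> np.unique(a, axis=0)
+ array([[1, 0, 0],
+ [2, 3, 4]])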
+
+``np.gradient`` now supports unevenly spaced data
+-------------------------------------------------
+Users can now specify a non-constant spacing for the data.
+In particular ``np.gradient`` can now take:
+
+1. A single scalar to specify a sample distance for all dimensions.
+2. N scalars to specify a constant sample distance for each dimension.
+ i.e. ``dx``, ``dy``, ``dz``, ...
+3. N arrays to specify the coordinates of the values along each dimension of F.
+ The length of the array must match the size of the corresponding dimension.
+4. Any combination of N scalars/arrays with the meaning of 2. and 3.
+
+This means that, e.g., it is now possible to do the following::
+
+ >>> f = np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float_)
+ >>> dx = 2.
+ >>> y = [1., 1.5, 3.5]
+ >>> np.gradient(f, dx, y)
+ [array([[ 1. , 1. , -0.5], [ 1. , 1. , -0.5]]),
+ array([[ 2. , 2. , 2. ], [ 2. , 1.7, 0.5]])]
+
+Support for returning arrays of arbitrary dimensions in ``apply_along_axis``
+----------------------------------------------------------------------------
+Previously, only scalars or 1D arrays could be returned by the function passed
+to ``apply_along_axis``. Now, it can return an array of any dimensionality
+(including 0D), and the shape of this array replaces the axis of the array
+being iterated over.
+
+``.ndim`` property added to ``dtype`` to complement ``.shape``
+--------------------------------------------------------------
+For consistency with ``ndarray`` and ``broadcast``, ``d.ndim`` is a shorthand
+for ``len(d.shape)``.
+
+Support for tracemalloc in Python 3.6
+-------------------------------------
+NumPy now supports memory tracing with the tracemalloc_ module of Python 3.6 or
+newer. Memory allocations from NumPy are placed into the domain defined by
+``numpy.lib.tracemalloc_domain``.
+Note that NumPy allocations will not show up in tracemalloc_ on earlier Python
+versions.
+
+.. _tracemalloc: https://docs.python.org/3/library/tracemalloc.html
+
+NumPy may be built with relaxed stride checking debugging
+---------------------------------------------------------
+Setting NPY_RELAXED_STRIDES_DEBUG=1 in the environment when relaxed stride
+checking is enabled will cause NumPy to be compiled with the affected strides
+set to the maximum value of npy_intp in order to help detect invalid usage of
+the strides in downstream projects. When enabled, invalid usage often results
+in an error being raised, but the exact type of error depends on the details of
+the code. TypeError and OverflowError have been observed in the wild.
+
+It was previously the case that this option was disabled for releases and
+enabled in master and changing between the two required editing the code. It is
+now disabled by default but can be enabled for test builds.
+
+
+Improvements
+============
+
+Ufunc behavior for overlapping inputs
+-------------------------------------
+
+Operations where ufunc input and output operands have memory overlap
+produced undefined results in previous NumPy versions, due to data
+dependency issues. In NumPy 1.13.0, results from such operations are
+now defined to be the same as for equivalent operations where there is
+no memory overlap.
+
+Operations affected now make temporary copies, as needed to eliminate
+data dependency. As detecting these cases is computationally
+expensive, a heuristic is used, which may in rare cases result in
+needless temporary copies. For operations where the data dependency
+is simple enough for the heuristic to analyze, temporary copies will
+not be made even if the arrays overlap, if it can be deduced copies
+are not necessary. As an example, ``np.add(a, b, out=a)`` will not
+involve copies.
+
+To illustrate a previously undefined operation::
+
+ >>> x = np.arange(16).astype(float)
+ >>> np.add(x[1:], x[:-1], out=x[1:])
+
+In NumPy 1.13.0 the last line is guaranteed to be equivalent to::
+
+ >>> np.add(x[1:].copy(), x[:-1].copy(), out=x[1:])
+
+A similar operation with simple non-problematic data dependence is::
+
+ >>> x = np.arange(16).astype(float)
+ >>> np.add(x[1:], x[:-1], out=x[:-1])
+
+It will continue to produce the same results as in previous NumPy
+versions, and will not involve unnecessary temporary copies.
+
+The change applies also to in-place binary operations, for example::
+
+ >>> x = np.random.rand(500, 500)
+ >>> x += x.T
+
+This statement is now guaranteed to be equivalent to ``x[...] = x + x.T``,
+whereas in previous NumPy versions the results were undefined.
+
+Partial support for 64-bit f2py extensions with MinGW
+-----------------------------------------------------
+Extensions that incorporate Fortran libraries can now be built using the free
+MinGW_ toolset, also under Python 3.5. This works best for extensions that only
+do calculations and use the runtime modestly (reading and writing from files,
+for instance). Note that this does not remove the need for Mingwpy; if you make
+extensive use of the runtime, you will most likely run into issues_. Instead,
+it should be regarded as a band-aid until Mingwpy is fully functional.
+
+Extensions can also be compiled using the MinGW toolset using the runtime
+library from the (moveable) WinPython 3.4 distribution, which can be useful for
+programs with a PySide1/Qt4 front-end.
+
+.. _MinGW: https://sf.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/6.2.0/threads-win32/seh/
+
+.. _issues: https://mingwpy.github.io/issues.html
+
+Performance improvements for ``packbits`` and ``unpackbits``
+------------------------------------------------------------
+The functions ``numpy.packbits`` with boolean input and ``numpy.unpackbits`` have
+been optimized to be significantly faster for contiguous data.
+
+Fix for PPC long double floating point information
+--------------------------------------------------
+In previous versions of NumPy, the ``finfo`` function returned invalid
+information about the `double double`_ format of the ``longdouble`` float type
+on Power PC (PPC). The invalid values resulted from the failure of the NumPy
+algorithm to deal with the variable number of digits in the significand
+that are a feature of `PPC long doubles`_. This release bypasses the failing
+algorithm by using heuristics to detect the presence of the PPC double double
+format. A side effect of using these heuristics is that the ``finfo``
+function is faster than in previous releases.
+
+.. _PPC long doubles: https://www.ibm.com/support/knowledgecenter/en/ssw_aix_71/com.ibm.aix.genprogc/128bit_long_double_floating-point_datatype.htm
+
+.. _double double: https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
+
+Better default repr for ``ndarray`` subclasses
+----------------------------------------------
+Subclasses of ndarray with no ``repr`` specialization now correctly indent
+their data and type lines.
+
+More reliable comparisons of masked arrays
+------------------------------------------
+Comparisons of masked arrays were buggy for masked scalars and failed for
+structured arrays with dimension higher than one. Both problems are now
+solved. In the process, it was ensured that in getting the result for a
+structured array, masked fields are properly ignored, i.e., the result is equal
+if all fields that are non-masked in both are equal, thus making the behaviour
+identical to what one gets by comparing an unstructured masked array and then
+doing ``.all()`` over some axis.
+
+np.matrix with booleans elements can now be created using the string syntax
+---------------------------------------------------------------------------
+``np.matrix`` previously failed whenever one attempted to use it with booleans,
+e.g., ``np.matrix('True')``. Now, this works as expected.
+
+More ``linalg`` operations now accept empty vectors and matrices
+----------------------------------------------------------------
+All of the following functions in ``np.linalg`` now work when given input
+arrays with a 0 in the last two dimensions: ``det``, ``slogdet``, ``pinv``,
+``eigvals``, ``eigvalsh``, ``eig``, ``eigh``.
+
+Bundled version of LAPACK is now 3.2.2
+--------------------------------------
+NumPy comes bundled with a minimal implementation of lapack for systems without
+a lapack library installed, under the name of ``lapack_lite``. This has been
+upgraded from LAPACK 3.0.0 (June 30, 1999) to LAPACK 3.2.2 (June 30, 2010). See
+the `LAPACK changelogs`_ for details on all the changes this entails.
+
+While no new features are exposed through ``numpy``, this fixes some bugs
+regarding "workspace" sizes, and in some places may use faster algorithms.
+
+.. _`LAPACK changelogs`: http://www.netlib.org/lapack/release_notes.html#_4_history_of_lapack_releases
+
+``reduce`` of ``np.hypot`` and ``np.logical_xor`` allowed in more cases
+------------------------------------------------------------------------------
+This now works on empty arrays, returning 0, and can reduce over multiple axes.
+Previously, a ``ValueError`` was thrown in these cases.
+
+Better ``repr`` of object arrays
+--------------------------------
+Object arrays that contain themselves no longer cause a recursion error.
+
+Object arrays that contain ``list`` objects are now printed in a way that makes
+clear the difference between a 2d object array, and a 1d object array of lists.
+
+Changes
+=======
+
+``argsort`` on masked arrays takes the same default arguments as ``sort``
+-------------------------------------------------------------------------
+By default, ``argsort`` now places the masked values at the end of the sorted
+array, in the same way that ``sort`` already did. Additionally, the
+``end_with`` argument is added to ``argsort``, for consistency with ``sort``.
+Note that this argument is not added at the end, so it breaks any code that
+passed ``fill_value`` as a positional argument.
+
+``average`` now preserves subclasses
+------------------------------------
+For ndarray subclasses, ``numpy.average`` will now return an instance of the
+subclass, matching the behavior of most other NumPy functions such as ``mean``.
+As a consequence, calls that previously returned a scalar may now return a
+subclass array scalar.
+
+``array == None`` and ``array != None`` do element-wise comparison
+------------------------------------------------------------------
+Previously these operations returned scalars ``False`` and ``True`` respectively.
+
+``np.equal, np.not_equal`` for object arrays ignores object identity
+--------------------------------------------------------------------
+Previously, these functions always treated identical objects as equal. This had
+the effect of overriding comparison failures, comparisons of objects that did
+not return booleans (such as np.arrays), and comparisons of objects where the
+result differed from object identity (such as NaNs).
+
+Boolean indexing changes
+------------------------
+* Boolean array-likes (such as lists of python bools) are always treated as
+ boolean indexes.
+
+* Boolean scalars (including python ``True``) are legal boolean indexes and
+ never treated as integers.
+
+* Boolean indexes must match the dimension of the axis that they index.
+
+* Boolean indexes used on the lhs of an assignment must match the dimensions of
+ the rhs.
+
+* Boolean indexing into scalar arrays returns a new 1-d array. This means that
+ ``array(1)[array(True)]`` gives ``array([1])`` and not the original array
+ (see the sketches below).
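+
+A couple of sketches of the new behavior (values are illustrative)::
+
+ >>> x = np.arange(3)
+ >>> x[[True, False, True]] # a list of bools is a boolean index
+ array([0, 2])
+ >>> np.array(1)[np.array(True)]
+ array([1])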
+
+``np.random.multivariate_normal`` behavior with bad covariance matrix
+---------------------------------------------------------------------
+
+It is now possible to adjust the behavior of the function when dealing
+with the covariance matrix by using two new keyword arguments:
+
+* ``tol`` can be used to specify a tolerance to use when checking that
+ the covariance matrix is positive semidefinite.
+
+* ``check_valid`` can be used to configure what the function will do in the
+ presence of a matrix that is not positive semidefinite. Valid options are
+ ``ignore``, ``warn`` and ``raise``. The default value, ``warn``, keeps the
+ behavior used in previous releases.
+
+``assert_array_less`` compares ``np.inf`` and ``-np.inf`` now
+-------------------------------------------------------------
+Previously, ``np.testing.assert_array_less`` ignored all infinite values. This
+matched neither the documentation nor intuition.
+Now, -inf < x < inf is considered ``True`` for any real number x, and all
+other cases fail.
+
+``assert_array_`` and masked arrays ``assert_equal`` hide fewer warnings
+-------------------------------------------------------------------------
+Some warnings that were previously hidden by the ``assert_array_``
+functions are no longer hidden. In most cases the warnings should be
+correct and, should they occur, will require changes to the tests using
+these functions.
+For the masked array ``assert_equal`` version, warnings may occur when
+comparing NaT. The function presently does not handle NaT or NaN
+specifically, and it may be best to avoid it at this time should a warning
+show up due to this change.
+
+``offset`` attribute value in ``memmap`` objects
+------------------------------------------------
+The ``offset`` attribute in a ``memmap`` object is now set to the
+offset into the file. This is a behaviour change only for offsets
+greater than ``mmap.ALLOCATIONGRANULARITY``.
+
+``np.real`` and ``np.imag`` return scalars for scalar inputs
+------------------------------------------------------------
+Previously, ``np.real`` and ``np.imag`` used to return array objects when
+provided a scalar input, which was inconsistent with other functions like
+``np.angle`` and ``np.conj``.
+
+The polynomial convenience classes cannot be passed to ufuncs
+-------------------------------------------------------------
+The ABCPolyBase class, from which the convenience classes are derived, sets
+``__array_ufunc__ = None`` in order to opt out of ufuncs. If a polynomial
+convenience class instance is passed as an argument to a ufunc, a ``TypeError``
+will now be raised.
+
+Output arguments to ufuncs can be tuples also for ufunc methods
+---------------------------------------------------------------
+For calls to ufuncs, it was already possible, and recommended, to use an
+``out`` argument with a tuple for ufuncs with multiple outputs. This has now
+been extended to output arguments in the ``reduce``, ``accumulate``, and
+``reduceat`` methods. This is mostly for compatibility with ``__array_ufunc__``;
+there are no ufuncs yet that have more than one output.
--- /dev/null
+==========================
+NumPy 1.13.1 Release Notes
+==========================
+
+This is a bugfix release for problems found in 1.13.0. The major changes are
+fixes for the new memory overlap detection and temporary elision as well as
+reversion of the removal of the boolean binary ``-`` operator. Users of 1.13.0
+should upgrade.
+
+The Python versions supported are 2.7 and 3.4 - 3.6. Note that the Python 3.6
+wheels available from PyPI are built against 3.6.1, hence will not work when
+used with 3.6.0 due to Python bug 29943_. NumPy 1.13.2 will be released shortly
+after Python 3.6.2 is out to fix that problem. If you are using 3.6.0 the
+workaround is to upgrade to 3.6.1 or use an earlier Python version.
+
+.. _29943: https://bugs.python.org/issue29943
+
+
+Pull requests merged
+====================
+A total of 19 pull requests were merged for this release.
+
+* #9240 DOC: BLD: fix lots of Sphinx warnings/errors.
+* #9255 Revert "DEP: Raise TypeError for subtract(bool, bool)."
+* #9261 BUG: don't elide into readonly and updateifcopy temporaries for...
+* #9262 BUG: fix missing keyword rename for common block in numpy.f2py
+* #9263 BUG: handle resize of 0d array
+* #9267 DOC: update f2py front page and some doc build metadata.
+* #9299 BUG: Fix Intel compilation on Unix.
+* #9317 BUG: fix wrong ndim used in empty where check
+* #9319 BUG: Make extensions compilable with MinGW on Py2.7
+* #9339 BUG: Prevent crash if ufunc doc string is null
+* #9340 BUG: umath: un-break ufunc where= when no out= is given
+* #9371 DOC: Add isnat/positive ufunc to documentation
+* #9372 BUG: Fix error in fromstring function from numpy.core.records...
+* #9373 BUG: ')' is printed at the end pointer of the buffer in numpy.f2py.
+* #9374 DOC: Create NumPy 1.13.1 release notes.
+* #9376 BUG: Prevent hang traversing ufunc userloop linked list
+* #9377 DOC: Use x1 and x2 in the heaviside docstring.
+* #9378 DOC: Add $PARAMS to the isnat docstring
+* #9379 DOC: Update the 1.13.1 release notes
+
+
+Contributors
+============
+A total of 12 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Andras Deak +
+* Bob Eldering +
+* Charles Harris
+* Daniel Hrisca +
+* Eric Wieser
+* Joshua Leahy +
+* Julian Taylor
+* Michael Seifert
+* Pauli Virtanen
+* Ralf Gommers
+* Roland Kaufmann
+* Warren Weckesser
--- /dev/null
+==========================
+NumPy 1.13.2 Release Notes
+==========================
+
+This is a bugfix release for some problems found since 1.13.1. The most
+important fixes are for CVE-2017-12852 and temporary elision. Users of earlier
+versions of 1.13 should upgrade.
+
+The Python versions supported are 2.7 and 3.4 - 3.6. The Python 3.6 wheels
+available from PyPI are built with Python 3.6.2 and should be compatible with
+all previous versions of Python 3.6. The Windows wheels are now built
+with OpenBLAS instead of ATLAS, which should improve the performance of the
+linear algebra functions.
+
+Contributors
+============
+
+A total of 12 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Brandon Carter
+* Charles Harris
+* Eric Wieser
+* Iryna Shcherbina +
+* James Bourbeau +
+* Jonathan Helmus
+* Julian Taylor
+* Matti Picus
+* Michael Lamparski +
+* Michael Seifert
+* Ralf Gommers
+
+Pull requests merged
+====================
+
+A total of 20 pull requests were merged for this release.
+
+* #9390 BUG: Return the poly1d coefficients array directly
+* #9555 BUG: Fix regression in 1.13.x in distutils.mingw32ccompiler.
+* #9556 BUG: Fix true_divide when dtype=np.float64 specified.
+* #9557 DOC: Fix some rst markup in numpy/doc/basics.py.
+* #9558 BLD: Remove -xhost flag from IntelFCompiler.
+* #9559 DOC: Removes broken docstring example (source code, png, pdf)...
+* #9580 BUG: Add hypot and cabs functions to WIN32 blacklist.
+* #9732 BUG: Make scalar function elision check if temp is writeable.
+* #9736 BUG: Various fixes to np.gradient
+* #9742 BUG: Fix np.pad for CVE-2017-12852
+* #9744 BUG: Check for exception in sort functions, add tests
+* #9745 DOC: Add whitespace after "versionadded::" directive so it actually...
+* #9746 BUG: Memory leak in np.dot of size 0
+* #9747 BUG: Adjust gfortran version search regex
+* #9757 BUG: Cython 0.27 breaks NumPy on Python 3.
+* #9764 BUG: Ensure `_npy_scaled_cexp{,f,l}` is defined when needed.
+* #9765 BUG: PyArray_CountNonzero does not check for exceptions
+* #9766 BUG: Fixes histogram monotonicity check for unsigned bin values
+* #9767 BUG: Ensure consistent result dtype of count_nonzero
+* #9771 BUG, MAINT: Fix mtrand for Cython 0.27.
--- /dev/null
+==========================
+NumPy 1.13.3 Release Notes
+==========================
+
+This is a bugfix release for some problems found since 1.13.1. The most
+important fixes are for CVE-2017-12852 and temporary elision. Users of earlier
+versions of 1.13 should upgrade.
+
+The Python versions supported are 2.7 and 3.4 - 3.6. The Python 3.6 wheels
+available from PyPI are built with Python 3.6.2 and should be compatible with
+all previous versions of Python 3.6. The wheels were cythonized with Cython
+0.26.1, which should be free of the bugs found in 0.27 while also being
+compatible with Python 3.7-dev. The Windows wheels were built with OpenBLAS
+instead of ATLAS, which should improve the performance of the linear algebra
+functions.
+
+The NumPy 1.13.3 release is a re-release of 1.13.2, which suffered from a
+bug in Cython 0.27.0.
+
+Contributors
+============
+
+A total of 12 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Brandon Carter
+* Charles Harris
+* Eric Wieser
+* Iryna Shcherbina +
+* James Bourbeau +
+* Jonathan Helmus
+* Julian Taylor
+* Matti Picus
+* Michael Lamparski +
+* Michael Seifert
+* Ralf Gommers
+
+Pull requests merged
+====================
+
+A total of 22 pull requests were merged for this release.
+
+* #9390 BUG: Return the poly1d coefficients array directly
+* #9555 BUG: Fix regression in 1.13.x in distutils.mingw32ccompiler.
+* #9556 BUG: Fix true_divide when dtype=np.float64 specified.
+* #9557 DOC: Fix some rst markup in numpy/doc/basics.py.
+* #9558 BLD: Remove -xhost flag from IntelFCompiler.
+* #9559 DOC: Removes broken docstring example (source code, png, pdf)...
+* #9580 BUG: Add hypot and cabs functions to WIN32 blacklist.
+* #9732 BUG: Make scalar function elision check if temp is writeable.
+* #9736 BUG: Various fixes to np.gradient
+* #9742 BUG: Fix np.pad for CVE-2017-12852
+* #9744 BUG: Check for exception in sort functions, add tests
+* #9745 DOC: Add whitespace after "versionadded::" directive so it actually...
+* #9746 BUG: Memory leak in np.dot of size 0
+* #9747 BUG: Adjust gfortran version search regex
+* #9757 BUG: Cython 0.27 breaks NumPy on Python 3.
+* #9764 BUG: Ensure `_npy_scaled_cexp{,f,l}` is defined when needed.
+* #9765 BUG: PyArray_CountNonzero does not check for exceptions
+* #9766 BUG: Fixes histogram monotonicity check for unsigned bin values
+* #9767 BUG: Ensure consistent result dtype of count_nonzero
+* #9771 BUG: MAINT: Fix mtrand for Cython 0.27.
+* #9772 DOC: Create the 1.13.2 release notes.
+* #9794 DOC: Create 1.13.3 release notes.
--- /dev/null
+==========================
+NumPy 1.14.0 Release Notes
+==========================
+
+Numpy 1.14.0 is the result of seven months of work and contains a large number
+of bug fixes and new features, along with several changes with potential
+compatibility issues. The major change that users will notice is the set of
+stylistic changes in the way numpy arrays and scalars are printed, a change
+that will affect doctests. See below for details on how to preserve the
+old style printing when needed.
+
+A major decision affecting future development concerns the schedule for
+dropping Python 2.7 support in the runup to 2020. The decision has been made to
+support 2.7 for all releases made in 2018, with the last release being
+designated a long term release with support for bug fixes extending through
+2019. In 2019 support for 2.7 will be dropped in all new releases. More details
+can be found in `NEP 14`_.
+
+This release supports Python 2.7 and 3.4 - 3.6.
+
+.. _`NEP 14`: http://www.numpy.org/neps/nep-0014-dropping-python2.7-proposal.html
+
+
+Highlights
+==========
+
+* The `np.einsum` function uses BLAS when possible
+
+* ``genfromtxt``, ``loadtxt``, ``fromregex`` and ``savetxt`` can now handle
+ files with arbitrary encodings supported by Python.
+
+* Major improvements to printing of NumPy arrays and scalars.
+
+
+New functions
+=============
+
+* ``parametrize``: decorator added to numpy.testing
+
+* ``chebinterpolate``: Interpolate function at Chebyshev points.
+
+* ``format_float_positional`` and ``format_float_scientific``: format
+ floating-point scalars unambiguously with control of rounding and padding.
+
+* ``PyArray_ResolveWritebackIfCopy`` and ``PyArray_SetWritebackIfCopyBase``,
+ new C-API functions useful in achieving PyPy compatibility.
+
+
+Deprecations
+============
+
+* Using ``np.bool_`` objects in place of integers is deprecated. Previously
+ ``operator.index(np.bool_)`` was legal and allowed constructs such as
+ ``[1, 2, 3][np.True_]``. That was misleading, as it behaved differently from
+ ``np.array([1, 2, 3])[np.True_]``.
+
+* Truth testing of an empty array is deprecated. To check if an array is not
+ empty, use ``array.size > 0``.
+
+* Calling ``np.bincount`` with ``minlength=None`` is deprecated.
+ ``minlength=0`` should be used instead.
+
+* Calling ``np.fromstring`` with the default value of the ``sep`` argument is
+ deprecated. When that argument is not provided, a broken version of
+ ``np.frombuffer`` is used that silently accepts unicode strings and -- after
+ encoding them as either utf-8 (python 3) or the default encoding
+ (python 2) -- treats them as binary data. If reading binary data is
+ desired, ``np.frombuffer`` should be used directly.
+
+* The ``style`` option of array2string is deprecated in non-legacy printing mode.
+
+* ``PyArray_SetUpdateIfCopyBase`` has been deprecated. For NumPy versions >= 1.14
+ use ``PyArray_SetWritebackIfCopyBase`` instead, see `C API changes` below for
+ more details.
+
+
+* The use of ``UPDATEIFCOPY`` arrays is deprecated, see `C API changes` below
+ for details. We will not be dropping support for those arrays, but they are
+ not compatible with PyPy.
+
+
+Future Changes
+==============
+
+* ``np.issubdtype`` will stop downcasting dtype-like arguments.
+ It might be expected that ``issubdtype(np.float32, 'float64')`` and
+ ``issubdtype(np.float32, np.float64)`` mean the same thing - however, there
+ was an undocumented special case that translated the former into
+ ``issubdtype(np.float32, np.floating)``, giving the surprising result of True.
+
+ This translation now gives a warning that explains what translation is
+ occurring. In the future, the translation will be disabled, and the first
+ example will be made equivalent to the second.
+
+* ``np.linalg.lstsq`` default for ``rcond`` will be changed. The ``rcond``
+ parameter to ``np.linalg.lstsq`` will change its default to machine precision
+ times the largest of the input array dimensions. A FutureWarning is issued
+ when ``rcond`` is not passed explicitly.
+
+* ``a.flat.__array__()`` will return a writeable copy of ``a`` when ``a`` is
+ non-contiguous. Previously it returned an UPDATEIFCOPY array when ``a`` was
+ writeable. Currently it returns a non-writeable copy. See gh-7054 for a
+ discussion of the issue.
+
+* Unstructured void array's ``.item`` method will return a bytes object. In the
+ future, calling ``.item()`` on arrays or scalars of ``np.void`` datatype will
+ return a ``bytes`` object instead of a buffer or int array, the same as
+ returned by ``bytes(void_scalar)``. This may affect code which assumed the
+ return value was mutable, which will no longer be the case. A
+ ``FutureWarning`` is now issued when this would occur.
+
+
+Compatibility notes
+===================
+
+The mask of a masked array view is also a view rather than a copy
+-----------------------------------------------------------------
+There was a FutureWarning about this change in NumPy 1.11.x. In short, it is
+now the case that, when changing a view of a masked array, changes to the mask
+are propagated to the original. That was not previously the case. This change
+affects slices in particular. Note that this does not yet work properly if the
+mask of the original array is ``nomask`` and the mask of the view is changed.
+See gh-5580 for an extended discussion. The original behavior of having a copy
+of the mask can be obtained by calling the ``unshare_mask`` method of the view.
+
+``np.ma.masked`` is no longer writeable
+---------------------------------------
+Attempts to mutate the ``masked`` constant now error, as the underlying arrays
+are marked readonly. In the past, it was possible to get away with::
+
+ # emulating a function that sometimes returns np.ma.masked
+ val = random.choice([np.ma.masked, 10])
+ val_arr = np.asarray(val)
+ val_arr += 1 # now errors, previously changed np.ma.masked.data
+
+``np.ma`` functions producing ``fill_value``\ s have changed
+-------------------------------------------------------------
+Previously, ``np.ma.default_fill_value`` would return a 0d array, but
+``np.ma.minimum_fill_value`` and ``np.ma.maximum_fill_value`` would return a
+tuple of the fields. Instead, all three methods return a structured ``np.void``
+object, which is what you would already find in the ``.fill_value`` attribute.
+
+Additionally, the dtype guessing now matches that of ``np.array`` - so when
+passing a python scalar ``x``, ``maximum_fill_value(x)`` is always the same as
+``maximum_fill_value(np.array(x))``. Previously ``x = long(1)`` on Python 2
+violated this assumption.
+
+``a.flat.__array__()`` returns non-writeable arrays when ``a`` is non-contiguous
+--------------------------------------------------------------------------------
+The intent is that the UPDATEIFCOPY array previously returned when ``a`` was
+non-contiguous will be replaced by a writeable copy in the future. This
+temporary measure is aimed at notifying users who expect the underlying array
+to be modified in this situation that this will no longer be the case. The most
+likely places for this to be noticed is when expressions of the form
+``np.asarray(a.flat)`` are used, or when ``a.flat`` is passed as the out
+parameter to a ufunc.
+
+``np.tensordot`` now returns zero array when contracting over 0-length dimension
+--------------------------------------------------------------------------------
+Previously ``np.tensordot`` raised a ValueError when contracting over 0-length
+dimension. Now it returns a zero array, which is consistent with the behaviour
+of ``np.dot`` and ``np.einsum``.
+
+``numpy.testing`` reorganized
+-----------------------------
+This is not expected to cause problems, but possibly something has been left
+out. If you experience an unexpected import problem using ``numpy.testing``
+let us know.
+
+``np.asfarray`` no longer accepts non-dtypes through the ``dtype`` argument
+---------------------------------------------------------------------------
+This previously would accept ``dtype=some_array``, with the implied semantics
+of ``dtype=some_array.dtype``. This was undocumented, unique across the numpy
+functions, and if used would likely correspond to a typo.
+
+1D ``np.linalg.norm`` preserves float input types, even for arbitrary orders
+----------------------------------------------------------------------------
+Previously, this would promote to ``float64`` when arbitrary orders were
+passed, despite not doing so under the simple cases::
+
+ >>> f32 = np.float32([[1, 2]])
+ >>> np.linalg.norm(f32, 2.0, axis=-1).dtype
+ dtype('float32')
+ >>> np.linalg.norm(f32, 2.0001, axis=-1).dtype
+ dtype('float64') # numpy 1.13
+ dtype('float32') # numpy 1.14
+
+This change affects only ``float32`` and ``float16`` arrays.
+
+``count_nonzero(arr, axis=())`` now counts over no axes, not all axes
+---------------------------------------------------------------------
+Elsewhere, ``axis==()`` is always understood as "no axes", but
+`count_nonzero` had a special case to treat this as "all axes". This was
+inconsistent and surprising. The correct way to count over all axes has always
+been to pass ``axis=None``.
+
+``__init__.py`` files added to test directories
+-----------------------------------------------
+This is for pytest compatibility in the case of duplicate test file names in
+the different directories. As a result, ``run_module_suite`` no longer works,
+i.e., ``python <path-to-test-file>`` results in an error.
+
+``.astype(bool)`` on unstructured void arrays now calls ``bool`` on each element
+--------------------------------------------------------------------------------
+On Python 2, ``void_array.astype(bool)`` would always return an array of
+``True``, unless the dtype is ``V0``. On Python 3, this operation would usually
+crash. Going forwards, `astype` matches the behavior of ``bool(np.void)``,
+considering a buffer of all zeros as false, and anything else as true.
+Checks for ``V0`` can still be done with ``arr.dtype.itemsize == 0``.
+
+``MaskedArray.squeeze`` never returns ``np.ma.masked``
+------------------------------------------------------
+``np.squeeze`` is documented as returning a view, but the masked variant would
+sometimes return ``masked``, which is not a view. This has been fixed, so that
+the result is always a view on the original masked array.
+This breaks any code that used ``masked_arr.squeeze() is np.ma.masked``, but
+fixes code that writes to the result of `.squeeze()`.
+
+Renamed first parameter of ``can_cast`` from ``from`` to ``from_``
+------------------------------------------------------------------
+The previous parameter name ``from`` is a reserved keyword in Python, which made
+it difficult to pass the argument by name. This has been fixed by renaming
+the parameter to ``from_``.
+
+``isnat`` raises ``TypeError`` when passed wrong type
+------------------------------------------------------
+The ufunc ``isnat`` used to raise a ``ValueError`` when it was not passed
+variables of type ``datetime`` or ``timedelta``. This has been changed to
+raising a ``TypeError``.
+
+``dtype.__getitem__`` raises ``TypeError`` when passed wrong type
+-----------------------------------------------------------------
+When indexed with a float, the dtype object used to raise ``ValueError``.
+
+User-defined types now need to implement ``__str__`` and ``__repr__``
+---------------------------------------------------------------------
+Previously, user-defined types could fall back to a default implementation of
+``__str__`` and ``__repr__`` implemented in numpy, but this has now been
+removed. Now user-defined types will fall back to the python default
+``object.__str__`` and ``object.__repr__``.
+
+Many changes to array printing, disableable with the new "legacy" printing mode
+-------------------------------------------------------------------------------
+The ``str`` and ``repr`` of ndarrays and numpy scalars have been changed in
+a variety of ways. These changes are likely to break downstream users'
+doctests.
+
+These new behaviors can be disabled to mostly reproduce numpy 1.13 behavior by
+enabling the new 1.13 "legacy" printing mode. This is enabled by calling
+``np.set_printoptions(legacy="1.13")``, or using the new ``legacy`` argument to
+``np.array2string``, as ``np.array2string(arr, legacy='1.13')``.
+
+In summary, the major changes are:
+
+* For floating-point types:
+
+ * The ``repr`` of float arrays often omits a space previously printed
+ in the sign position. See the new ``sign`` option to ``np.set_printoptions``.
+ * Floating-point arrays and scalars use a new algorithm for decimal
+ representations, giving the shortest unique representation. This will
+ usually shorten ``float16`` fractional output, and sometimes ``float32`` and
+ ``float128`` output. ``float64`` should be unaffected. See the new
+ ``floatmode`` option to ``np.set_printoptions``.
+ * Float arrays printed in scientific notation no longer use fixed-precision,
+ and now instead show the shortest unique representation.
+ * The ``str`` of floating-point scalars is no longer truncated in python2.
+
+* For other data types:
+
+ * Non-finite complex scalars print like ``nanj`` instead of ``nan*j``.
+ * ``NaT`` values in datetime arrays are now properly aligned.
+ * Arrays and scalars of ``np.void`` datatype are now printed using hex
+ notation.
+
+* For line-wrapping:
+
+ * The "dtype" part of ndarray reprs will now be printed on the next line
+ if there isn't space on the last line of array output.
+ * The ``linewidth`` format option is now always respected.
+ The `repr` or `str` of an array will never exceed this, unless a single
+ element is too wide.
+ * The last line of an array string will never have more elements than earlier
+ lines.
+ * An extra space is no longer inserted on the first line if the elements are
+ too wide.
+
+* For summarization (the use of ``...`` to shorten long arrays):
+
+ * A trailing comma is no longer inserted for ``str``.
+ Previously, ``str(np.arange(1001))`` gave
+ ``'[ 0 1 2 ..., 998 999 1000]'``, which has an extra comma.
+ * For arrays of 2-D and beyond, when ``...`` is printed on its own line in
+ order to summarize any but the last axis, newlines are now appended to that
+ line to match its leading newlines and a trailing space character is
+ removed.
+
+* ``MaskedArray`` arrays now separate printed elements with commas, always
+ print the dtype, and correctly wrap the elements of long arrays to multiple
+ lines. If there is more than 1 dimension, the array attributes are now
+ printed in a new "left-justified" printing style.
+* ``recarray`` arrays no longer print a trailing space before their dtype, and
+ wrap to the right number of columns.
+* 0d arrays no longer have their own idiosyncratic implementations of ``str``
+ and ``repr``. The ``style`` argument to ``np.array2string`` is deprecated.
+* Arrays of ``bool`` datatype will omit the datatype in the ``repr``.
+* User-defined ``dtypes`` (subclasses of ``np.generic``) now need to
+ implement ``__str__`` and ``__repr__``.
+
+Some of these changes are described in more detail below. If you need to retain
+the previous behavior for doctests or other reasons, you may want to do
+something like::
+
+ # FIXME: We need the str/repr formatting used in Numpy < 1.14.
+ try:
+ np.set_printoptions(legacy='1.13')
+ except TypeError:
+ pass
+
+
+C API changes
+=============
+
+PyPy compatible alternative to ``UPDATEIFCOPY`` arrays
+------------------------------------------------------
+``UPDATEIFCOPY`` arrays are contiguous copies of existing arrays, possibly with
+different dimensions, whose contents are copied back to the original array when
+their refcount goes to zero and they are deallocated. Because PyPy does not use
+refcounts, they do not function correctly with PyPy. NumPy is in the process of
+eliminating their use internally and two new C-API functions,
+
+* ``PyArray_SetWritebackIfCopyBase``
+* ``PyArray_ResolveWritebackIfCopy``,
+
+have been added together with a complementary flag,
+``NPY_ARRAY_WRITEBACKIFCOPY``. Using the new functionality also requires that
+some flags be changed when new arrays are created, to wit:
+``NPY_ARRAY_INOUT_ARRAY`` should be replaced by ``NPY_ARRAY_INOUT_ARRAY2`` and
+``NPY_ARRAY_INOUT_FARRAY`` should be replaced by ``NPY_ARRAY_INOUT_FARRAY2``.
+Arrays created with these new flags will then have the ``WRITEBACKIFCOPY``
+semantics.
+
+If PyPy compatibility is not a concern, these new functions can be ignored,
+although there will be a ``DeprecationWarning``. If you do wish to pursue PyPy
+compatibility, more information on these functions and their use may be found
+in the c-api_ documentation and the example in how-to-extend_.
+
+.. _c-api: https://github.com/numpy/numpy/blob/master/doc/source/reference/c-api.array.rst
+.. _how-to-extend: https://github.com/numpy/numpy/blob/master/doc/source/user/c-info.how-to-extend.rst
+
+
+New Features
+============
+
+Encoding argument for text IO functions
+---------------------------------------
+``genfromtxt``, ``loadtxt``, ``fromregex`` and ``savetxt`` can now handle files
+with arbitrary encoding supported by Python via the encoding argument.
+For backward compatibility the argument defaults to the special ``bytes`` value
+which continues to treat text as raw byte values and continues to pass latin1
+encoded bytes to custom converters.
+Using any other value (including ``None`` for system default) will switch the
+functions to real text IO so one receives unicode strings instead of bytes in
+the resulting arrays.
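+
+For example (``example.csv`` is a hypothetical file name)::
+
+    data = np.genfromtxt('example.csv', delimiter=',', encoding='utf-8')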
+
+External ``nose`` plugins are usable by ``numpy.testing.Tester``
+----------------------------------------------------------------
+``numpy.testing.Tester`` is now aware of ``nose`` plugins that are outside the
+``nose`` built-in ones. This allows using, for example, ``nose-timer`` like
+so: ``np.test(extra_argv=['--with-timer', '--timer-top-n', '20'])`` to
+obtain the runtime of the 20 slowest tests. An extra keyword ``timer`` was
+also added to ``Tester.test``, so ``np.test(timer=20)`` will also report the 20
+slowest tests.
+
+``parametrize`` decorator added to ``numpy.testing``
+----------------------------------------------------
+A basic ``parametrize`` decorator is now available in ``numpy.testing``. It is
+intended to allow rewriting yield based tests that have been deprecated in
+pytest so as to facilitate the transition to pytest in the future. The nose
+testing framework has not been supported for several years and looks like
+abandonware.
+
+The new ``parametrize`` decorator does not have the full functionality of the
+one in pytest. It doesn't work for classes, doesn't support nesting, and does
+not substitute variable names. Even so, it should be adequate to rewrite the
+NumPy tests.
+
+``chebinterpolate`` function added to ``numpy.polynomial.chebyshev``
+--------------------------------------------------------------------
+The new ``chebinterpolate`` function interpolates a given function at the
+Chebyshev points of the first kind. A new ``Chebyshev.interpolate`` class
+method adds support for interpolation over arbitrary intervals using the scaled
+and shifted Chebyshev points of the first kind.
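+
+A minimal sketch of both interfaces::
+
+    import numpy as np
+    from numpy.polynomial import chebyshev
+
+    # Chebyshev series coefficients interpolating tanh on [-1, 1]
+    coef = chebyshev.chebinterpolate(np.tanh, 8)
+    # the same interpolation over an arbitrary interval
+    p = chebyshev.Chebyshev.interpolate(np.tanh, 8, domain=[0, 2])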
+
+Support for reading lzma compressed text files in Python 3
+----------------------------------------------------------
+With Python versions containing the ``lzma`` module, the text IO functions can
+now transparently read from files with an ``xz`` or ``lzma`` extension.
+
+``sign`` option added to ``np.set_printoptions`` and ``np.array2string``
+------------------------------------------------------------------------
+This option controls printing of the sign of floating-point types, and may be
+one of the characters '-', '+' or ' '. With '+' numpy always prints the sign of
+positive values, with ' ' it always prints a space (whitespace character) in
+the sign position of positive values, and with '-' it will omit the sign
+character for positive values. The new default is '-'.
+
+This new default changes the float output relative to numpy 1.13. The old
+behavior can be obtained in 1.13 "legacy" printing mode, see compatibility
+notes above.
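+
+For instance (illustrative)::
+
+    >>> np.set_printoptions(sign='+')
+    >>> np.array([1.0, -1.0])
+    array([+1., -1.])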
+
+``hermitian`` option added to ``np.linalg.matrix_rank``
+--------------------------------------------------------
+The new ``hermitian`` option allows choosing between standard SVD based matrix
+rank calculation and the more efficient eigenvalue based method for
+symmetric/hermitian matrices.
+
+``threshold`` and ``edgeitems`` options added to ``np.array2string``
+--------------------------------------------------------------------
+These options could previously be controlled using ``np.set_printoptions``, but
+now can be changed on a per-call basis as arguments to ``np.array2string``.
+
+``concatenate`` and ``stack`` gained an ``out`` argument
+--------------------------------------------------------
+A preallocated buffer of the desired dtype can now be used for the output of
+these functions.
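+
+A small sketch of the new argument::
+
+    out = np.empty(4)
+    np.concatenate([np.ones(2), np.zeros(2)], out=out)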
+
+Support for PGI flang compiler on Windows
+-----------------------------------------
+The PGI flang compiler is a Fortran front end for LLVM released by NVIDIA under
+the Apache 2 license. It can be invoked by ::
+
+ python setup.py config --compiler=clang --fcompiler=flang install
+
+There is little experience with this new compiler, so any feedback from people
+using it will be appreciated.
+
+
+Improvements
+============
+
+Numerator degrees of freedom in ``random.noncentral_f`` need only be positive.
+------------------------------------------------------------------------------
+Prior to NumPy 1.14.0, the numerator degrees of freedom needed to be > 1, but
+the distribution is valid for values > 0, which is the new requirement.
+
+The GIL is released for all ``np.einsum`` variations
+----------------------------------------------------
+Some specific loop structures which have an accelerated loop version
+did not release the GIL prior to NumPy 1.14.0. This oversight has been
+fixed.
+
+The `np.einsum` function will use BLAS when possible and optimize by default
+----------------------------------------------------------------------------
+The ``np.einsum`` function will now call ``np.tensordot`` when appropriate.
+Because ``np.tensordot`` uses BLAS when possible, that will speed up execution.
+By default, ``np.einsum`` will also attempt optimization as the overhead is
+small relative to the potential improvement in speed.
+
+``f2py`` now handles arrays of dimension 0
+------------------------------------------
+``f2py`` now allows for the allocation of arrays of dimension 0. This allows
+for more consistent handling of corner cases downstream.
+
+``numpy.distutils`` supports using MSVC and mingw64-gfortran together
+---------------------------------------------------------------------
+Numpy distutils now supports using Mingw64 gfortran and MSVC compilers
+together. This enables the production of Python extension modules on Windows
+containing Fortran code while retaining compatibility with the
+binaries distributed by Python.org. Not all use cases are supported,
+but most common ways to wrap Fortran for Python are functional.
+
+Compilation in this mode is usually enabled automatically, and can be
+selected via the ``--fcompiler`` and ``--compiler`` options to
+``setup.py``. Moreover, linking Fortran codes to static OpenBLAS is
+supported; by default a gfortran compatible static archive
+``openblas.a`` is looked for.
+
+``np.linalg.pinv`` now works on stacked matrices
+------------------------------------------------
+Previously it was limited to a single 2d array.
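+
+For example (illustrative shapes)::
+
+    a = np.ones((3, 4, 5))           # a stack of three 4x5 matrices
+    np.linalg.pinv(a).shape          # (3, 5, 4)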
+
+``numpy.save`` aligns data to 64 bytes instead of 16
+----------------------------------------------------
+Saving NumPy arrays in the ``npy`` format with ``numpy.save`` inserts
+padding before the array data to align it at 64 bytes. Previously
+this was only 16 bytes (and sometimes less due to a bug in the code
+for version 2). Now the alignment is 64 bytes, which matches the
+widest SIMD instruction set commonly available, and is also the most
+common cache line size. This makes ``npy`` files easier to use in
+programs which open them with ``mmap``, especially on Linux where an
+``mmap`` offset must be a multiple of the page size.
+
+NPZ files now can be written without using temporary files
+----------------------------------------------------------
+In Python 3.6+ ``numpy.savez`` and ``numpy.savez_compressed`` now write
+directly to a ZIP file, without creating intermediate temporary files.
+
+Better support for empty structured and string types
+----------------------------------------------------
+Structured types can contain zero fields, and string dtypes can contain zero
+characters. Zero-length strings still cannot be created directly, and must be
+constructed through structured dtypes::
+
+    N = 0   # zero-length string fields are now allowed
+    str0 = np.empty(10, np.dtype([('v', str, N)]))['v']
+ void0 = np.empty(10, np.void)
+
+It was always possible to work with these, but the following operations are
+now supported for these arrays:
+
+ * `arr.sort()`
+ * `arr.view(bytes)`
+ * `arr.resize(...)`
+ * `pickle.dumps(arr)`
+
+Support for ``decimal.Decimal`` in ``np.lib.financial``
+-------------------------------------------------------
+Unless otherwise stated all functions within the ``financial`` package now
+support using the ``decimal.Decimal`` built-in type.
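+
+A minimal sketch, using ``np.fv`` (future value)::
+
+    from decimal import Decimal
+
+    np.fv(Decimal('0.05'), Decimal('10'), Decimal('-100'), Decimal('0'))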
+
+Float printing now uses "dragon4" algorithm for shortest decimal representation
+-------------------------------------------------------------------------------
+The ``str`` and ``repr`` of floating-point values (16, 32, 64 and 128 bit) are
+now printed to give the shortest decimal representation which uniquely
+identifies the value from others of the same type. Previously this was only
+true for ``float64`` values. The remaining float types will now often be shorter
+than in numpy 1.13. Arrays printed in scientific notation now also use the
+shortest scientific representation, instead of fixed precision as before.
+
+Additionally, the `str` of float scalars will no longer be truncated in
+python2, unlike the python2 ``float`` type. `np.double` scalars now have a
+``str`` and ``repr`` identical to that of a python3 float.
+
+New functions ``np.format_float_scientific`` and ``np.format_float_positional``
+are provided to generate these decimal representations.
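+
+For example::
+
+    >>> np.format_float_positional(np.float16(np.pi))
+    '3.14'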
+
+A new option ``floatmode`` has been added to ``np.set_printoptions`` and
+``np.array2string``, which gives control over uniqueness and rounding of
+printed elements in an array. The new default is ``floatmode='maxprec'`` with
+``precision=8``, which will print at most 8 fractional digits, or fewer if an
+element can be uniquely represented with fewer. A useful new mode is
+``floatmode="unique"``, which will output enough digits to specify the array
+elements uniquely.
+
+Numpy complex-floating-scalars with values like ``inf*j`` or ``nan*j`` now
+print as ``infj`` and ``nanj``, like the pure-python ``complex`` type.
+
+The ``FloatFormat`` and ``LongFloatFormat`` classes are deprecated and should
+both be replaced by ``FloatingFormat``. Similarly ``ComplexFormat`` and
+``LongComplexFormat`` should be replaced by ``ComplexFloatingFormat``.
+
+``void`` datatype elements are now printed in hex notation
+----------------------------------------------------------
+A hex representation compatible with the python ``bytes`` type is now printed
+for unstructured ``np.void`` elements, e.g., ``V4`` datatype. Previously, in
+python2 the raw void data of the element was printed to stdout, or in python3
+the integer byte values were shown.
+
+Printing style for ``void`` datatypes is now independently customizable
+-----------------------------------------------------------------------
+The printing style of ``np.void`` arrays is now independently customizable
+using the ``formatter`` argument to ``np.set_printoptions``, using the
+``'void'`` key, instead of the catch-all ``numpystr`` key as before.
+
+Reduced memory usage of ``np.loadtxt``
+--------------------------------------
+``np.loadtxt`` now reads files in chunks instead of all at once which decreases
+its memory usage significantly for large files.
+
+
+Changes
+=======
+
+Multiple-field indexing/assignment of structured arrays
+-------------------------------------------------------
+The indexing and assignment of structured arrays with multiple fields has
+changed in a number of ways, as warned about in previous releases.
+
+First, indexing a structured array with multiple fields, e.g.,
+``arr[['f1', 'f3']]``, returns a view into the original array instead of a
+copy. The returned view will have extra padding bytes corresponding to
+intervening fields in the original array, unlike the copy in 1.13, which will
+affect code such as ``arr[['f1', 'f3']].view(newdtype)``.
+
+Second, assignment between structured arrays will now occur "by position"
+instead of "by field name". The Nth field of the destination will be set to the
+Nth field of the source regardless of field name, unlike in numpy versions 1.6
+to 1.13 in which fields in the destination array were set to the
+identically-named field in the source array or to 0 if the source did not have
+a field.
+
+Correspondingly, the order of fields in a structured dtype now matters when
+computing dtype equality. For example, with the dtypes ::
+
+ x = dtype({'names': ['A', 'B'], 'formats': ['i4', 'f4'], 'offsets': [0, 4]})
+ y = dtype({'names': ['B', 'A'], 'formats': ['f4', 'i4'], 'offsets': [4, 0]})
+
+the expression ``x == y`` will now return ``False``, unlike before.
+This makes dictionary based dtype specifications like
+``dtype({'a': ('i4', 0), 'b': ('f4', 4)})`` dangerous in python < 3.6
+since dict key order is not preserved in those versions.
+
+Assignment from a structured array to a boolean array now raises a ValueError,
+unlike in 1.13, where it always set the destination elements to ``True``.
+
+Assignment from structured array with more than one field to a non-structured
+array now raises a ValueError. In 1.13 this copied just the first field of the
+source to the destination.
+
+Using field "titles" in multiple-field indexing is now disallowed, as is
+repeating a field name in a multiple-field index.
+
+The documentation for structured arrays in the user guide has been
+significantly updated to reflect these changes.
+
+Integer and Void scalars are now unaffected by ``np.set_string_function``
+-------------------------------------------------------------------------
+Previously, unlike most other numpy scalars, the ``str`` and ``repr`` of
+integer and void scalars could be controlled by ``np.set_string_function``.
+This is no longer possible.
+
+0d array printing changed, ``style`` arg of array2string deprecated
+-------------------------------------------------------------------
+Previously the ``str`` and ``repr`` of 0d arrays had idiosyncratic
+implementations which returned ``str(a.item())`` and ``'array(' +
+repr(a.item()) + ')'`` respectively for 0d array ``a``, unlike both numpy
+scalars and higher dimension ndarrays.
+
+Now, the ``str`` of a 0d array acts like a numpy scalar using ``str(a[()])``
+and the ``repr`` acts like higher dimension arrays using ``formatter(a[()])``,
+where ``formatter`` can be specified using ``np.set_printoptions``. The
+``style`` argument of ``np.array2string`` is deprecated.
+
+This new behavior is disabled in 1.13 legacy printing mode, see compatibility
+notes above.
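+
+For example (illustrative)::
+
+    >>> str(np.array(1.5))
+    '1.5'
+    >>> repr(np.array(1.5))
+    'array(1.5)'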
+
+Seeding ``RandomState`` using an array requires a 1-d array
+-----------------------------------------------------------
+``RandomState`` previously would accept empty arrays or arrays with 2 or more
+dimensions, which resulted in either a failure to seed (empty arrays) or in
+some of the passed values being ignored when setting the seed.
+
+``MaskedArray`` objects show a more useful ``repr``
+---------------------------------------------------
+The ``repr`` of a ``MaskedArray`` is now closer to the python code that would
+produce it, with arrays now being shown with commas and dtypes. Like the other
+formatting changes, this can be disabled with the 1.13 legacy printing mode in
+order to help transition doctests.
+
+The ``repr`` of ``np.polynomial`` classes is more explicit
+----------------------------------------------------------
+It now shows the domain and window parameters as keyword arguments to make
+them more clear::
+
+ >>> np.polynomial.Polynomial(range(4))
+ Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
--- /dev/null
+==========================
+NumPy 1.14.1 Release Notes
+==========================
+
+This is a bugfix release for some problems reported following the 1.14.0 release. The major
+problems fixed are the following.
+
+* Problems with the new array printing, particularly the printing of complex
+  values. Please report any additional problems that may turn up.
+* Problems with ``np.einsum`` due to the new ``optimize=True`` default. Some
+ fixes for optimization have been applied and ``optimize=False`` is now the
+ default.
+* The sort order in ``np.unique`` when ``axis=<some-number>`` will now always
+ be lexicographic in the subarray elements. In previous NumPy versions there
+ was an optimization that could result in sorting the subarrays as unsigned
+ byte strings.
+* The change in 1.14.0 that multi-field indexing of structured arrays returns a
+ view instead of a copy has been reverted but remains on track for NumPy 1.15.
+ Affected users should read the 1.14.1 Numpy User Guide section
+ "basics/structured arrays/accessing multiple fields" for advice on how to
+ manage this transition.
+
+The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
+3.6 wheels available from PyPI are built with Python 3.6.2 and should be
+compatible with all previous versions of Python 3.6. The source releases were
+cythonized with Cython 0.26.1, which is known to **not** support the upcoming
+Python 3.7 release. People who wish to run Python 3.7 should check out the
+NumPy repo and try building with the, as yet, unreleased master branch of
+Cython.
+
+Contributors
+============
+
+A total of 14 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Daniel Smith
+* Dennis Weyland +
+* Eric Larson
+* Eric Wieser
+* Jarrod Millman
+* Kenichi Maehashi +
+* Marten van Kerkwijk
+* Mathieu Lamarre
+* Sebastian Berg
+* Simon Conseil
+* Simon Gibbons
+* xoviat
+
+Pull requests merged
+====================
+
+A total of 36 pull requests were merged for this release.
+
+* `#10339 <https://github.com/numpy/numpy/pull/10339>`__: BUG: restrict the __config__ modifications to win32
+* `#10368 <https://github.com/numpy/numpy/pull/10368>`__: MAINT: Adjust type promotion in linalg.norm
+* `#10375 <https://github.com/numpy/numpy/pull/10375>`__: BUG: add missing paren and remove quotes from repr of fieldless...
+* `#10395 <https://github.com/numpy/numpy/pull/10395>`__: MAINT: Update download URL in setup.py.
+* `#10396 <https://github.com/numpy/numpy/pull/10396>`__: BUG: fix einsum issue with unicode input and py2
+* `#10397 <https://github.com/numpy/numpy/pull/10397>`__: BUG: fix error message not formatted in einsum
+* `#10398 <https://github.com/numpy/numpy/pull/10398>`__: DOC: add documentation about how to handle new array printing
+* `#10403 <https://github.com/numpy/numpy/pull/10403>`__: BUG: Set einsum optimize parameter default to `False`.
+* `#10424 <https://github.com/numpy/numpy/pull/10424>`__: ENH: Fix repr of np.record objects to match np.void types #10412
+* `#10425 <https://github.com/numpy/numpy/pull/10425>`__: MAINT: Update zesty to artful for i386 testing
+* `#10431 <https://github.com/numpy/numpy/pull/10431>`__: REL: Add 1.14.1 release notes template
+* `#10435 <https://github.com/numpy/numpy/pull/10435>`__: MAINT: Use ValueError for duplicate field names in lookup (backport)
+* `#10534 <https://github.com/numpy/numpy/pull/10534>`__: BUG: Provide a better error message for out-of-order fields
+* `#10536 <https://github.com/numpy/numpy/pull/10536>`__: BUG: Resize bytes columns in genfromtxt (backport of #10401)
+* `#10537 <https://github.com/numpy/numpy/pull/10537>`__: BUG: multifield-indexing adds padding bytes: revert for 1.14.1
+* `#10539 <https://github.com/numpy/numpy/pull/10539>`__: BUG: fix np.save issue with python 2.7.5
+* `#10540 <https://github.com/numpy/numpy/pull/10540>`__: BUG: Add missing DECREF in Py2 int() cast
+* `#10541 <https://github.com/numpy/numpy/pull/10541>`__: TST: Add circleci document testing to maintenance/1.14.x
+* `#10542 <https://github.com/numpy/numpy/pull/10542>`__: BUG: complex repr has extra spaces, missing + (1.14 backport)
+* `#10550 <https://github.com/numpy/numpy/pull/10550>`__: BUG: Set missing exception after malloc
+* `#10557 <https://github.com/numpy/numpy/pull/10557>`__: BUG: In numpy.i, clear CARRAY flag if wrapped buffer is not C_CONTIGUOUS.
+* `#10558 <https://github.com/numpy/numpy/pull/10558>`__: DEP: Issue FutureWarning when malformed records detected.
+* `#10559 <https://github.com/numpy/numpy/pull/10559>`__: BUG: Fix einsum optimize logic for singleton dimensions
+* `#10560 <https://github.com/numpy/numpy/pull/10560>`__: BUG: Fix calling ufuncs with a positional output argument.
+* `#10561 <https://github.com/numpy/numpy/pull/10561>`__: BUG: Fix various Big-Endian test failures (ppc64)
+* `#10562 <https://github.com/numpy/numpy/pull/10562>`__: BUG: Make dtype.descr error for out-of-order fields.
+* `#10563 <https://github.com/numpy/numpy/pull/10563>`__: BUG: arrays not being flattened in `union1d`
+* `#10607 <https://github.com/numpy/numpy/pull/10607>`__: MAINT: Update sphinxext submodule hash.
+* `#10608 <https://github.com/numpy/numpy/pull/10608>`__: BUG: Revert sort optimization in np.unique.
+* `#10609 <https://github.com/numpy/numpy/pull/10609>`__: BUG: infinite recursion in str of 0d subclasses
+* `#10610 <https://github.com/numpy/numpy/pull/10610>`__: BUG: Align type definition with generated lapack
+* `#10612 <https://github.com/numpy/numpy/pull/10612>`__: BUG/ENH: Improve output for structured non-void types
+* `#10622 <https://github.com/numpy/numpy/pull/10622>`__: BUG: deallocate recursive closure in arrayprint.py (1.14 backport)
+* `#10624 <https://github.com/numpy/numpy/pull/10624>`__: BUG: Correctly identify comma separated dtype strings
+* `#10629 <https://github.com/numpy/numpy/pull/10629>`__: BUG: deallocate recursive closure in arrayprint.py (backport...
+* `#10630 <https://github.com/numpy/numpy/pull/10630>`__: REL: Prepare for 1.14.1 release.
--- /dev/null
+==========================
+NumPy 1.14.2 Release Notes
+==========================
+
+This is a bugfix release for some bugs reported following the 1.14.1 release. The major
+problems dealt with are as follows.
+
+* Residual bugs in the new array printing functionality.
+* A regression resulting in a relocation problem with shared libraries.
+* Improved PyPy compatibility.
+
+The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
+3.6 wheels available from PyPI are built with Python 3.6.2 and should be
+compatible with all previous versions of Python 3.6. The source releases were
+cythonized with Cython 0.26.1, which is known to **not** support the upcoming
+Python 3.7 release. People who wish to run Python 3.7 should check out the
+NumPy repo and try building with the, as yet, unreleased master branch of
+Cython.
+
+Contributors
+============
+
+A total of 4 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Eric Wieser
+* Pauli Virtanen
+
+Pull requests merged
+====================
+
+A total of 5 pull requests were merged for this release.
+
+* `#10674 <https://github.com/numpy/numpy/pull/10674>`__: BUG: Further back-compat fix for subclassed array repr
+* `#10725 <https://github.com/numpy/numpy/pull/10725>`__: BUG: dragon4 fractional output mode adds too many trailing zeros
+* `#10726 <https://github.com/numpy/numpy/pull/10726>`__: BUG: Fix f2py generated code to work on PyPy
+* `#10727 <https://github.com/numpy/numpy/pull/10727>`__: BUG: Fix missing NPY_VISIBILITY_HIDDEN on npy_longdouble_to_PyLong
+* `#10729 <https://github.com/numpy/numpy/pull/10729>`__: DOC: Create 1.14.2 notes and changelog.
--- /dev/null
+==========================
+NumPy 1.14.3 Release Notes
+==========================
+
+This is a bugfix release for a few bugs reported following the 1.14.2 release:
+
+* ``np.lib.recfunctions.fromrecords`` accepts a list-of-lists, until 1.15
+* In python2, float types use the new print style when printing to a file
+* ``style`` arg in "legacy" print mode now works for 0d arrays
+
+The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
+3.6 wheels available from PyPI are built with Python 3.6.2 and should be
+compatible with all previous versions of Python 3.6. The source releases were
+cythonized with Cython 0.28.2.
+
+Contributors
+============
+
+A total of 6 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Jonathan March +
+* Malcolm Smith +
+* Matti Picus
+* Pauli Virtanen
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#10862 <https://github.com/numpy/numpy/pull/10862>`__: BUG: floating types should override tp_print (1.14 backport)
+* `#10905 <https://github.com/numpy/numpy/pull/10905>`__: BUG: for 1.14 back-compat, accept list-of-lists in fromrecords
+* `#10947 <https://github.com/numpy/numpy/pull/10947>`__: BUG: 'style' arg to array2string broken in legacy mode (1.14...
+* `#10959 <https://github.com/numpy/numpy/pull/10959>`__: BUG: test, fix for missing flags['WRITEBACKIFCOPY'] key
+* `#10960 <https://github.com/numpy/numpy/pull/10960>`__: BUG: Add missing underscore to prototype in check_embedded_lapack
+* `#10961 <https://github.com/numpy/numpy/pull/10961>`__: BUG: Fix encoding regression in ma/bench.py (Issue #10868)
+* `#10962 <https://github.com/numpy/numpy/pull/10962>`__: BUG: core: fix NPY_TITLE_KEY macro on pypy
+* `#10974 <https://github.com/numpy/numpy/pull/10974>`__: BUG: test, fix PyArray_DiscardWritebackIfCopy...
--- /dev/null
+==========================
+NumPy 1.14.4 Release Notes
+==========================
+
+This is a bugfix release for bugs reported following the 1.14.3 release. The
+most significant fixes are:
+
+* fixes for compiler instruction reordering that resulted in NaNs not being
+  properly propagated in ``np.max`` and ``np.min``,
+
+* fixes for bus faults on SPARC and older ARM due to incorrect alignment
+ checks.
+
+There are also improvements to the printing of long doubles on PPC platforms.
+All is not yet perfect on that platform: the whitespace padding is still
+incorrect and is to be fixed in numpy 1.15, so NumPy still fails some
+printing-related (and other) unit tests on ppc systems. However, the printed
+values are now correct.
+
+Note that NumPy will error on import if it detects incorrect float32 `dot`
+results. This problem has been seen on the Mac when working in the Anaconda
+environment and is due to a subtle interaction between MKL and PyQt5. It is not
+strictly a NumPy problem, but it is best that users be aware of it. See the
+gh-8577 NumPy issue for more information.
+
+The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
+3.6 wheels available from PyPI are built with Python 3.6.2 and should be
+compatible with all previous versions of Python 3.6. The source releases were
+cythonized with Cython 0.28.2 and should work for the upcoming Python 3.7.
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Marten van Kerkwijk
+* Matti Picus
+* Pauli Virtanen
+* Ryan Soklaski +
+* Sebastian Berg
+
+Pull requests merged
+====================
+
+A total of 11 pull requests were merged for this release.
+
+* `#11104 <https://github.com/numpy/numpy/pull/11104>`__: BUG: str of DOUBLE_DOUBLE format wrong on ppc64
+* `#11170 <https://github.com/numpy/numpy/pull/11170>`__: TST: linalg: add regression test for gh-8577
+* `#11174 <https://github.com/numpy/numpy/pull/11174>`__: MAINT: add sanity-checks to be run at import time
+* `#11181 <https://github.com/numpy/numpy/pull/11181>`__: BUG: void dtype setup checked offset not actual pointer for alignment
+* `#11194 <https://github.com/numpy/numpy/pull/11194>`__: BUG: Python2 doubles don't print correctly in interactive shell.
+* `#11198 <https://github.com/numpy/numpy/pull/11198>`__: BUG: optimizing compilers can reorder call to npy_get_floatstatus
+* `#11199 <https://github.com/numpy/numpy/pull/11199>`__: BUG: reduce using SSE only warns if inside SSE loop
+* `#11203 <https://github.com/numpy/numpy/pull/11203>`__: BUG: Bytes delimiter/comments in genfromtxt should be decoded
+* `#11211 <https://github.com/numpy/numpy/pull/11211>`__: BUG: Fix reference count/memory leak exposed by better testing
+* `#11219 <https://github.com/numpy/numpy/pull/11219>`__: BUG: Fixes einsum broadcasting bug when optimize=True
+* `#11251 <https://github.com/numpy/numpy/pull/11251>`__: DOC: Document 1.14.4 release.
--- /dev/null
+==========================
+NumPy 1.14.5 Release Notes
+==========================
+
+This is a bugfix release for bugs reported following the 1.14.4 release. The
+most significant fixes are:
+
+* fixes for compilation errors on alpine and NetBSD
+
+The Python versions supported in this release are 2.7 and 3.4 - 3.6. The Python
+3.6 wheels available from PyPI are built with Python 3.6.2 and should be
+compatible with all previous versions of Python 3.6. The source releases were
+cythonized with Cython 0.28.2 and should work for the upcoming Python 3.7.
+
+Contributors
+============
+
+A total of 1 person contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+
+Pull requests merged
+====================
+
+A total of 2 pull requests were merged for this release.
+
+* `#11274 <https://github.com/numpy/numpy/pull/11274>`__: BUG: Correct use of NPY_UNUSED.
+* `#11294 <https://github.com/numpy/numpy/pull/11294>`__: BUG: Remove extra trailing parentheses.
+
--- /dev/null
+==========================
+NumPy 1.14.6 Release Notes
+==========================
+
+This is a bugfix release for bugs reported following the 1.14.5 release. The
+most significant fixes are:
+
+* Fix for behavior change in ``ma.masked_values(shrink=True)``
+* Fix the new cached allocations machinery to be thread safe.
+
+The Python versions supported in this release are 2.7 and 3.4 - 3.7. The Python
+3.6 wheels on PyPI should be compatible with all Python 3.6 versions.
+
+Contributors
+============
+
+A total of 4 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Eric Wieser
+* Julian Taylor
+* Matti Picus
+
+Pull requests merged
+====================
+
+A total of 4 pull requests were merged for this release.
+
+* `#11985 <https://github.com/numpy/numpy/pull/11985>`__: BUG: fix cached allocations without the GIL
+* `#11986 <https://github.com/numpy/numpy/pull/11986>`__: BUG: Undo behavior change in ma.masked_values(shrink=True)
+* `#11987 <https://github.com/numpy/numpy/pull/11987>`__: BUG: fix refcount leak in PyArray_AdaptFlexibleDType
+* `#11995 <https://github.com/numpy/numpy/pull/11995>`__: TST: Add Python 3.7 testing to NumPy 1.14.
--- /dev/null
+==========================
+NumPy 1.15.0 Release Notes
+==========================
+
+NumPy 1.15.0 is a release with an unusual number of cleanups, many deprecations
+of old functions, and improvements to many existing functions. Please read the
+detailed descriptions below to see if you are affected.
+
+For testing, we have switched to pytest as a replacement for the no longer
+maintained nose framework. The old nose based interface remains for downstream
+projects that may still be using it.
+
+The Python versions supported by this release are 2.7, 3.4-3.7. The wheels are
+linked with OpenBLAS v0.3.0, which should fix some of the linalg problems
+reported for NumPy 1.14.
+
+
+Highlights
+==========
+
+* NumPy has switched to pytest for testing.
+* A new `numpy.printoptions` context manager.
+* Many improvements to the histogram functions.
+* Support for unicode field names in python 2.7.
+* Improved support for PyPy.
+* Fixes and improvements to `numpy.einsum`.
+
+
+New functions
+=============
+
+* `numpy.gcd` and `numpy.lcm`, to compute the greatest common divisor and least
+ common multiple.
+
+* `numpy.ma.stack`, the `numpy.stack` array-joining function generalized to
+ masked arrays.
+
+* `numpy.quantile` function, an interface to ``percentile`` without factors of
+  100.
+
+* `numpy.nanquantile` function, an interface to ``nanpercentile`` without
+  factors of 100.
+
+* `numpy.printoptions`, a context manager that sets print options temporarily
+ for the scope of the ``with`` block::
+
+ >>> with np.printoptions(precision=2):
+ ... print(np.array([2.0]) / 3)
+ [0.67]
+
+* `numpy.histogram_bin_edges`, a function to get the edges of the bins used by a
+ histogram without needing to calculate the histogram.
+
+* C functions `npy_get_floatstatus_barrier` and `npy_clear_floatstatus_barrier`
+ have been added to deal with compiler optimization changing the order of
+ operations. See below for details.
+
+
+Deprecations
+============
+
+* Aliases of builtin `pickle` functions are deprecated, in favor of their
+ unaliased ``pickle.<func>`` names:
+
+ * `numpy.loads`
+ * `numpy.core.numeric.load`
+ * `numpy.core.numeric.loads`
+ * `numpy.ma.loads`, `numpy.ma.dumps`
+ * `numpy.ma.load`, `numpy.ma.dump` - these functions already failed on
+ python 3 when called with a string.
+
+* Multidimensional indexing with anything but a tuple is deprecated. This means
+ that the index list in ``ind = [slice(None), 0]; arr[ind]`` should be changed
+ to a tuple, e.g., ``ind = [slice(None), 0]; arr[tuple(ind)]`` or
+ ``arr[(slice(None), 0)]``. That change is necessary to avoid ambiguity in
+ expressions such as ``arr[[[0, 1], [0, 1]]]``, currently interpreted as
+ ``arr[array([0, 1]), array([0, 1])]``, that will be interpreted
+ as ``arr[array([[0, 1], [0, 1]])]`` in the future.
+
+* Imports from the following sub-modules are deprecated, they will be removed
+ at some future date.
+
+ * `numpy.testing.utils`
+ * `numpy.testing.decorators`
+ * `numpy.testing.nosetester`
+ * `numpy.testing.noseclasses`
+ * `numpy.core.umath_tests`
+
+* Giving a generator to `numpy.sum` is now deprecated. This was undocumented
+ behavior, but worked. Previously, it would calculate the sum of the generator
+ expression. In the future, it might return a different result. Use
+  ``np.sum(np.fromiter(generator, dtype))`` or the built-in Python ``sum``
+  instead.
+
+* Users of the C-API should call ``PyArray_ResolveWritebackIfCopy`` or
+  ``PyArray_DiscardWritebackIfCopy`` on any array with the ``WRITEBACKIFCOPY``
+ flag set, before deallocating the array. A deprecation warning will be
+ emitted if those calls are not used when needed.
+
+* Users of ``nditer`` should use the nditer object as a context manager
+ anytime one of the iterator operands is writeable, so that numpy can
+ manage writeback semantics, or should call ``it.close()``. A
+ `RuntimeWarning` may be emitted otherwise in these cases.
+
+* The ``normed`` argument of ``np.histogram``, deprecated long ago in 1.6.0,
+ now emits a ``DeprecationWarning``.
+
+
+Future Changes
+==============
+
+* NumPy 1.16 will drop support for Python 3.4.
+* NumPy 1.17 will drop support for Python 2.7.
+
+
+Compatibility notes
+===================
+
+Compiled testing modules renamed and made private
+-------------------------------------------------
+The following compiled modules have been renamed and made private:
+
+* ``umath_tests`` -> ``_umath_tests``
+* ``test_rational`` -> ``_rational_tests``
+* ``multiarray_tests`` -> ``_multiarray_tests``
+* ``struct_ufunc_test`` -> ``_struct_ufunc_tests``
+* ``operand_flag_tests`` -> ``_operand_flag_tests``
+
+The ``umath_tests`` module is still available for backwards compatibility, but
+will be removed in the future.
+
+The ``NpzFile`` returned by ``np.savez`` is now a ``collections.abc.Mapping``
+-----------------------------------------------------------------------------
+This means it behaves like a readonly dictionary, and has a new ``.values()``
+method and ``len()`` implementation.
+
+For python 3, this means that ``.iteritems()``, ``.iterkeys()`` have been
+deprecated, and ``.keys()`` and ``.items()`` now return views and not lists.
+This is consistent with how the builtin ``dict`` type changed between python 2
+and python 3.
+
+Under certain conditions, ``nditer`` must be used in a context manager
+----------------------------------------------------------------------
+When using a `numpy.nditer` with the ``"writeonly"`` or ``"readwrite"`` flags, there
+are some circumstances where nditer doesn't actually give you a view of the
+writable array. Instead, it gives you a copy, and if you make changes to the
+copy, nditer later writes those changes back into your actual array. Currently,
+this writeback occurs when the array objects are garbage collected, which makes
+this API error-prone on CPython and entirely broken on PyPy. Therefore,
+``nditer`` should now be used as a context manager whenever it is used
+with writeable arrays, e.g., ``with np.nditer(...) as it: ...``. You may also
+explicitly call ``it.close()`` for cases where a context manager is unusable,
+for instance in generator expressions.
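+
+A minimal sketch of the context-manager pattern::
+
+    a = np.arange(6).reshape(2, 3)
+    with np.nditer(a, op_flags=['readwrite']) as it:
+        for x in it:
+            x[...] = 2 * x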
+
+Numpy has switched to using pytest instead of nose for testing
+--------------------------------------------------------------
+The last nose release was 1.3.7 in June, 2015, and development of that tool has
+ended, consequently NumPy has now switched to using pytest. The old decorators
+and nose tools that were previously used by some downstream projects remain
+available, but will not be maintained. The standard testing utilities,
+``assert_almost_equal`` and such, are not affected by this change except for
+the nose specific functions ``import_nose`` and ``raises``. Those functions are
+not used in numpy, but are kept for downstream compatibility.
+
+Numpy no longer monkey-patches ``ctypes`` with ``__array_interface__``
+----------------------------------------------------------------------
+Previously numpy added ``__array_interface__`` attributes to all the integer
+types from ``ctypes``.
+
+``np.ma.notmasked_contiguous`` and ``np.ma.flatnotmasked_contiguous`` always return lists
+-----------------------------------------------------------------------------------------
+This is the documented behavior, but previously the result could be any of
+slice, None, or list.
+
+All downstream users seem to check for the ``None`` result from
+``flatnotmasked_contiguous`` and replace it with ``[]``. Those callers will
+continue to work as before.
+
+``np.squeeze`` restores old behavior of objects that cannot handle an ``axis`` argument
+---------------------------------------------------------------------------------------
+Prior to version ``1.7.0``, `numpy.squeeze` did not have an ``axis`` argument and
+all empty axes were removed by default. The incorporation of an ``axis``
+argument made it possible to selectively squeeze single or multiple empty axes,
+but the old API expectation was not respected because axes could still be
+selectively removed (silent success) from an object expecting all empty axes to
+be removed. That silent, selective removal of empty axes for objects expecting
+the old behavior has been fixed and the old behavior restored.
+
+unstructured void array's ``.item`` method now returns a bytes object
+---------------------------------------------------------------------
+``.item`` now returns a ``bytes`` object instead of a buffer or byte array.
+This may affect code which assumed the return value was mutable, which is no
+longer the case.
+
+``copy.copy`` and ``copy.deepcopy`` no longer turn ``masked`` into an array
+---------------------------------------------------------------------------
+Since ``np.ma.masked`` is a readonly scalar, copying should be a no-op. These
+functions now behave consistently with ``np.copy()``.
+
+Multifield Indexing of Structured Arrays will still return a copy
+-----------------------------------------------------------------
+The change that multi-field indexing of structured arrays returns a view
+instead of a copy is pushed back to 1.16. A new method
+``numpy.lib.recfunctions.repack_fields`` has been introduced to help mitigate
+the effects of this change, which can be used to write code compatible with
+both numpy 1.15 and 1.16. For more information on how to update code to account
+for this future change see the "accessing multiple fields" section of the
+`user guide <https://docs.scipy.org/doc/numpy/user/basics.rec.html>`__.
+
+
+C API changes
+=============
+
+New functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier``
+-----------------------------------------------------------------------------------
+Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier``
+have been added and should be used in place of the ``npy_get_floatstatus`` and
+``npy_clear_floatstatus`` functions. Optimizing compilers like GCC 8.1 and Clang
+were rearranging the order of operations when the previous functions were used
+in the ufunc SIMD functions, resulting in the floatstatus flags being checked
+before the operation whose status we wanted to check was run. See `#10370
+<https://github.com/numpy/numpy/issues/10370>`__.
+
+Changes to ``PyArray_GetDTypeTransferFunction``
+-----------------------------------------------
+``PyArray_GetDTypeTransferFunction`` now defaults to using user-defined
+``copyswapn`` / ``copyswap`` for user-defined dtypes. If this causes a
+significant performance hit, consider implementing ``copyswapn`` to reflect the
+implementation of ``PyArray_GetStridedCopyFn``. See `#10898
+<https://github.com/numpy/numpy/pull/10898>`__.
+
+
+New Features
+============
+
+``np.gcd`` and ``np.lcm`` ufuncs added for integer and objects types
+--------------------------------------------------------------------
+These compute the greatest common divisor, and least common multiple,
+respectively. These work on all the numpy integer types, as well as the
+builtin arbitrary-precision ``Decimal`` and ``long`` types.
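+
+For example::
+
+    >>> np.gcd(12, 20)
+    4
+    >>> np.lcm(np.arange(6), 20)
+    array([ 0, 20, 20, 60, 20, 20])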
+
+Support for cross-platform builds for iOS
+-----------------------------------------
+The build system has been modified to add support for the
+``_PYTHON_HOST_PLATFORM`` environment variable, used by ``distutils`` when
+compiling on one platform for another platform. This makes it possible to
+compile NumPy for iOS targets.
+
+This only enables you to compile NumPy for one specific platform at a time.
+Creating a full iOS-compatible NumPy package requires building for the 5
+architectures supported by iOS (i386, x86_64, armv7, armv7s and arm64), and
+combining these 5 compiled build products into a single "fat" binary.
+
+``return_indices`` keyword added for ``np.intersect1d``
+-------------------------------------------------------
+New keyword ``return_indices`` returns the indices of the two input arrays
+that correspond to the common elements.
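+
+A short illustration::
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> b = np.array([2, 1, 4, 6])
+    >>> common, ia, ib = np.intersect1d(a, b, return_indices=True)
+    >>> common, a[ia], b[ib]
+    (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))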
+
+``np.quantile`` and ``np.nanquantile``
+--------------------------------------
+Like ``np.percentile`` and ``np.nanpercentile``, but takes quantiles in [0, 1]
+rather than percentiles in [0, 100]. ``np.percentile`` is now a thin wrapper
+around ``np.quantile`` with the extra step of dividing by 100.
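+
+For example::
+
+    >>> np.quantile(np.arange(10), 0.5)   # the median
+    4.5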
+
+
+Build system
+------------
+Added experimental support for the 64-bit RISC-V architecture.
+
+
+Improvements
+============
+
+``np.einsum`` updates
+---------------------
+Syncs einsum path optimization between `numpy` and `opt_einsum`. In
+particular, the `greedy` path has received many enhancements by @jcmgray. A
+full list of the issues fixed is:
+
+* Arbitrary memory can be passed into the `greedy` path. Fixes gh-11210.
+* The greedy path has been updated to contain more dynamic programming ideas
+ preventing a large number of duplicate (and expensive) calls that figure out
+ the actual pair contraction that takes place. Now takes a few seconds on
+ several hundred input tensors. Useful for matrix product state theories.
+* Reworks the broadcasting dot error catching found in gh-11218 gh-10352 to be
+ a bit earlier in the process.
+* Enhances the `can_dot` functionality that previously missed an edge case (part
+ of gh-11308).
+
+``np.ufunc.reduce`` and related functions now accept an initial value
+---------------------------------------------------------------------
+``np.ufunc.reduce``, ``np.sum``, ``np.prod``, ``np.min`` and ``np.max`` all
+now accept an ``initial`` keyword argument that specifies the value to start
+the reduction with.
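+
+For example::
+
+    >>> np.sum([0.5, 1.5], initial=10)
+    12.0
+    >>> np.min([6], initial=5)
+    5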
+
+``np.flip`` can operate over multiple axes
+------------------------------------------
+``np.flip`` now accepts None, or tuples of int, in its ``axis`` argument. If
+axis is None, it will flip over all the axes.
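+
+For example::
+
+    >>> a = np.arange(4).reshape(2, 2)
+    >>> np.flip(a)          # axis=None flips over all axes
+    array([[3, 2],
+           [1, 0]])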
+
+``histogram`` and ``histogramdd`` functions have moved to ``np.lib.histograms``
+-------------------------------------------------------------------------------
+These were originally found in ``np.lib.function_base``. They are still
+available under their un-scoped ``np.histogram(dd)`` names, and
+to maintain compatibility, aliased at ``np.lib.function_base.histogram(dd)``.
+
+Code that does ``from numpy.lib.function_base import *`` will need to be
+updated with the new location, and should consider not using ``import *`` in
+the future.
+
+``histogram`` will accept NaN values when explicit bins are given
+-----------------------------------------------------------------
+Previously it would fail when trying to compute a finite range for the data.
+Since the range is ignored anyway when the bins are given explicitly, this error
+was needless.
+
+Note that calling ``histogram`` on NaN values continues to raise the
+``RuntimeWarning`` typical of working with NaN values, which can be silenced
+as usual with ``errstate``.
+
+``histogram`` works on datetime types, when explicit bin edges are given
+------------------------------------------------------------------------
+Dates, times, and timedeltas can now be histogrammed. The bin edges must be
+passed explicitly, and are not yet computed automatically.
+
+``histogram`` "auto" estimator handles limited variance better
+--------------------------------------------------------------
+No longer does an IQR of 0 result in ``n_bins=1``, rather the number of bins
+chosen is related to the data size in this situation.
+
+The edges returned by ``histogram`` and ``histogramdd`` now match the data float type
+---------------------------------------------------------------------------------------
+When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the
+returned edges are now of the same dtype. Previously, ``histogram`` would only
+return the same type if explicit bins were given, and ``histogramdd`` would
+produce ``float64`` bins no matter what the inputs.
+
+``histogramdd`` allows explicit ranges to be given in a subset of axes
+----------------------------------------------------------------------
+The ``range`` argument of `numpy.histogramdd` can now contain ``None`` values to
+indicate that the range for the corresponding axis should be computed from the
+data. Previously, this could not be specified on a per-axis basis.
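+
+A minimal sketch, using random sample data::
+
+    sample = np.random.rand(100, 2)
+    # explicit range for the first axis, data-derived range for the second
+    hist, edges = np.histogramdd(sample, bins=(4, 4), range=[(0, 1), None])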
+
+The normed arguments of ``histogramdd`` and ``histogram2d`` have been renamed
+-----------------------------------------------------------------------------
+These arguments are now called ``density``, which is consistent with
+``histogram``. The old argument continues to work, but the new name should be
+preferred.
+
+``np.r_`` works with 0d arrays, and ``np.ma.mr_`` works with ``np.ma.masked``
+-----------------------------------------------------------------------------
+0d arrays passed to the `r_` and `mr_` concatenation helpers are now treated as
+though they are arrays of length 1. Previously, passing these was an error.
+As a result, `numpy.ma.mr_` now works correctly on the ``masked`` constant.
+
+``np.ptp`` accepts a ``keepdims`` argument, and extended axis tuples
+--------------------------------------------------------------------
+``np.ptp`` (peak-to-peak) can now work over multiple axes, just like ``np.max``
+and ``np.min``.
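+
+For example::
+
+    >>> a = np.arange(12).reshape(3, 4)
+    >>> np.ptp(a, axis=(0, 1))
+    11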
+
+``MaskedArray.astype`` now is identical to ``ndarray.astype``
+-------------------------------------------------------------
+This means it takes all the same arguments, making more code written for
+ndarray work for masked array too.
+
+Enable AVX2/AVX512 at compile time
+----------------------------------
+Change to simd.inc.src to allow use of AVX2 or AVX512 at compile time. Previously
+compilation for avx2 (or 512) with -march=native would still use the SSE
+code for the simd functions even when the rest of the code got AVX2.
+
+``nan_to_num`` always returns scalars when receiving scalar or 0d inputs
+------------------------------------------------------------------------
+Previously an array was returned for integer scalar inputs, which is
+inconsistent with the behavior for float inputs, and that of ufuncs in general.
+For all types of scalar or 0d input, the result is now a scalar.
+
+``np.flatnonzero`` works on numpy-convertible types
+---------------------------------------------------
+``np.flatnonzero`` now uses ``np.ravel(a)`` instead of ``a.ravel()``, so it
+works for lists, tuples, etc.
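+
+For example::
+
+    >>> np.flatnonzero([0, 1, 0, 2])
+    array([1, 3])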
+
+``np.interp`` returns numpy scalars rather than builtin scalars
+---------------------------------------------------------------
+Previously ``np.interp(0.5, [0, 1], [10, 20])`` would return a ``float``, but
+now it returns a ``np.float64`` object, which more closely matches the behavior
+of other functions.
+
+Additionally, the special case of ``np.interp(object_array_0d, ...)`` is no
+longer supported, as ``np.interp(object_array_nd)`` was never supported anyway.
+
+As a result of this change, the ``period`` argument can now be used on 0d
+arrays.
+
+Allow dtype field names to be unicode in Python 2
+-------------------------------------------------
+Previously ``np.dtype([(u'name', float)])`` would raise a ``TypeError`` in
+Python 2, as only bytestrings were allowed in field names. Now any unicode
+string field names will be encoded with the ``ascii`` codec, raising a
+``UnicodeEncodeError`` upon failure.
+
+This change makes it easier to write Python 2/3 compatible code using
+``from __future__ import unicode_literals``, which previously would cause
+string literal field names to raise a TypeError in Python 2.
+
+Comparison ufuncs accept ``dtype=object``, overriding the default ``bool``
+--------------------------------------------------------------------------
+This allows object arrays of symbolic types, which override ``==`` and other
+operators to return expressions, to be compared elementwise with
+``np.equal(a, b, dtype=object)``.
+
+``sort`` functions accept ``kind='stable'``
+-------------------------------------------
+Up until now, to perform a stable sort on the data, the user must do::
+
+    >>> np.sort([5, 2, 6, 2, 1], kind='mergesort')
+    array([1, 2, 2, 5, 6])
+
+because merge sort is the only stable sorting algorithm available in
+NumPy. However, ``kind='mergesort'`` does not make it explicit that the
+user wants a stable sort, which harms readability.
+
+This change allows the user to specify kind='stable' thus clarifying
+the intent.
+
+Do not make temporary copies for in-place accumulation
+------------------------------------------------------
+When ufuncs perform accumulation they no longer make temporary copies because
+of the overlap between input and output, that is, the next element accumulated
+is added before the accumulated result is stored in its place, hence the
+overlap is safe. Avoiding the copy results in faster execution.
+
+``linalg.matrix_power`` can now handle stacks of matrices
+---------------------------------------------------------
+Like other functions in ``linalg``, ``matrix_power`` can now deal with arrays
+of dimension larger than 2, which are treated as stacks of matrices. As part
+of the change, to further improve consistency, the name of the first argument
+has been changed to ``a`` (from ``M``), and the exceptions for non-square
+matrices have been changed to ``LinAlgError`` (from ``ValueError``).
+
+Increased performance in ``random.permutation`` for multidimensional arrays
+---------------------------------------------------------------------------
+``permutation`` uses the fast path in ``random.shuffle`` for all input
+array dimensions. Previously the fast path was only used for 1-d arrays.
+
+Generalized ufuncs now accept ``axes``, ``axis`` and ``keepdims`` arguments
+---------------------------------------------------------------------------
+One can control over which axes a generalized ufunc operates by passing in an
+``axes`` argument, a list of tuples with indices of particular axes. For
+instance, for a signature of ``(i,j),(j,k)->(i,k)`` appropriate for matrix
+multiplication, the base elements are two-dimensional matrices and these are
+taken to be stored in the two last axes of each argument. The corresponding
+axes keyword would be ``[(-2, -1), (-2, -1), (-2, -1)]``. If one wanted to
+use leading dimensions instead, one would pass in ``[(0, 1), (0, 1), (0, 1)]``.
+
+For simplicity, for generalized ufuncs that operate on 1-dimensional arrays
+(vectors), a single integer is accepted instead of a single-element tuple, and
+for generalized ufuncs for which all outputs are scalars, the (empty) output
+tuples can be omitted. Hence, for a signature of ``(i),(i)->()`` appropriate
+for an inner product, one could pass in ``axes=[0, 0]`` to indicate that the
+vectors are stored in the first dimensions of the two input arguments.
+
+As a short-cut for generalized ufuncs that are similar to reductions, i.e.,
+that act on a single, shared core dimension such as the inner product example
+above, one can pass an ``axis`` argument. This is equivalent to passing in
+``axes`` with identical entries for all arguments with that core dimension
+(e.g., for the example above, ``axes=[(axis,), (axis,)]``).
+
+Furthermore, like for reductions, for generalized ufuncs that have inputs that
+all have the same number of core dimensions and outputs with no core dimension,
+one can pass in ``keepdims`` to leave a dimension with size 1 in the outputs,
+thus allowing proper broadcasting against the original inputs. The location of
+the extra dimension can be controlled with ``axes``. For instance, for the
+inner-product example, ``keepdims=True, axes=[-2, -2, -2]`` or, equivalently,
+``keepdims=True, axis=-2`` would act on the
+one-but-last dimension of the input arguments, and leave a size 1 dimension in
+that place in the output.
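+
+As an illustration, a sketch using ``inner1d`` (signature ``(i),(i)->()``)
+from the internal ``numpy.core.umath_tests`` module, used here only because
+defining new generalized ufuncs requires the C API::
+
+    >>> from numpy.core.umath_tests import inner1d
+    >>> a = np.arange(6).reshape(3, 2)
+    >>> inner1d(a, a, axis=-1)   # the rows are the shared core dimension
+    array([ 1, 13, 41])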
+
+float128 values now print correctly on ppc systems
+--------------------------------------------------
+Previously printing float128 values was buggy on ppc, since the special
+double-double floating-point-format on these systems was not accounted for.
+float128s now print with correct rounding and uniqueness.
+
+Warning to ppc users: You should upgrade glibc if it is version <=2.23,
+especially if using float128. On ppc, glibc's malloc in these versions often
+misaligns allocated memory, which can crash numpy when using float128 values.
+
+New ``np.take_along_axis`` and ``np.put_along_axis`` functions
+--------------------------------------------------------------
+When used on multidimensional arrays, ``argsort``, ``argmin``, ``argmax``, and
+``argpartition`` return arrays that are difficult to use as indices.
+``take_along_axis`` provides an easy way to use these indices to look up values
+within an array, so that::
+
+ np.take_along_axis(a, np.argsort(a, axis=axis), axis=axis)
+
+is the same as::
+
+ np.sort(a, axis=axis)
+
+``np.put_along_axis`` acts as the dual operation for writing to these indices
+within an array.
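+
+For example, to extract the per-row maxima::
+
+    >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+    >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+    >>> np.take_along_axis(a, ai, axis=1)
+    array([[30],
+           [60]])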
+
--- /dev/null
+==========================
+NumPy 1.15.1 Release Notes
+==========================
+
+This is a bugfix release for bugs and regressions reported following the 1.15.0
+release.
+
+* The annoying but harmless RuntimeWarning that "numpy.dtype size changed" has
+ been suppressed. The long standing suppression was lost in the transition to
+ pytest.
+* The update to Cython 0.28.3 exposed a problematic use of a gcc attribute used
+ to prefer code size over speed in module initialization, possibly resulting in
+incorrect compiled code. This has been fixed in the latest Cython but has been
+ disabled here for safety.
+* Support for big-endian and ARMv8 architectures has been improved.
+
+The Python versions supported by this release are 2.7, 3.4-3.7. The wheels are
+linked with OpenBLAS v0.3.0, which should fix some of the linalg problems
+reported for NumPy 1.14.
+
+
+Compatibility Note
+==================
+
+The NumPy 1.15.x OS X wheels released on PyPI no longer contain 32-bit
+binaries. That will also be the case in future releases. See
+`#11625 <https://github.com/numpy/numpy/issues/11625>`__ for the related
+discussion. Those needing 32-bit support should look elsewhere or build
+from source.
+
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Chris Billington
+* Elliott Sales de Andrade +
+* Eric Wieser
+* Jeremy Manning +
+* Matti Picus
+* Ralf Gommers
+
+Pull requests merged
+====================
+
+A total of 24 pull requests were merged for this release.
+
+* `#11647 <https://github.com/numpy/numpy/pull/11647>`__: MAINT: Filter Cython warnings in ``__init__.py``
+* `#11648 <https://github.com/numpy/numpy/pull/11648>`__: BUG: Fix doc source links to unwrap decorators
+* `#11657 <https://github.com/numpy/numpy/pull/11657>`__: BUG: Ensure singleton dimensions are not dropped when converting...
+* `#11661 <https://github.com/numpy/numpy/pull/11661>`__: BUG: Warn on Nan in minimum,maximum for scalars
+* `#11665 <https://github.com/numpy/numpy/pull/11665>`__: BUG: cython sometimes emits invalid gcc attribute
+* `#11682 <https://github.com/numpy/numpy/pull/11682>`__: BUG: Fix regression in void_getitem
+* `#11698 <https://github.com/numpy/numpy/pull/11698>`__: BUG: Make matrix_power again work for object arrays.
+* `#11700 <https://github.com/numpy/numpy/pull/11700>`__: BUG: Add missing PyErr_NoMemory after failing malloc
+* `#11719 <https://github.com/numpy/numpy/pull/11719>`__: BUG: Fix undefined functions on big-endian systems.
+* `#11720 <https://github.com/numpy/numpy/pull/11720>`__: MAINT: Make einsum optimize default to False.
+* `#11746 <https://github.com/numpy/numpy/pull/11746>`__: BUG: Fix regression in loadtxt for bz2 text files in Python 2.
+* `#11757 <https://github.com/numpy/numpy/pull/11757>`__: BUG: Revert use of `console_scripts`.
+* `#11758 <https://github.com/numpy/numpy/pull/11758>`__: BUG: Fix Fortran kind detection for aarch64 & s390x.
+* `#11759 <https://github.com/numpy/numpy/pull/11759>`__: BUG: Fix printing of longdouble on ppc64le.
+* `#11760 <https://github.com/numpy/numpy/pull/11760>`__: BUG: Fixes for unicode field names in Python 2
+* `#11761 <https://github.com/numpy/numpy/pull/11761>`__: BUG: Increase required cython version on python 3.7
+* `#11763 <https://github.com/numpy/numpy/pull/11763>`__: BUG: check return value of _buffer_format_string
+* `#11775 <https://github.com/numpy/numpy/pull/11775>`__: MAINT: Make assert_array_compare more generic.
+* `#11776 <https://github.com/numpy/numpy/pull/11776>`__: TST: Fix urlopen stubbing.
+* `#11777 <https://github.com/numpy/numpy/pull/11777>`__: BUG: Fix regression in intersect1d.
+* `#11779 <https://github.com/numpy/numpy/pull/11779>`__: BUG: Fix test sensitive to platform byte order.
+* `#11781 <https://github.com/numpy/numpy/pull/11781>`__: BUG: Avoid signed overflow in histogram
+* `#11785 <https://github.com/numpy/numpy/pull/11785>`__: BUG: Fix pickle and memoryview for datetime64, timedelta64 scalars
+* `#11786 <https://github.com/numpy/numpy/pull/11786>`__: BUG: Deprecation triggers segfault
--- /dev/null
+==========================
+NumPy 1.15.2 Release Notes
+==========================
+
+This is a bugfix release for bugs and regressions reported following the 1.15.1
+release.
+
+* The matrix PendingDeprecationWarning is now suppressed in pytest 3.8.
+* The new cached allocations machinery has been fixed to be thread safe.
+* The boolean indexing of subclasses now works correctly.
+* A small memory leak in PyArray_AdaptFlexibleDType has been fixed.
+
+The Python versions supported by this release are 2.7, 3.4-3.7. The wheels are
+linked with OpenBLAS v0.3.0, which should fix some of the linalg problems
+reported for NumPy 1.14.
+
+Compatibility Note
+==================
+
+The NumPy 1.15.x OS X wheels released on PyPI no longer contain 32-bit
+binaries. That will also be the case in future releases. See
+`#11625 <https://github.com/numpy/numpy/issues/11625>`__ for the related
+discussion. Those needing 32-bit support should look elsewhere or build
+from source.
+
+Contributors
+============
+
+A total of 4 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Julian Taylor
+* Marten van Kerkwijk
+* Matti Picus
+
+Pull requests merged
+====================
+
+A total of 4 pull requests were merged for this release.
+
+* `#11902 <https://github.com/numpy/numpy/pull/11902>`__: BUG: Fix matrix PendingDeprecationWarning suppression for pytest...
+* `#11981 <https://github.com/numpy/numpy/pull/11981>`__: BUG: fix cached allocations without the GIL for 1.15.x
+* `#11982 <https://github.com/numpy/numpy/pull/11982>`__: BUG: fix refcount leak in PyArray_AdaptFlexibleDType
+* `#11992 <https://github.com/numpy/numpy/pull/11992>`__: BUG: Ensure boolean indexing of subclasses sets base correctly.
--- /dev/null
+==========================
+NumPy 1.15.3 Release Notes
+==========================
+
+This is a bugfix release for bugs and regressions reported following the 1.15.2
+release. The Python versions supported by this release are 2.7, 3.4-3.7. The
+wheels are linked with OpenBLAS v0.3.0, which should fix some of the linalg
+problems reported for NumPy 1.14.
+
+Compatibility Note
+==================
+
+The NumPy 1.15.x OS X wheels released on PyPI no longer contain 32-bit
+binaries. That will also be the case in future releases. See
+`#11625 <https://github.com/numpy/numpy/issues/11625>`__ for the related
+discussion. Those needing 32-bit support should look elsewhere or build
+from source.
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Jeroen Demeyer
+* Kevin Sheppard
+* Matthew Bowden +
+* Matti Picus
+* Tyler Reddy
+
+Pull requests merged
+====================
+
+A total of 12 pull requests were merged for this release.
+
+* `#12080 <https://github.com/numpy/numpy/pull/12080>`__: MAINT: Blacklist some MSVC complex functions.
+* `#12083 <https://github.com/numpy/numpy/pull/12083>`__: TST: Add azure CI testing to 1.15.x branch.
+* `#12084 <https://github.com/numpy/numpy/pull/12084>`__: BUG: test_path() now uses Path.resolve()
+* `#12085 <https://github.com/numpy/numpy/pull/12085>`__: TST, MAINT: Fix some failing tests on azure-pipelines mac and...
+* `#12187 <https://github.com/numpy/numpy/pull/12187>`__: BUG: Fix memory leak in mapping.c
+* `#12188 <https://github.com/numpy/numpy/pull/12188>`__: BUG: Allow boolean subtract in histogram
+* `#12189 <https://github.com/numpy/numpy/pull/12189>`__: BUG: Fix in-place permutation
+* `#12190 <https://github.com/numpy/numpy/pull/12190>`__: BUG: limit default for get_num_build_jobs() to 8
+* `#12191 <https://github.com/numpy/numpy/pull/12191>`__: BUG: OBJECT_to_* should check for errors
+* `#12192 <https://github.com/numpy/numpy/pull/12192>`__: DOC: Prepare for NumPy 1.15.3 release.
+* `#12237 <https://github.com/numpy/numpy/pull/12237>`__: BUG: Fix MaskedArray fill_value type conversion.
+* `#12238 <https://github.com/numpy/numpy/pull/12238>`__: TST: Backport azure-pipeline testing fixes for Mac
--- /dev/null
+==========================
+NumPy 1.15.4 Release Notes
+==========================
+
+This is a bugfix release for bugs and regressions reported following the 1.15.3
+release. The Python versions supported by this release are 2.7, 3.4-3.7. The
+wheels are linked with OpenBLAS v0.3.0, which should fix some of the linalg
+problems reported for NumPy 1.14.
+
+Compatibility Note
+==================
+
+The NumPy 1.15.x OS X wheels released on PyPI no longer contain 32-bit
+binaries. That will also be the case in future releases. See
+`#11625 <https://github.com/numpy/numpy/issues/11625>`__ for the related
+discussion. Those needing 32-bit support should look elsewhere or build
+from source.
+
+Contributors
+============
+
+A total of 4 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Matti Picus
+* Sebastian Berg
+* bbbbbbbbba +
+
+Pull requests merged
+====================
+
+A total of 4 pull requests were merged for this release.
+
+* `#12296 <https://github.com/numpy/numpy/pull/12296>`__: BUG: Dealloc cached buffer info
+* `#12297 <https://github.com/numpy/numpy/pull/12297>`__: BUG: Fix fill value in masked array '==' and '!=' ops.
+* `#12307 <https://github.com/numpy/numpy/pull/12307>`__: DOC: Correct the default value of `optimize` in `numpy.einsum`
+* `#12320 <https://github.com/numpy/numpy/pull/12320>`__: REL: Prepare for the NumPy 1.15.4 release
--- /dev/null
+==========================
+NumPy 1.16.0 Release Notes
+==========================
+
+This NumPy release is the last one to support Python 2.7 and will be maintained
+as a long term release with bug fixes until 2020. Support for Python 3.4 has been
+dropped, the supported Python versions are 2.7 and 3.5-3.7. The wheels on PyPI
+are linked with OpenBLAS v0.3.4+, which should fix the known threading issues
+found in previous OpenBLAS versions.
+
+Downstream developers building this release should use Cython >= 0.29 and, if
+using OpenBLAS, OpenBLAS > v0.3.4.
+
+This release has seen a lot of refactoring and features many bug fixes, improved
+code organization, and better cross platform compatibility. Not all of these
+improvements will be visible to users, but they should help make maintenance
+easier going forward.
+
+
+Highlights
+==========
+
+* Experimental (opt-in only) support for overriding numpy functions,
+ see ``__array_function__`` below.
+
+* The ``matmul`` function is now a ufunc. This provides better
+ performance and allows overriding with ``__array_ufunc__``.
+
+* Improved support for the ARM and POWER architectures.
+
+* Improved support for AIX and PyPy.
+
+* Improved interop with ctypes.
+
+* Improved support for PEP 3118.
+
+
+
+New functions
+=============
+
+* New functions added to the `numpy.lib.recfunctions` module to ease the
+ structured assignment changes:
+
+ * ``assign_fields_by_name``
+ * ``structured_to_unstructured``
+ * ``unstructured_to_structured``
+ * ``apply_along_fields``
+ * ``require_fields``
+
+ See the user guide at <https://docs.scipy.org/doc/numpy/user/basics.rec.html>
+ for more info.
+
+
+New deprecations
+================
+
+* The type dictionaries `numpy.core.typeNA` and `numpy.core.sctypeNA` are
+ deprecated. They were buggy and not documented and will be removed in the
+  1.18 release. Use `numpy.sctypeDict` instead.
+
+* The `numpy.asscalar` function is deprecated. It is an alias of the more
+  powerful `numpy.ndarray.item`, is not tested, and fails for scalars.
+
+* The `numpy.set_array_ops` and `numpy.get_array_ops` functions are deprecated.
+ As part of `NEP 15`, they have been deprecated along with the C-API functions
+ :c:func:`PyArray_SetNumericOps` and :c:func:`PyArray_GetNumericOps`. Users
+ who wish to override the inner loop functions in built-in ufuncs should use
+ :c:func:`PyUFunc_ReplaceLoopBySignature`.
+
+* The `numpy.unravel_index` keyword argument ``dims`` is deprecated, use
+ ``shape`` instead.
+
+* The `numpy.histogram` ``normed`` argument is deprecated. It was deprecated
+ previously, but no warning was issued.
+
+* The ``positive`` operator (``+``) applied to non-numerical arrays is
+ deprecated. See below for details.
+
+* Passing an iterator to the stack functions is deprecated.
+
+
+Expired deprecations
+====================
+
+* NaT comparisons now return ``False`` without a warning, finishing a
+ deprecation cycle begun in NumPy 1.11.
+
+* ``np.lib.function_base.unique`` was removed, finishing a deprecation cycle
+ begun in NumPy 1.4. Use `numpy.unique` instead.
+
+* multi-field indexing now returns views instead of copies, finishing a
+ deprecation cycle begun in NumPy 1.7. The change was previously attempted in
+ NumPy 1.14 but reverted until now.
+
+* ``np.PackageLoader`` and ``np.pkgload`` have been removed. These were
+ deprecated in 1.10, had no tests, and seem to no longer work in 1.15.
+
+
+Future changes
+==============
+
+* NumPy 1.17 will drop support for Python 2.7.
+
+
+Compatibility notes
+===================
+
+f2py script on Windows
+----------------------
+On Windows, the installed script for running f2py is now an ``.exe`` file
+rather than a ``*.py`` file and should be run from the command line as ``f2py``
+whenever the ``Scripts`` directory is in the path. Running ``f2py`` as a module
+``python -m numpy.f2py [...]`` will work without path modification in any
+version of NumPy.
+
+NaT comparisons
+---------------
+Consistent with the behavior of NaN, all comparisons other than inequality
+checks with datetime64 or timedelta64 NaT ("not-a-time") values now always
+return ``False``, and inequality checks with NaT now always return ``True``.
+This includes comparisons between NaT values. For compatibility with the
+old behavior, use ``np.isnat`` to explicitly check for NaT or convert
+datetime64/timedelta64 arrays with ``.astype(np.int64)`` before making
+comparisons.
+
+complex64/128 alignment has changed
+-----------------------------------
+The memory alignment of complex types is now the same as a C-struct composed of
+two floating point values, while before it was equal to the size of the type.
+For many users (for instance on x64/unix/gcc) this means that complex64 is now
+4-byte aligned instead of 8-byte aligned. An important consequence is that
+aligned structured dtypes may now have a different size. For instance,
+``np.dtype('c8,u1', align=True)`` used to have an itemsize of 16 (on x64/gcc)
+but now it is 12.
+
+More in detail, the complex64 type now has the same alignment as a C-struct
+``struct {float r, i;}``, according to the compiler used to compile numpy, and
+similarly for the complex128 and complex256 types.
+
+nd_grid __len__ removal
+-----------------------
+``len(np.mgrid)`` and ``len(np.ogrid)`` are now considered nonsensical
+and raise a ``TypeError``.
+
+``np.unravel_index`` now accepts ``shape`` keyword argument
+-----------------------------------------------------------
+Previously, only the ``dims`` keyword argument was accepted
+for specification of the shape of the array to be used
+for unraveling. ``dims`` remains supported, but is now deprecated.
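+
+For example::
+
+    >>> np.unravel_index(22, shape=(7, 6))
+    (3, 4)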
+
+multi-field views return a view instead of a copy
+-------------------------------------------------
+Indexing a structured array with multiple fields, e.g., ``arr[['f1', 'f3']]``,
+returns a view into the original array instead of a copy. The returned view
+will often have extra padding bytes corresponding to intervening fields in the
+original array, unlike before, which will affect code such as
+``arr[['f1', 'f3']].view('float64')``. This change has been planned since numpy
+1.7. Operations hitting this path have emitted ``FutureWarnings`` since then.
+Additional ``FutureWarnings`` about this change were added in 1.12.
+
+To help users update their code to account for these changes, a number of
+functions have been added to the ``numpy.lib.recfunctions`` module which
+safely allow such operations. For instance, the code above can be replaced
+with ``structured_to_unstructured(arr[['f1', 'f3']], dtype='float64')``.
+See the "accessing multiple fields" section of the
+`user guide <https://docs.scipy.org/doc/numpy/user/basics.rec.html#accessing-multiple-fields>`__.
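+
+A short sketch of the new view semantics (exact reprs may vary by platform)::
+
+    >>> arr = np.zeros(3, dtype=[('f1', 'i8'), ('f2', 'f8'), ('f3', 'i8')])
+    >>> v = arr[['f1', 'f3']]   # now a view rather than a copy
+    >>> v['f1'] = 1             # modifies arr as well
+    >>> arr['f1']
+    array([1, 1, 1])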
+
+
+C API changes
+=============
+
+The :c:data:`NPY_API_VERSION` was incremented to 0x0000D, due to the addition
+of:
+
+* :c:member:`PyUFuncObject.core_dim_flags`
+* :c:member:`PyUFuncObject.core_dim_sizes`
+* :c:member:`PyUFuncObject.identity_value`
+* :c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity`
+
+
+New Features
+============
+
+Integrated squared error (ISE) estimator added to ``histogram``
+---------------------------------------------------------------
+This method (``bins='stone'``) for optimizing the bin number is a
+generalization of Scott's rule. Scott's rule assumes the distribution is
+approximately normal, while the ISE_ is a non-parametric method based on
+cross-validation.
+
+.. _ISE: https://en.wikipedia.org/wiki/Histogram#Minimizing_cross-validation_estimated_squared_error
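+
+A minimal sketch, assuming ``data`` is any 1-d sample::
+
+    >>> data = np.random.normal(size=1000)
+    >>> hist, bin_edges = np.histogram(data, bins='stone')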
+
+``max_rows`` keyword added for ``np.loadtxt``
+---------------------------------------------
+New keyword ``max_rows`` in `numpy.loadtxt` sets the maximum rows of the
+content to be read after ``skiprows``, as in `numpy.genfromtxt`.
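+
+For example, assuming a hypothetical ``data.txt`` with a one-line header::
+
+    >>> arr = np.loadtxt('data.txt', skiprows=1, max_rows=10)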
+
+modulus operator support added for ``np.timedelta64`` operands
+--------------------------------------------------------------
+The modulus (remainder) operator is now supported for two operands
+of type ``np.timedelta64``. The operands may have different units
+and the return value will match the type of the operands.
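+
+For example::
+
+    >>> np.timedelta64(7, 's') % np.timedelta64(4, 's')
+    numpy.timedelta64(3,'s')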
+
+
+Improvements
+============
+
+no-copy pickling of numpy arrays
+--------------------------------
+Up to protocol 4, numpy array pickling created 2 spurious copies of the data
+being serialized. With pickle protocol 5, and the ``PickleBuffer`` API, a
+large variety of numpy arrays can now be serialized without any copy using
+out-of-band buffers, and with one less copy using in-band buffers. This
+results, for large arrays, in an up to 66% drop in peak memory usage.
+
+build shell independence
+------------------------
+NumPy builds should no longer interact with the host machine
+shell directly. ``exec_command`` has been replaced with
+``subprocess.check_output`` where appropriate.
+
+`np.polynomial.Polynomial` classes render in LaTeX in Jupyter notebooks
+-----------------------------------------------------------------------
+When used in a front-end that supports it, `Polynomial` instances are now
+rendered through LaTeX. The current format is experimental, and is subject to
+change.
+
+``randint`` and ``choice`` now work on empty distributions
+----------------------------------------------------------
+Even when no elements needed to be drawn, ``np.random.randint`` and
+``np.random.choice`` raised an error when the arguments described an empty
+distribution. This has been fixed so that e.g.
+``np.random.choice([], 0) == np.array([], dtype=float64)``.
+
+``linalg.lstsq``, ``linalg.qr``, and ``linalg.svd`` now work with empty arrays
+------------------------------------------------------------------------------
+Previously, a ``LinAlgError`` would be raised when empty matrices (with zero
+rows and/or columns) were passed in. Now outputs of appropriate shapes are
+returned.
+
+Chain exceptions to give better error messages for invalid PEP3118 format strings
+---------------------------------------------------------------------------------
+This should help track down problems.
+
+Einsum optimization path updates and efficiency improvements
+------------------------------------------------------------
+Einsum was synchronized with the current upstream work.
+
+`numpy.angle` and `numpy.expand_dims` now work on ``ndarray`` subclasses
+------------------------------------------------------------------------
+In particular, they now work for masked arrays.
+
+``NPY_NO_DEPRECATED_API`` compiler warning suppression
+------------------------------------------------------
+Setting ``NPY_NO_DEPRECATED_API`` to a value of 0 will suppress the current compiler
+warnings when the deprecated numpy API is used.
+
+``np.diff`` added kwargs ``prepend`` and ``append``
+---------------------------------------------------
+New kwargs ``prepend`` and ``append`` allow values to be inserted at either
+end of the differences, similar to the options of `ediff1d`. The inverse of
+`cumsum` can now be obtained easily via ``prepend=0``.
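+
+For example, recovering an array from its differences::
+
+    >>> x = np.array([1, 3, 6, 10])
+    >>> d = np.diff(x, prepend=0)
+    >>> d
+    array([1, 2, 3, 4])
+    >>> np.cumsum(d)
+    array([ 1,  3,  6, 10])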
+
+ARM support updated
+-------------------
+Support for ARM CPUs has been updated to accommodate 32 and 64 bit targets,
+and also big and little endian byte ordering. AARCH32 memory alignment issues
+have been addressed. CI testing has been expanded to include AARCH64 targets
+via the services of shippable.com.
+
+Appending to build flags
+------------------------
+`numpy.distutils` has always overridden rather than appended to `LDFLAGS` and
+other similar such environment variables for compiling Fortran extensions.
+Now, if the `NPY_DISTUTILS_APPEND_FLAGS` environment variable is set to 1, the
+behavior will be appending. This applies to: `LDFLAGS`, `F77FLAGS`,
+`F90FLAGS`, `FREEFLAGS`, `FOPT`, `FDEBUG`, and `FFLAGS`. See gh-11525 for more
+details.
+
+Generalized ufunc signatures now allow fixed-size dimensions
+------------------------------------------------------------
+By using a numerical value in the signature of a generalized ufunc, one can
+indicate that the given function requires input or output to have dimensions
+with the given size. E.g., the signature of a function that converts a polar
+angle to a two-dimensional cartesian unit vector would be ``()->(2)``; that
+for one that converts two spherical angles to a three-dimensional unit vector
+would be ``(),()->(3)``; and that for the cross product of two
+three-dimensional vectors would be ``(3),(3)->(3)``.
+
+Note that to the elementary function these dimensions are not treated any
+differently from variable ones indicated with a name starting with a letter;
+the loop still is passed the corresponding size, but it can now count on that
+size being equal to the fixed one given in the signature.
+
+Generalized ufunc signatures now allow flexible dimensions
+----------------------------------------------------------
+Some functions, in particular numpy's implementation of ``@`` as ``matmul``,
+are very similar to generalized ufuncs in that they operate over core
+dimensions, but one could not present them as such because they were able to
+deal with inputs in which a dimension is missing. To support this, it is now
+allowed to postfix a dimension name with a question mark to indicate that the
+dimension does not necessarily have to be present.
+
+With this addition, the signature for ``matmul`` can be expressed as
+``(m?,n),(n,p?)->(m?,p?)``. This indicates that if, e.g., the second operand
+has only one dimension, for the purposes of the elementary function it will be
+treated as if that input has core shape ``(n, 1)``, and the output has the
+corresponding core shape of ``(m, 1)``. The actual output array, however, has
+the flexible dimension removed, i.e., it will have shape ``(..., m)``.
+Similarly, if both arguments have only a single dimension, the inputs will be
+presented as having shapes ``(1, n)`` and ``(n, 1)`` to the elementary
+function, and the output as ``(1, 1)``, while the actual output array returned
+will have shape ``()``. In this way, the signature allows one to use a
+single elementary function for four related but different signatures,
+``(m,n),(n,p)->(m,p)``, ``(n),(n,p)->(p)``, ``(m,n),(n)->(m)`` and
+``(n),(n)->()``.
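+
+For example, a sketch of the core shapes picked for ``matmul`` (which is a
+gufunc with this signature as of NumPy 1.16)::
+
+    >>> a, b = np.ones((2, 3)), np.ones(3)
+    >>> np.matmul(a, b).shape   # (m?,n),(n,p?)->(m?,p?) with p missing
+    (2,)
+    >>> np.matmul(b, b).shape   # both m and p missing
+    ()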
+
+``np.clip`` and the ``clip`` method check for memory overlap
+------------------------------------------------------------
+The ``out`` argument to these functions is now always tested for memory overlap
+to avoid corrupted results when memory overlap occurs.
+
+New value ``unscaled`` for option ``cov`` in ``np.polyfit``
+-----------------------------------------------------------
+A further possible value has been added to the ``cov`` parameter of the
+``np.polyfit`` function. With ``cov='unscaled'`` the scaling of the covariance
+matrix is disabled completely (similar to setting ``absolute_sigma=True`` in
+``scipy.optimize.curve_fit``). This is useful when the weights are given by
+1/sigma, with sigma being the (known) standard errors of (Gaussian
+distributed) data points, in which case the unscaled matrix is already a
+correct estimate of the covariance matrix.
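+
+A sketch, assuming ``sigma`` holds the known per-point standard errors::
+
+    >>> x = np.linspace(0, 1, 20)
+    >>> y = 1 + 2 * x + np.random.normal(scale=0.1, size=x.shape)
+    >>> sigma = np.full_like(x, 0.1)
+    >>> p, cov = np.polyfit(x, y, 1, w=1/sigma, cov='unscaled')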
+
+Detailed docstrings for scalar numeric types
+--------------------------------------------
+The ``help`` function, when applied to numeric types such as `numpy.intc`,
+`numpy.int_`, and `numpy.longlong`, now lists all of the aliased names for that
+type, distinguishing between platform-dependent and platform-independent aliases.
+
+``__module__`` attribute now points to public modules
+-----------------------------------------------------
+The ``__module__`` attribute on most NumPy functions has been updated to refer
+to the preferred public module from which to access a function, rather than
+the module in which the function happens to be defined. This produces more
+informative displays for functions in tools such as IPython, e.g., instead of
+``<function 'numpy.core.fromnumeric.sum'>`` you now see
+``<function 'numpy.sum'>``.
+
+Large allocations marked as suitable for transparent hugepages
+--------------------------------------------------------------
+On systems that support transparent hugepages via the madvise system call,
+numpy now marks large memory allocations as eligible for hugepage backing,
+which reduces page fault overhead and can significantly improve performance
+in some fault-heavy cases. On Linux the setting for huge pages to be used,
+`/sys/kernel/mm/transparent_hugepage/enabled`, must be at least `madvise`.
+Systems which already have it set to `always` will not see much difference as
+the kernel will automatically use huge pages where appropriate.
+
+Users of very old Linux kernels (~3.x and older) should make sure that
+`/sys/kernel/mm/transparent_hugepage/defrag` is not set to `always` to avoid
+performance problems due to concurrency issues in the memory defragmentation.
+
+Alpine Linux (and other musl c library distros) support
+-------------------------------------------------------
+We now default to using `fenv.h` for floating point status error reporting.
+Previously we had a broken default that sometimes would not report underflow,
+overflow, and invalid floating point operations. Now we can support non-glibc
+distributions like Alpine Linux as long as they ship `fenv.h`.
+
+Speedup ``np.block`` for large arrays
+-------------------------------------
+Large arrays (greater than ``512 * 512``) now use a blocking algorithm based on
+copying the data directly into the appropriate slice of the resulting array.
+This results in significant speedups for these large arrays, particularly for
+arrays being blocked along more than 2 dimensions.
+
+``arr.ctypes.data_as(...)`` holds a reference to arr
+----------------------------------------------------
+Previously the caller was responsible for keeping the array alive for the
+lifetime of the pointer.
+
+Speedup ``np.take`` for read-only arrays
+----------------------------------------
+The implementation of ``np.take`` no longer makes an unnecessary copy of the
+source array when its ``writeable`` flag is set to ``False``.
+
+Support path-like objects for more functions
+--------------------------------------------
+The ``np.core.records.fromfile`` function now supports ``pathlib.Path``
+and other path-like objects in addition to a file object. Furthermore, the
+``np.load`` function now also supports path-like objects when using memory
+mapping (``mmap_mode`` keyword argument).
+
+Better behaviour of ufunc identities during reductions
+------------------------------------------------------
+Universal functions have an ``.identity`` which is used when ``.reduce`` is
+called on an empty axis.
+
+As of this release, the logical binary ufuncs, `logical_and`, `logical_or`,
+and `logical_xor`, now have identities of type `bool`, where previously they
+were of type `int`. This restores the 1.14 behavior of getting ``bool`` values
+when reducing empty object arrays with these ufuncs, while also keeping the
+1.15 behavior of getting ``int`` values when reducing empty object arrays
+with arithmetic ufuncs like ``add`` and ``multiply``.
+
+Additionally, `logaddexp` now has an identity of ``-inf``, allowing it to be
+called on empty sequences, where previously it could not be.
+
+This is possible thanks to the new
+:c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity`, which allows
+arbitrary values to be used as identities.
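+
+For example::
+
+    >>> np.logical_and.reduce(np.array([], dtype=object))
+    True
+    >>> np.logaddexp.reduce([])
+    -inf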
+
+Improved conversion from ctypes objects
+---------------------------------------
+Numpy has always supported taking a value or type from ``ctypes`` and
+converting it into an array or dtype, but only behaved correctly for simpler
+types. As of this release, this caveat is lifted; now:
+
+* The ``_pack_`` attribute of ``ctypes.Structure``, used to emulate C's
+ ``__attribute__((packed))``, is respected.
+* Endianness of all ctypes objects is preserved
+* ``ctypes.Union`` is supported
+* Non-representable constructs raise exceptions, rather than producing
+ dangerously incorrect results:
+
+ * Bitfields are no longer interpreted as sub-arrays
+ * Pointers are no longer replaced with the type that they point to
+
+A new ``ndpointer.contents`` member
+-----------------------------------
+This matches the ``.contents`` member of normal ctypes arrays, and can be used
+to construct an ``np.array`` around the pointer's contents. This replaces
+``np.array(some_nd_pointer)``, which stopped working in 1.15. As a side effect
+of this change, ``ndpointer`` now supports dtypes with overlapping fields and
+padding.
+
+``matmul`` is now a ``ufunc``
+-----------------------------
+`numpy.matmul` is now a ufunc which means that both the function and the
+``__matmul__`` operator can now be overridden by ``__array_ufunc__``. Its
+implementation has also changed. It uses the same BLAS routines as
+`numpy.dot`, ensuring its performance is similar for large matrices.
+
+Start and stop arrays for ``linspace``, ``logspace`` and ``geomspace``
+----------------------------------------------------------------------
+These functions used to be limited to scalar stop and start values, but can
+now take arrays, which will be properly broadcast and result in an output
+which has one axis prepended. This can be used, e.g., to obtain linearly
+interpolated points between sets of points.
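+
+For example::
+
+    >>> np.linspace([0, 10], [2, 30], num=3)
+    array([[ 0., 10.],
+           [ 1., 20.],
+           [ 2., 30.]])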
+
+CI extended with additional services
+------------------------------------
+We now use additional free CI services, thanks to the companies that provide:
+
+* Code coverage testing via codecov.io
+* Arm testing via shippable.com
+* Additional test runs on azure pipelines
+
+These are in addition to our continued use of travis, appveyor (for wheels),
+and LGTM.
+
+
+Changes
+=======
+
+Comparison ufuncs will now error rather than return NotImplemented
+------------------------------------------------------------------
+Previously, comparison ufuncs such as ``np.equal`` would return
+`NotImplemented` if their arguments had structured dtypes, to help comparison
+operators such as ``__eq__`` deal with those. This is no longer needed, as the
+relevant logic has moved to the comparison operators proper (which thus do
+continue to return `NotImplemented` as needed). Hence, like all other ufuncs,
+the comparison ufuncs will now error on structured dtypes.
+
+Positive will now raise a deprecation warning for non-numerical arrays
+----------------------------------------------------------------------
+Previously, ``+array`` unconditionally returned a copy. Now, it will
+raise a ``DeprecationWarning`` if the array is not numerical (i.e.,
+if ``np.positive(array)`` raises a ``TypeError``). For ``ndarray``
+subclasses that override the default ``__array_ufunc__`` implementation,
+the ``TypeError`` is passed on.
+
+``NDArrayOperatorsMixin`` now implements matrix multiplication
+--------------------------------------------------------------
+Previously, ``np.lib.mixins.NDArrayOperatorsMixin`` did not implement the
+special methods for Python's matrix multiplication operator (``@``). This has
+changed now that ``matmul`` is a ufunc and can be overridden using
+``__array_ufunc__``.
+
+The scaling of the covariance matrix in ``np.polyfit`` is different
+-------------------------------------------------------------------
+So far, ``np.polyfit`` used a non-standard factor in the scaling of the
+covariance matrix. Namely, rather than using the standard ``chisq/(M-N)``, it
+scaled it with ``chisq/(M-N-2)`` where M is the number of data points and N is the
+number of parameters. This scaling is inconsistent with other fitting programs
+such as e.g. ``scipy.optimize.curve_fit`` and was changed to ``chisq/(M-N)``.
+
+``maximum`` and ``minimum`` no longer emit warnings
+---------------------------------------------------
+As part of code introduced in 1.10, ``float32`` and ``float64`` set invalid
+float status when a NaN is encountered in `numpy.maximum` and `numpy.minimum`,
+when using SSE2 semantics. This caused a `RuntimeWarning` to sometimes be
+emitted. In 1.15 we fixed the inconsistencies which caused the warnings to
+become more conspicuous. Now no warnings will be emitted.
+
+Umath and multiarray c-extension modules merged into a single module
+--------------------------------------------------------------------
+The two modules were merged, according to `NEP 15`_. Previously `np.core.umath`
+and `np.core.multiarray` were separate c-extension modules. They are now python
+wrappers to the single `np.core._multiarray_umath` c-extension module.
+
+.. _`NEP 15` : http://www.numpy.org/neps/nep-0015-merge-multiarray-umath.html
+
+``getfield`` validity checks extended
+-------------------------------------
+`numpy.ndarray.getfield` now checks the dtype and offset arguments to prevent
+accessing invalid memory locations.
+
+NumPy functions now support overrides with ``__array_function__``
+-----------------------------------------------------------------
+NumPy has a new experimental mechanism for overriding the implementation of
+almost all NumPy functions on non-NumPy arrays by defining an
+``__array_function__`` method, as described in `NEP 18`_.
+
+This feature has not yet been enabled by default, but has been released to
+facilitate experimentation by potential users. See the NEP for details on
+setting the appropriate environment variable. We expect the NumPy 1.17 release
+will enable overrides by default, which will also be more performant due to a
+new implementation written in C.
+
+.. _`NEP 18` : http://www.numpy.org/neps/nep-0018-array-function-protocol.html
+
+Arrays based off readonly buffers cannot be set ``writeable``
+-------------------------------------------------------------
+We now disallow setting the ``writeable`` flag to ``True`` on arrays created
+from ``fromstring(readonly-buffer)``.
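+
+A sketch, using ``np.frombuffer`` (the modern spelling of ``fromstring`` on a
+buffer)::
+
+    >>> a = np.frombuffer(b'\x00' * 8)   # array backed by a readonly buffer
+    >>> a.flags.writeable = True         # now raises a ValueError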
--- /dev/null
+==========================
+NumPy 1.16.1 Release Notes
+==========================
+
+The NumPy 1.16.1 release fixes bugs reported against the 1.16.0 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.4+, which should fix the known threading issues
+found in previous OpenBLAS versions.
+
+Downstream developers building this release should use Cython >= 0.29.2 and, if
+using OpenBLAS, OpenBLAS > v0.3.4.
+
+If you are installing using pip, you may encounter a problem with older
+installed versions of NumPy that pip did not delete becoming mixed with the
+current version, resulting in an ``ImportError``. That problem is particularly
+common on Debian derived distributions due to a modified pip. The fix is to
+make sure all previous NumPy versions installed by pip have been removed. See
+`#12736 <https://github.com/numpy/numpy/issues/12736>`__ for discussion of the
+issue. Note that previously this problem resulted in an ``AttributeError``.
+
+
+Contributors
+============
+
+A total of 16 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Antoine Pitrou
+* Arcesio Castaneda Medina +
+* Charles Harris
+* Chris Markiewicz +
+* Christoph Gohlke
+* Christopher J. Markiewicz +
+* Daniel Hrisca +
+* EelcoPeacs +
+* Eric Wieser
+* Kevin Sheppard
+* Matti Picus
+* OBATA Akio +
+* Ralf Gommers
+* Sebastian Berg
+* Stephan Hoyer
+* Tyler Reddy
+
+
+Enhancements
+============
+
+* `#12767 <https://github.com/numpy/numpy/pull/12767>`__: ENH: add mm->q floordiv
+* `#12768 <https://github.com/numpy/numpy/pull/12768>`__: ENH: port np.core.overrides to C for speed
+* `#12769 <https://github.com/numpy/numpy/pull/12769>`__: ENH: Add np.ctypeslib.as_ctypes_type(dtype), improve `np.ctypeslib.as_ctypes`
+* `#12773 <https://github.com/numpy/numpy/pull/12773>`__: ENH: add "max difference" messages to np.testing.assert_array_equal...
+* `#12820 <https://github.com/numpy/numpy/pull/12820>`__: ENH: Add mm->qm divmod
+* `#12890 <https://github.com/numpy/numpy/pull/12890>`__: ENH: add _dtype_ctype to namespace for freeze analysis
+
+
+Compatibility notes
+===================
+
+* The changed error message emitted by array comparison testing functions may
+ affect doctests. See below for detail.
+
+* Casting from double and single denormals to float16 has been corrected. In
+ some rare cases, this may result in results being rounded up instead of down,
+ changing the last bit (ULP) of the result.
+
+
+New Features
+============
+
+divmod operation is now supported for two ``timedelta64`` operands
+------------------------------------------------------------------
+The divmod operator now handles two ``np.timedelta64`` operands, with
+type signature ``mm->qm``.
+
+
+Improvements
+============
+
+Further improvements to ``ctypes`` support in ``np.ctypeslib``
+--------------------------------------------------------------
+A new `numpy.ctypeslib.as_ctypes_type` function has been added, which can be
+used to convert a `dtype` into a best-guess `ctypes` type. Thanks to this
+new function, `numpy.ctypeslib.as_ctypes` now supports a much wider range of
+array types, including structures, booleans, and integers of non-native
+endianness.
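+
+For instance (the exact ctypes type returned is platform-dependent)::
+
+    >>> np.ctypeslib.as_ctypes_type(np.dtype('i4'))
+    <class 'ctypes.c_int'>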
+
+Array comparison assertions include maximum differences
+-------------------------------------------------------
+Error messages from array comparison tests such as
+`np.testing.assert_allclose` now include "max absolute difference" and
+"max relative difference," in addition to the previous "mismatch" percentage.
+This information makes it easier to update absolute and relative error
+tolerances.
+
+
+Changes
+=======
+
+``timedelta64 % 0`` behavior adjusted to return ``NaT``
+-------------------------------------------------------
+The modulus operation with two ``np.timedelta64`` operands now returns
+``NaT`` in the case of division by zero, rather than returning zero.
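+
+For example::
+
+    >>> np.timedelta64(5, 's') % np.timedelta64(0, 's')
+    numpy.timedelta64('NaT','s')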
+
+
+
--- /dev/null
+==========================
+NumPy 1.16.2 Release Notes
+==========================
+
+NumPy 1.16.2 is a quick release fixing several problems encountered on Windows.
+The Python versions supported are 2.7 and 3.5-3.7. The Windows problems
+addressed are:
+
+- DLL load problems for NumPy wheels on Windows,
+- distutils command line parsing on Windows.
+
+There is also a regression fix correcting signed zeros produced by divmod, see
+below for details.
+
+Downstream developers building this release should use Cython >= 0.29.2 and, if
+using OpenBLAS, OpenBLAS > v0.3.4.
+
+If you are installing using pip, you may encounter a problem with older
+installed versions of NumPy that pip did not delete becoming mixed with the
+current version, resulting in an ``ImportError``. That problem is particularly
+common on Debian derived distributions due to a modified pip. The fix is to
+make sure all previous NumPy versions installed by pip have been removed. See
+`#12736 <https://github.com/numpy/numpy/issues/12736>`__ for discussion of the
+issue.
+
+
+Compatibility notes
+===================
+
+Signed zero when using divmod
+-----------------------------
+Starting in version 1.12.0, numpy incorrectly returned a negatively signed zero
+when using the ``divmod`` and ``floor_divide`` functions when the result was
+zero. For example::
+
+ >>> np.zeros(10)//1
+ array([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0.])
+
+With this release, the result is correctly returned as a positively signed
+zero::
+
+ >>> np.zeros(10)//1
+ array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
+
+
+Contributors
+============
+
+A total of 5 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Eric Wieser
+* Matti Picus
+* Tyler Reddy
+* Tony LaTorre +
+
+
+Pull requests merged
+====================
+
+A total of 7 pull requests were merged for this release.
+
+* `#12909 <https://github.com/numpy/numpy/pull/12909>`__: TST: fix vmImage dispatch in Azure
+* `#12923 <https://github.com/numpy/numpy/pull/12923>`__: MAINT: remove complicated test of multiarray import failure mode
+* `#13020 <https://github.com/numpy/numpy/pull/13020>`__: BUG: fix signed zero behavior in npy_divmod
+* `#13026 <https://github.com/numpy/numpy/pull/13026>`__: MAINT: Add functions to parse shell-strings in the platform-native...
+* `#13028 <https://github.com/numpy/numpy/pull/13028>`__: BUG: Fix regression in parsing of F90 and F77 environment variables
+* `#13038 <https://github.com/numpy/numpy/pull/13038>`__: BUG: parse shell escaping in extra_compile_args and extra_link_args
+* `#13041 <https://github.com/numpy/numpy/pull/13041>`__: BLD: Windows absolute path DLL loading
--- /dev/null
+==========================
+NumPy 1.16.3 Release Notes
+==========================
+
+The NumPy 1.16.3 release fixes bugs reported against the 1.16.2 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.4+, which should fix the known threading issues
+found in previous OpenBLAS versions.
+
+Downstream developers building this release should use Cython >= 0.29.2 and,
+if using OpenBLAS, OpenBLAS > v0.3.4.
+
+The most noticeable change in this release is that unpickling object arrays
+when loading ``*.npy`` or ``*.npz`` files now requires an explicit opt-in.
+This backwards incompatible change was made in response to
+`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
+
+
+Compatibility notes
+===================
+
+Unpickling while loading requires explicit opt-in
+-------------------------------------------------
+The functions ``np.load``, and ``np.lib.format.read_array`` take an
+`allow_pickle` keyword which now defaults to ``False`` in response to
+`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
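+
+A sketch of the new opt-in (``objects.npy`` is a throwaway file name)::
+
+    >>> a = np.array([{'key': 1}], dtype=object)
+    >>> np.save('objects.npy', a)
+    >>> np.load('objects.npy', allow_pickle=True)  # raises without the opt-in
+    array([{'key': 1}], dtype=object)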
+
+
+Improvements
+============
+
+Covariance in `random.multivariate_normal` cast to double
+----------------------------------------------------------
+This should make the tolerance used when checking the singular values of the
+covariance matrix more meaningful.
+
+
+Changes
+=======
+
+``__array_interface__`` offset now works as documented
+------------------------------------------------------
+The interface may use an ``offset`` value that was previously mistakenly
+ignored.
+
--- /dev/null
+==========================
+NumPy 1.16.4 Release Notes
+==========================
+
+The NumPy 1.16.4 release fixes bugs reported against the 1.16.3 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.7-dev, which should fix issues on Skylake series
+cpus.
+
+Downstream developers building this release should use Cython >= 0.29.2 and,
+if using OpenBLAS, OpenBLAS > v0.3.7. The supported Python versions are 2.7 and
+3.5-3.7.
+
+
+New deprecations
+================
+
+Writeable flag of C-API wrapped arrays
+--------------------------------------
+When an array is created from the C-API to wrap a pointer to data, the only
+indication we have of the read-write nature of the data is the ``writeable``
+flag set during creation. It is dangerous to force the flag to writeable. In
+the future it will not be possible to switch the writeable flag to ``True``
+from python. This deprecation should not affect many users since arrays
+created in such a manner are very rare in practice and only available through
+the NumPy C-API.
+
+
+Compatibility notes
+===================
+
+Potential changes to the random stream
+--------------------------------------
+Due to bugs in the application of log to random floating point numbers,
+the stream may change when sampling from ``np.random.beta``, ``np.random.binomial``,
+``np.random.laplace``, ``np.random.logistic``, ``np.random.logseries`` or
+``np.random.multinomial`` if a 0 is generated in the underlying MT19937 random stream.
+There is a 1 in :math:`10^{53}` chance of this occurring, and so the probability that
+the stream changes for any given seed is extremely small. If a 0 is encountered in the
+underlying generator, then the incorrect value produced (either ``np.inf``
+or ``np.nan``) is now dropped.
+
+
+Changes
+=======
+
+`numpy.lib.recfunctions.structured_to_unstructured` does not squeeze single-field views
+---------------------------------------------------------------------------------------
+Previously ``structured_to_unstructured(arr[['a']])`` would produce a squeezed
+result inconsistent with ``structured_to_unstructured(arr[['a', 'b']])``. This
+was accidental. The old behavior can be retained with
+``structured_to_unstructured(arr[['a']]).squeeze(axis=-1)`` or far more simply,
+``arr['a']``.
+
+
+Contributors
+============
+
+A total of 10 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Eric Wieser
+* Dennis Zollo +
+* Hunter Damron +
+* Jingbei Li +
+* Kevin Sheppard
+* Matti Picus
+* Nicola Soranzo +
+* Sebastian Berg
+* Tyler Reddy
+
+
+Pull requests merged
+====================
+
+A total of 16 pull requests were merged for this release.
+
+* `#13392 <https://github.com/numpy/numpy/pull/13392>`__: BUG: Some PyPy versions lack PyStructSequence_InitType2.
+* `#13394 <https://github.com/numpy/numpy/pull/13394>`__: MAINT, DEP: Fix deprecated ``assertEquals()``
+* `#13396 <https://github.com/numpy/numpy/pull/13396>`__: BUG: Fix structured_to_unstructured on single-field types (backport)
+* `#13549 <https://github.com/numpy/numpy/pull/13549>`__: BLD: Make CI pass again with pytest 4.5
+* `#13552 <https://github.com/numpy/numpy/pull/13552>`__: TST: Register markers in conftest.py.
+* `#13559 <https://github.com/numpy/numpy/pull/13559>`__: BUG: Removes ValueError for empty kwargs in arraymultiter_new
+* `#13560 <https://github.com/numpy/numpy/pull/13560>`__: BUG: Add TypeError to accepted exceptions in crackfortran.
+* `#13561 <https://github.com/numpy/numpy/pull/13561>`__: BUG: Handle subarrays in descr_to_dtype
+* `#13562 <https://github.com/numpy/numpy/pull/13562>`__: BUG: Protect generators from log(0.0)
+* `#13563 <https://github.com/numpy/numpy/pull/13563>`__: BUG: Always return views from structured_to_unstructured when...
+* `#13564 <https://github.com/numpy/numpy/pull/13564>`__: BUG: Catch stderr when checking compiler version
+* `#13565 <https://github.com/numpy/numpy/pull/13565>`__: BUG: longdouble(int) does not work
+* `#13587 <https://github.com/numpy/numpy/pull/13587>`__: BUG: distutils/system_info.py fix missing subprocess import (#13523)
+* `#13620 <https://github.com/numpy/numpy/pull/13620>`__: BUG,DEP: Fix writeable flag setting for arrays without base
+* `#13641 <https://github.com/numpy/numpy/pull/13641>`__: MAINT: Prepare for the 1.16.4 release.
+* `#13644 <https://github.com/numpy/numpy/pull/13644>`__: BUG: special case object arrays when printing rel-, abs-error
--- /dev/null
+==========================
+NumPy 1.16.5 Release Notes
+==========================
+
+The NumPy 1.16.5 release fixes bugs reported against the 1.16.4 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.7-dev, which should fix errors on Skylake series
+cpus.
+
+Downstream developers building this release should use Cython >= 0.29.2 and, if
+using OpenBLAS, OpenBLAS >= v0.3.7. The supported Python versions are 2.7 and
+3.5-3.7.
+
+
+Contributors
+============
+
+A total of 18 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Shadchin
+* Allan Haldane
+* Bruce Merry +
+* Charles Harris
+* Colin Snyder +
+* Dan Allan +
+* Emile +
+* Eric Wieser
+* Grey Baker +
+* Maksim Shabunin +
+* Marten van Kerkwijk
+* Matti Picus
+* Peter Andreas Entschev +
+* Ralf Gommers
+* Richard Harris +
+* Sebastian Berg
+* Sergei Lebedev +
+* Stephan Hoyer
+
+Pull requests merged
+====================
+
+A total of 23 pull requests were merged for this release.
+
+* `#13742 <https://github.com/numpy/numpy/pull/13742>`__: ENH: Add project URLs to setup.py
+* `#13823 <https://github.com/numpy/numpy/pull/13823>`__: TEST, ENH: fix tests and ctypes code for PyPy
+* `#13845 <https://github.com/numpy/numpy/pull/13845>`__: BUG: use npy_intp instead of int for indexing array
+* `#13867 <https://github.com/numpy/numpy/pull/13867>`__: TST: Ignore DeprecationWarning during nose imports
+* `#13905 <https://github.com/numpy/numpy/pull/13905>`__: BUG: Fix use-after-free in boolean indexing
+* `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs
+* `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns]
+* `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation
+* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occured in PyMemoryView_FromObject
+* `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors.
+* `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked.
+* `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers
+* `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher.
+* `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7
+* `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API...
+* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict contructor
+* `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level.
+* `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__`
+* `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7
+* `#14297 <https://github.com/numpy/numpy/pull/14297>`__: TST: Pin pytest version to 5.0.1.
+* `#14322 <https://github.com/numpy/numpy/pull/14322>`__: ENH: Enable huge pages in all Linux builds
+* `#14346 <https://github.com/numpy/numpy/pull/14346>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14382 <https://github.com/numpy/numpy/pull/14382>`__: REL: Prepare for the NumPy 1.16.5 release.
--- /dev/null
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.17.0 Release Notes
+==========================
+
+This NumPy release contains a number of new features that should substantially
+improve its performance and usefulness, see Highlights below for a summary. The
+Python versions supported are 3.5-3.7, note that Python 2.7 has been dropped.
+Python 3.8b2 should work with the released source packages, but there are no
+future guarantees.
+
+Downstream developers should use Cython >= 0.29.11 for Python 3.8 support and
+OpenBLAS >= v0.3.7 (not currently out) to avoid problems on the Skylake
+architecture. The NumPy wheels on PyPI are built from the OpenBLAS development
+branch in order to avoid those problems.
+
+
+Highlights
+==========
+
+* A new extensible `random` module along with four selectable `random number
+ generators <random.BitGenerators>` and improved seeding designed for use in parallel
+ processes has been added. The currently available bit generators are `MT19937
+ <random.mt19937.MT19937>`, `PCG64 <random.pcg64.PCG64>`, `Philox
+ <random.philox.Philox>`, and `SFC64 <random.sfc64.SFC64>`. See below under
+ New Features.
+
+* NumPy's `FFT <fft>` implementation was changed from fftpack to pocketfft,
+ resulting in faster, more accurate transforms and better handling of datasets
+ of prime length. See below under Improvements.
+
+* New radix sort and timsort sorting methods. It is currently not possible to
+ choose which will be used. They are hardwired to the datatype and used
+ when either ``stable`` or ``mergesort`` is passed as the method. See below
+ under Improvements.
+
+* Overriding numpy functions is now possible by default,
+ see ``__array_function__`` below.
+
+
+New functions
+=============
+
+* `numpy.errstate` is now also a function decorator
+
+
+Deprecations
+============
+
+`numpy.polynomial` functions warn when passed ``float`` in place of ``int``
+---------------------------------------------------------------------------
+Previously functions in this module would accept ``float`` values provided they
+were integral (``1.0``, ``2.0``, etc). For consistency with the rest of numpy,
+doing so is now deprecated, and in future will raise a ``TypeError``.
+
+Similarly, passing a float like ``0.5`` in place of an integer will now raise a
+``TypeError`` instead of the previous ``ValueError``.
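+
+For instance, a sketch using the Vandermonde builder (any integral degree
+argument is affected the same way)::
+
+  from numpy.polynomial import polynomial as P
+
+  P.polyvander([0, 1, 2], 3.0)   # deprecated: integral float degree
+  P.polyvander([0, 1, 2], 3)     # preferred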
+
+Deprecate `numpy.distutils.exec_command` and ``temp_file_name``
+---------------------------------------------------------------
+The internal use of these functions has been refactored and there are better
+alternatives. Replace ``exec_command`` with `subprocess.Popen` and
+`temp_file_name <numpy.distutils.exec_command>` with `tempfile.mkstemp`.
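+
+A minimal sketch of the suggested replacements (the command shown is just an
+example)::
+
+  import subprocess
+  import tempfile
+
+  # instead of numpy.distutils.exec_command.exec_command(...)
+  proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE)
+  out, _ = proc.communicate()
+
+  # instead of numpy.distutils.exec_command.temp_file_name()
+  fd, fname = tempfile.mkstemp()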
+
+Writeable flag of C-API wrapped arrays
+--------------------------------------
+When an array is created from the C-API to wrap a pointer to data, the only
+indication we have of the read-write nature of the data is the ``writeable``
+flag set during creation. It is dangerous to force the flag to writeable.
+In the future it will not be possible to switch the writeable flag to ``True``
+from python.
+This deprecation should not affect many users since arrays created in such
+a manner are very rare in practice and only available through the NumPy C-API.
+
+`numpy.nonzero` should no longer be called on 0d arrays
+-------------------------------------------------------
+The behavior of `numpy.nonzero` on 0d arrays was surprising, making uses of it
+almost always incorrect. If the old behavior was intended, it can be preserved
+without a warning by using ``nonzero(atleast_1d(arr))`` instead of
+``nonzero(arr)``. In a future release, it is most likely this will raise a
+``ValueError``.
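+
+For example::
+
+  >>> arr = np.array(1)                 # 0d array
+  >>> np.nonzero(np.atleast_1d(arr))    # old behavior, without the warning
+  (array([0]),)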
+
+Writing to the result of `numpy.broadcast_arrays` will warn
+-----------------------------------------------------------
+
+Commonly `numpy.broadcast_arrays` returns a writeable array with internal
+overlap, making it unsafe to write to. A future version will set the
+``writeable`` flag to ``False``, and require users to manually set it to
+``True`` if they are sure that is what they want to do. Now writing to it will
+emit a deprecation warning with instructions to set the ``writeable`` flag
+``True``. Note that if one were to inspect the flag before setting it, one
+would find it would already be ``True``. Explicitly setting it, though, as one
+will need to do in future versions, clears an internal flag that is used to
+produce the deprecation warning. To help alleviate confusion, an additional
+`FutureWarning` will be emitted when accessing the ``writeable`` flag state to
+clarify the contradiction.
+
+Note that for the C-side buffer protocol such an array will return a
+readonly buffer immediately unless a writable buffer is requested. If
+a writeable buffer is requested a warning will be given. When using
+cython, the ``const`` qualifier should be used with such arrays to avoid
+the warning (e.g. ``cdef const double[::1] view``).
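+
+A minimal sketch of the explicit opt-in described above::
+
+  x, y = np.broadcast_arrays(np.arange(3), np.zeros((2, 1)))
+  x.flags.writeable = True   # explicit opt-in; clears the internal warn flag
+  x[...] = 0                 # no deprecation warning is emitted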
+
+
+Future Changes
+==============
+
+Shape-1 fields in dtypes won't be collapsed to scalars in a future version
+--------------------------------------------------------------------------
+
+Currently, a field specified as ``[(name, dtype, 1)]`` or ``"1type"`` is
+interpreted as a scalar field (i.e., the same as ``[(name, dtype)]`` or
+``[(name, dtype, ())]``). This now raises a FutureWarning; in a future version,
+it will be interpreted as a shape-(1,) field, i.e. the same as ``[(name,
+dtype, (1,))]`` or ``"(1,)type"`` (consistent with ``[(name, dtype, n)]``
+/ ``"ntype"`` with ``n>1``, which is already equivalent to ``[(name, dtype,
+(n,))]`` / ``"(n,)type"``).
+
+
+Compatibility notes
+===================
+
+``float16`` subnormal rounding
+------------------------------
+Casting from a different floating point precision to ``float16`` used incorrect
+rounding in some edge cases. This means in rare cases, subnormal results will
+now be rounded up instead of down, changing the last bit (ULP) of the result.
+
+Signed zero when using divmod
+-----------------------------
+Starting in version `1.12.0`, numpy incorrectly returned a negatively signed
+zero from the ``divmod`` and ``floor_divide`` functions when the result was
+zero. For example::
+
+ >>> np.zeros(10)//1
+ array([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0.])
+
+With this release, the result is correctly returned as a positively signed
+zero::
+
+ >>> np.zeros(10)//1
+ array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
+
+``MaskedArray.mask`` now returns a view of the mask, not the mask itself
+------------------------------------------------------------------------
+Returning the mask itself was unsafe, as it could be reshaped in place which
+would violate expectations of the masked array code. The behavior of `mask
+<ma.MaskedArray.mask>` is now consistent with `data <ma.MaskedArray.data>`,
+which also returns a view.
+
+The underlying mask can still be accessed with ``._mask`` if it is needed.
+Tests that contain ``assert x.mask is not y.mask`` or similar will need to be
+updated.
+
+Do not lookup ``__buffer__`` attribute in `numpy.frombuffer`
+------------------------------------------------------------
+Looking up the ``__buffer__`` attribute in `numpy.frombuffer` was undocumented
+and non-functional, so the code was removed. If needed, use
+``frombuffer(memoryview(obj), ...)`` instead.
+
+``out`` is buffered for memory overlaps in `take`, `choose`, `put`
+------------------------------------------------------------------
+If the ``out`` argument to these functions is provided and has memory overlap with
+the other arguments, it is now buffered to avoid order-dependent behavior.
+
+Unpickling while loading requires explicit opt-in
+-------------------------------------------------
+The functions `load` and ``lib.format.read_array`` take an
+``allow_pickle`` keyword which now defaults to ``False`` in response to
+`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
+
+
+.. currentmodule:: numpy.random.mtrand
+
+Potential changes to the random stream in old random module
+-----------------------------------------------------------
+Due to bugs in the application of ``log`` to random floating point numbers,
+the stream may change when sampling from `~RandomState.beta`, `~RandomState.binomial`,
+`~RandomState.laplace`, `~RandomState.logistic`, `~RandomState.logseries` or
+`~RandomState.multinomial` if a ``0`` is generated in the underlying `MT19937
+<~numpy.random.mt19937.MT19937>` random stream. There is a ``1`` in
+:math:`10^{53}` chance of this occurring, so the probability that the stream
+changes for any given seed is extremely small. If a ``0`` is encountered in the
+underlying generator, then the incorrect value produced (either `numpy.inf` or
+`numpy.nan`) is now dropped.
+
+.. currentmodule:: numpy
+
+`i0` now always returns a result with the same shape as the input
+-----------------------------------------------------------------
+Previously, the output was squeezed, such that, e.g., input with just a single
+element would lead to an array scalar being returned, and inputs with shapes
+such as ``(10, 1)`` would yield results that would not broadcast against the
+input.
+
+Note that we generally recommend the SciPy implementation over the numpy one:
+it is a proper ufunc written in C, and more than an order of magnitude faster.
+
+`can_cast` no longer assumes all unsafe casting is allowed
+----------------------------------------------------------
+Previously, `can_cast` returned `True` for almost all inputs for
+``casting='unsafe'``, even for cases where casting was not possible, such as
+from a structured dtype to a regular one. This has been fixed, making it
+more consistent with actual casting using, e.g., the `.astype <ndarray.astype>`
+method.
+
+``ndarray.flags.writeable`` can be switched to true slightly more often
+-----------------------------------------------------------------------
+
+In rare cases, it was not possible to switch an array from not writeable
+to writeable, even though its base array was writeable. This could happen if
+an intermediate `ndarray.base` object was writeable. Previously, only the
+deepest base object was considered for this decision, and in rare cases that
+object does not carry the necessary information, so switching to writeable
+was never allowed. This has now been fixed.
+
+
+C API changes
+=============
+
+dimension or stride input arguments are now passed by ``npy_intp const*``
+-------------------------------------------------------------------------
+Previously these function arguments were declared as the more strict
+``npy_intp*``, which prevented the caller passing constant data.
+This change is backwards compatible, but now allows code like::
+
+ npy_intp const fixed_dims[] = {1, 2, 3};
+ // no longer complains that the const-qualifier is discarded
+ npy_intp size = PyArray_MultiplyList(fixed_dims, 3);
+
+
+New Features
+============
+
+.. currentmodule:: numpy.random
+
+New extensible `numpy.random` module with selectable random number generators
+-----------------------------------------------------------------------------
+A new extensible `numpy.random` module along with four selectable random number
+generators and improved seeding designed for use in parallel processes has been
+added. The currently available `Bit Generators` are
+`~mt19937.MT19937`, `~pcg64.PCG64`, `~philox.Philox`, and `~sfc64.SFC64`.
+``PCG64`` is the new default while ``MT19937`` is retained for backwards
+compatibility. Note that the legacy random module is unchanged and is now
+frozen; your current results will not change. More information is available in
+the :ref:`API change description <new-or-different>` and in the `top-level view
+<numpy.random>` documentation.
+
+.. currentmodule:: numpy
+
+libFLAME
+--------
+Support for building NumPy with the libFLAME linear algebra package as the
+LAPACK implementation; see
+`libFLAME <https://www.cs.utexas.edu/~flame/web/libFLAME.html>`_ for details.
+
+User-defined BLAS detection order
+---------------------------------
+`numpy.distutils` now reads a comma-separated, case-insensitive environment
+variable to determine the detection order for BLAS libraries.
+By default ``NPY_BLAS_ORDER=mkl,blis,openblas,atlas,accelerate,blas``.
+For example, to force the use of OpenBLAS, do::
+
+  NPY_BLAS_ORDER=openblas python setup.py build
+
+This may be helpful for users who have an MKL installation but wish to try
+out different implementations.
+
+User-defined LAPACK detection order
+-----------------------------------
+``numpy.distutils`` now reads a comma-separated, case-insensitive environment
+variable to determine the detection order for LAPACK libraries.
+By default ``NPY_LAPACK_ORDER=mkl,openblas,flame,atlas,accelerate,lapack``.
+For example, to force the use of OpenBLAS, do::
+
+  NPY_LAPACK_ORDER=openblas python setup.py build
+
+This may be helpful for users who have an MKL installation but wish to try
+out different implementations.
+
+`ufunc.reduce` and related functions now accept a ``where`` mask
+----------------------------------------------------------------
+`ufunc.reduce`, `sum`, `prod`, `min`, `max` all
+now accept a ``where`` keyword argument, which can be used to tell which
+elements to include in the reduction. For reductions that do not have an
+identity, it is necessary to also pass in an initial value (e.g.,
+``initial=np.inf`` for `min`). For instance, the equivalent of
+`nansum` would be ``np.sum(a, where=~np.isnan(a))``.
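+
+For example::
+
+  >>> a = np.array([1.0, np.nan, 3.0])
+  >>> np.sum(a, where=~np.isnan(a))                  # equivalent of nansum
+  4.0
+  >>> np.min(a, initial=np.inf, where=~np.isnan(a))
+  1.0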
+
+Timsort and radix sort have replaced mergesort for stable sorting
+-----------------------------------------------------------------
+Both radix sort and timsort have been implemented and are now used in place of
+mergesort. Due to the need to maintain backward compatibility, the sorting
+``kind`` options ``"stable"`` and ``"mergesort"`` have been made aliases of
+each other with the actual sort implementation depending on the array type.
+Radix sort is used for small integer types of 16 bits or less and timsort for
+the remaining types. Timsort features improved performance on already or
+nearly sorted data, performs like mergesort on random data, and requires
+:math:`O(n/2)` working space. Details of the timsort algorithm can be
+found at `CPython listsort.txt
+<https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
+
+`packbits` and `unpackbits` accept an ``order`` keyword
+-------------------------------------------------------
+The ``order`` keyword defaults to ``big``, and will order the **bits**
+accordingly. For ``order='big'``, 3 will become ``[0, 0, 0, 0, 0, 0, 1, 1]``,
+and ``[1, 1, 0, 0, 0, 0, 0, 0]`` for ``order='little'``.
+
+`unpackbits` now accepts a ``count`` parameter
+----------------------------------------------
+``count`` allows subsetting the number of bits that will be unpacked up-front,
+rather than reshaping and subsetting later, making the `packbits` operation
+invertible, and the unpacking less wasteful. Counts larger than the number of
+available bits add zero padding. Negative counts trim bits off the end instead
+of counting from the beginning. A count of ``None`` keeps the existing
+behavior of unpacking everything.
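+
+For example::
+
+  >>> packed = np.packbits(np.array([1, 0, 1], dtype=np.uint8))
+  >>> np.unpackbits(packed, count=3)    # inverts the packbits call above
+  array([1, 0, 1], dtype=uint8)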
+
+`linalg.svd` and `linalg.pinv` can be faster on hermitian inputs
+----------------------------------------------------------------
+These functions now accept a ``hermitian`` argument, matching the one added
+to `linalg.matrix_rank` in 1.14.0.
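+
+For example, a small sketch with a symmetric (hence hermitian) matrix::
+
+  >>> a = np.array([[2.0, 1.0],
+  ...               [1.0, 2.0]])
+  >>> np.linalg.pinv(a, hermitian=True)
+  array([[ 0.66666667, -0.33333333],
+         [-0.33333333,  0.66666667]])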
+
+divmod operation is now supported for two ``timedelta64`` operands
+------------------------------------------------------------------
+The divmod operator now handles two ``timedelta64`` operands, with
+type signature ``mm->qm``.
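+
+For example (the quotient is an integer, the remainder a ``timedelta64``)::
+
+  >>> np.divmod(np.timedelta64(7, 's'), np.timedelta64(3, 's'))
+  (2, numpy.timedelta64(1,'s'))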
+
+`fromfile` now takes an ``offset`` argument
+-------------------------------------------
+This function now takes an ``offset`` keyword argument for binary files,
+which specifies the offset (in bytes) from the file's current position.
+Defaults to ``0``.
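+
+For example, a sketch that skips a 4-byte header (the file name is
+hypothetical)::
+
+  data = np.fromfile('data.bin', dtype=np.float32, offset=4)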
+
+New mode "empty" for `pad`
+--------------------------
+This mode pads an array to a desired shape without initializing the new
+entries.
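+
+For example (the padded entries are uninitialized, so their values are
+unspecified)::
+
+  padded = np.pad(np.ones(3), (1, 2), mode='empty')   # shape (6,)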
+
+`empty_like` and related functions now accept a ``shape`` argument
+------------------------------------------------------------------
+`empty_like`, `full_like`, `ones_like` and `zeros_like` now accept a ``shape``
+keyword argument, which overrides the shape of the prototype when creating
+the new array. This is particularly useful
+when combined with the ``__array_function__`` protocol, allowing the creation
+of new arbitrary-shape arrays from NumPy-like libraries when such an array
+is used as the prototype.
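+
+For example::
+
+  >>> proto = np.ones((2, 3))
+  >>> np.zeros_like(proto, shape=(4,))   # dtype from proto, shape overridden
+  array([0., 0., 0., 0.])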
+
+Floating point scalars implement ``as_integer_ratio`` to match the builtin float
+--------------------------------------------------------------------------------
+This returns a (numerator, denominator) pair, which can be used to construct a
+`fractions.Fraction`.
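+
+For example::
+
+  >>> np.float64(0.25).as_integer_ratio()
+  (1, 4)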
+
+Structured ``dtype`` objects can be indexed with multiple field names
+----------------------------------------------------------------------
+``arr.dtype[['a', 'b']]`` now returns a dtype that is equivalent to
+``arr[['a', 'b']].dtype``, for consistency with
+``arr.dtype['a'] == arr['a'].dtype``.
+
+Like the dtype of structured arrays indexed with a list of fields, this dtype
+has the same ``itemsize`` as the original, but only keeps a subset of the fields.
+
+This means that ``arr[['a', 'b']]`` and ``arr.view(arr.dtype[['a', 'b']])`` are
+equivalent.
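+
+For example::
+
+  >>> dt = np.dtype([('a', np.int64), ('b', np.float64), ('c', np.int32)])
+  >>> dt[['a', 'c']].itemsize == dt.itemsize   # the itemsize is preserved
+  True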
+
+``.npy`` files support unicode field names
+------------------------------------------
+A new format version of 3.0 has been introduced, which enables structured types
+with non-latin1 field names. This is used automatically when needed.
+
+
+Improvements
+============
+
+Array comparison assertions include maximum differences
+-------------------------------------------------------
+Error messages from array comparison tests such as
+`testing.assert_allclose` now include "max absolute difference" and
+"max relative difference," in addition to the previous "mismatch" percentage.
+This information makes it easier to update absolute and relative error
+tolerances.
+
+Replacement of the fftpack based `fft` module by the pocketfft library
+----------------------------------------------------------------------
+Both implementations have the same ancestor (Fortran77 FFTPACK by Paul N.
+Swarztrauber), but pocketfft contains additional modifications which improve
+both accuracy and performance in some circumstances. For FFT lengths containing
+large prime factors, pocketfft uses Bluestein's algorithm, which maintains
+:math:`O(N log N)` run time complexity instead of deteriorating towards
+:math:`O(N*N)` for prime lengths. Also, accuracy for real valued FFTs with near
+prime lengths has improved and is on par with complex valued FFTs.
+
+Further improvements to ``ctypes`` support in `numpy.ctypeslib`
+---------------------------------------------------------------
+A new `numpy.ctypeslib.as_ctypes_type` function has been added, which can be
+used to convert a `dtype` into a best-guess `ctypes` type. Thanks to this
+new function, `numpy.ctypeslib.as_ctypes` now supports a much wider range of
+array types, including structures, booleans, and integers of non-native
+endianness.
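+
+For example (the resulting ctypes class can depend on the platform)::
+
+  >>> np.ctypeslib.as_ctypes_type(np.dtype('u1'))
+  <class 'ctypes.c_ubyte'>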
+
+`numpy.errstate` is now also a function decorator
+-------------------------------------------------
+Currently, if you have a function like::
+
+ def foo():
+ pass
+
+and you want to wrap the whole thing in `errstate`, you have to rewrite it
+like so::
+
+ def foo():
+ with np.errstate(...):
+ pass
+
+but with this change, you can do::
+
+ @np.errstate(...)
+ def foo():
+ pass
+
+thereby saving a level of indentation.
+
+`numpy.exp` and `numpy.log` speed up for float32 implementation
+---------------------------------------------------------------
+The float32 implementations of `exp` and `log` now benefit from the AVX2 and
+AVX512 instruction sets, which are detected at runtime. `exp` has a max ulp
+error of 2.52 and `log` has a max ulp error of 3.83.
+
+Improve performance of `numpy.pad`
+----------------------------------
+The performance of the function has been improved for most cases by filling in
+a preallocated array with the desired padded shape instead of using
+concatenation.
+
+`numpy.interp` handles infinities more robustly
+-----------------------------------------------
+In some cases where `interp` would previously return `nan`, it now
+returns an appropriate infinity.
+
+Pathlib support for `fromfile`, `tofile` and `ndarray.dump`
+-----------------------------------------------------------
+`fromfile`, `ndarray.tofile` and `ndarray.dump` now support
+the `pathlib.Path` type for the ``file``/``fid`` parameter.
+
+Specialized `isnan`, `isinf`, and `isfinite` ufuncs for bool and int types
+--------------------------------------------------------------------------
+The boolean and integer types are incapable of storing `nan` and `inf` values,
+which allows us to provide specialized ufuncs that are up to 250x faster than
+the previous approach.
+
+`isfinite` supports ``datetime64`` and ``timedelta64`` types
+-----------------------------------------------------------------
+Previously, `isfinite` used to raise a `TypeError` on being used on these
+two types.
+
+New keywords added to `nan_to_num`
+----------------------------------
+`nan_to_num` now accepts keywords ``nan``, ``posinf`` and ``neginf``
+allowing the user to define the value to replace the ``nan``, positive and
+negative ``np.inf`` values respectively.
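+
+For example::
+
+  a = np.array([np.nan, np.inf, -np.inf])
+  np.nan_to_num(a, nan=0.0, posinf=999.0, neginf=-999.0)   # -> [0., 999., -999.]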
+
+MemoryErrors caused by allocating overly large arrays are more descriptive
+---------------------------------------------------------------------------
+Often the cause of a MemoryError is incorrect broadcasting, which results in a
+very large and incorrect shape. The message of the error now includes this
+shape to help diagnose the cause of failure.
+
+`floor`, `ceil`, and `trunc` now respect builtin magic methods
+--------------------------------------------------------------
+These ufuncs now call the ``__floor__``, ``__ceil__``, and ``__trunc__``
+methods when called on object arrays, making them compatible with
+`decimal.Decimal` and `fractions.Fraction` objects.
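+
+For example::
+
+  >>> from decimal import Decimal
+  >>> np.floor(np.array([Decimal('2.7'), Decimal('-0.5')], dtype=object))
+  array([2, -1], dtype=object)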
+
+`quantile` now works on `fractions.Fraction` and `decimal.Decimal` objects
+----------------------------------------------------------------------------
+In general, this handles object arrays more gracefully, and avoids
+floating-point operations if exact arithmetic types are used.
+
+Support of object arrays in `matmul`
+------------------------------------
+It is now possible to use `matmul` (or the ``@`` operator) with object arrays.
+For instance, it is now possible to do::
+
+ from fractions import Fraction
+ a = np.array([[Fraction(1, 2), Fraction(1, 3)], [Fraction(1, 3), Fraction(1, 2)]])
+ b = a @ a
+
+
+Changes
+=======
+
+`median` and `percentile` family of functions no longer warn about ``nan``
+--------------------------------------------------------------------------
+`numpy.median`, `numpy.percentile`, and `numpy.quantile` used to emit a
+``RuntimeWarning`` when encountering a ``nan``. Since they return the
+``nan`` value, the warning is redundant and has been removed.
+
+``timedelta64 % 0`` behavior adjusted to return ``NaT``
+-------------------------------------------------------
+The modulus operation with two ``np.timedelta64`` operands now returns
+``NaT`` in the case of division by zero, rather than returning zero.
+
+NumPy functions now always support overrides with ``__array_function__``
+------------------------------------------------------------------------
+NumPy now always checks the ``__array_function__`` method to implement overrides
+of NumPy functions on non-NumPy arrays, as described in `NEP 18`_. The feature
+was available for testing with NumPy 1.16 if appropriate environment variables
+were set, but is now always enabled.
+
+.. _`NEP 18` : http://www.numpy.org/neps/nep-0018-array-function-protocol.html
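+
+A minimal sketch of the protocol (the class and return value are purely
+illustrative)::
+
+  class MyArray:
+      def __array_function__(self, func, types, args, kwargs):
+          if func is np.concatenate:
+              return "intercepted concatenate"
+          return NotImplemented
+
+  np.concatenate([MyArray()])   # dispatches to the override above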
+
+``lib.recfunctions.structured_to_unstructured`` does not squeeze single-field views
+-----------------------------------------------------------------------------------
+Previously ``structured_to_unstructured(arr[['a']])`` would produce a squeezed
+result inconsistent with ``structured_to_unstructured(arr[['a', 'b']])``. This
+was accidental. The old behavior can be retained with
+``structured_to_unstructured(arr[['a']]).squeeze(axis=-1)`` or far more simply,
+``arr['a']``.
+
+`clip` now uses a ufunc under the hood
+--------------------------------------
+This means that registering clip functions for custom dtypes in C via
+``descr->f->fastclip`` is deprecated - they should use the ufunc registration
+mechanism instead, attaching to the ``np.core.umath.clip`` ufunc.
+
+It also means that ``clip`` accepts ``where`` and ``casting`` arguments,
+and can be overridden with ``__array_ufunc__``.
+
+A consequence of this change is that some behaviors of the old ``clip`` have
+been deprecated:
+
+* Passing ``nan`` to mean "do not clip" as one or both bounds. This didn't work
+ in all cases anyway, and can be better handled by passing infinities of the
+ appropriate sign.
+* Using "unsafe" casting by default when an ``out`` argument is passed. Using
+ ``casting="unsafe"`` explicitly will silence this warning.
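+
+For example, to leave the lower bound open, pass an infinity of the
+appropriate sign rather than ``nan``::
+
+  >>> np.clip(np.arange(5.0), -np.inf, 3.0)   # not np.clip(..., np.nan, 3.0)
+  array([0., 1., 2., 3., 3.])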
+
+Additionally, there are some corner cases with behavior changes:
+
+* Passing ``max < min`` has changed to be more consistent across dtypes, but
+  should not be relied upon.
+* Scalar ``min`` and ``max`` take part in promotion rules like they do in all
+ other ufuncs.
+
+``__array_interface__`` offset now works as documented
+------------------------------------------------------
+The interface may use an ``offset`` value that was previously mistakenly ignored.
+
+Pickle protocol in `savez` set to 3 for ``force zip64`` flag
+-----------------------------------------------------------------
+`savez` was not using the ``force_zip64`` flag, which limited the size of
+the archive to 2GB. But using the flag requires us to use pickle protocol 3 to
+write ``object`` arrays. The protocol used was bumped to 3, meaning the archive
+will be unreadable by Python 2.
+
+Structured arrays indexed with non-existent fields raise ``KeyError`` not ``ValueError``
+----------------------------------------------------------------------------------------
+``arr['bad_field']`` on a structured type raises ``KeyError``, for consistency
+with ``dict['bad_field']``.
+
--- /dev/null
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.17.1 Release Notes
+==========================
+
+This release contains a number of fixes for bugs reported against NumPy 1.17.0
+along with a few documentation and build improvements. The Python versions
+supported are 3.5-3.7; note that Python 2.7 has been dropped. Python 3.8b3
+should work with the released source packages, but there are no future
+guarantees.
+
+Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
+OpenBLAS >= 3.7 to avoid problems on the Skylake architecture. The NumPy wheels
+on PyPI are built from the OpenBLAS development branch in order to avoid those
+problems.
+
+
+Contributors
+============
+
+A total of 17 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Jung +
+* Allan Haldane
+* Charles Harris
+* Eric Wieser
+* Giuseppe Cuccu +
+* Hiroyuki V. Yamazaki
+* Jérémie du Boisberranger
+* Kmol Yuan +
+* Matti Picus
+* Max Bolingbroke +
+* Maxwell Aladago +
+* Oleksandr Pavlyk
+* Peter Andreas Entschev
+* Sergei Lebedev
+* Seth Troisi +
+* Vladimir Pershin +
+* Warren Weckesser
+
+
+Pull requests merged
+====================
+
+A total of 25 pull requests were merged for this release.
+
+* `#14156 <https://github.com/numpy/numpy/pull/14156>`__: TST: Allow fuss in testing strided/non-strided exp/log loops
+* `#14157 <https://github.com/numpy/numpy/pull/14157>`__: BUG: avx2_scalef_ps must be static
+* `#14158 <https://github.com/numpy/numpy/pull/14158>`__: BUG: Remove stray print that causes a SystemError on python 3.7.
+* `#14159 <https://github.com/numpy/numpy/pull/14159>`__: BUG: Fix DeprecationWarning in python 3.8.
+* `#14160 <https://github.com/numpy/numpy/pull/14160>`__: BLD: Add missing gcd/lcm definitions to npy_math.h
+* `#14161 <https://github.com/numpy/numpy/pull/14161>`__: DOC, BUILD: cleanups and fix (again) 'build dist'
+* `#14166 <https://github.com/numpy/numpy/pull/14166>`__: TST: Add 3.8-dev to travisCI testing.
+* `#14194 <https://github.com/numpy/numpy/pull/14194>`__: BUG: Remove the broken clip wrapper (Backport)
+* `#14198 <https://github.com/numpy/numpy/pull/14198>`__: DOC: Fix hermitian argument docs in svd.
+* `#14199 <https://github.com/numpy/numpy/pull/14199>`__: MAINT: Workaround for Intel compiler bug leading to failing test
+* `#14200 <https://github.com/numpy/numpy/pull/14200>`__: TST: Clean up of test_pocketfft.py
+* `#14201 <https://github.com/numpy/numpy/pull/14201>`__: BUG: Make advanced indexing result on read-only subclass writeable...
+* `#14236 <https://github.com/numpy/numpy/pull/14236>`__: BUG: Fixed default BitGenerator name
+* `#14237 <https://github.com/numpy/numpy/pull/14237>`__: ENH: add c-imported modules for freeze analysis in np.random
+* `#14296 <https://github.com/numpy/numpy/pull/14296>`__: TST: Pin pytest version to 5.0.1
+* `#14301 <https://github.com/numpy/numpy/pull/14301>`__: BUG: Fix leak in the f2py-generated module init and `PyMem_Del`...
+* `#14302 <https://github.com/numpy/numpy/pull/14302>`__: BUG: Fix formatting error in exception message
+* `#14307 <https://github.com/numpy/numpy/pull/14307>`__: MAINT: random: Match type of SeedSequence.pool_size to DEFAULT_POOL_SIZE.
+* `#14308 <https://github.com/numpy/numpy/pull/14308>`__: BUG: Fix numpy.random bug in platform detection
+* `#14309 <https://github.com/numpy/numpy/pull/14309>`__: ENH: Enable huge pages in all Linux builds
+* `#14330 <https://github.com/numpy/numpy/pull/14330>`__: BUG: Fix segfault in `random.permutation(x)` when x is a string.
+* `#14338 <https://github.com/numpy/numpy/pull/14338>`__: BUG: don't fail when lexsorting some empty arrays (#14228)
+* `#14339 <https://github.com/numpy/numpy/pull/14339>`__: BUG: Fix misuse of .names and .fields in various places (backport...
+* `#14345 <https://github.com/numpy/numpy/pull/14345>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14350 <https://github.com/numpy/numpy/pull/14350>`__: REL: Prepare 1.17.1 release
--- /dev/null
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.17.2 Release Notes
+==========================
+
+This release contains fixes for bugs reported against NumPy 1.17.1 along with
+some documentation improvements. The most important fix is for lexsort when the
+keys are of type (u)int8 or (u)int16. If you are currently using 1.17 you
+should upgrade.
+
+The Python versions supported in this release are 3.5-3.7; Python 2.7 has been
+dropped. Python 3.8b4 should work with the released source packages, but there
+are no future guarantees.
+
+Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
+OpenBLAS >= 3.7 to avoid errors on the Skylake architecture. The NumPy wheels
+on PyPI are built from the OpenBLAS development branch in order to avoid those
+errors.
+
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* CakeWithSteak +
+* Charles Harris
+* Dan Allan
+* Hameer Abbasi
+* Lars Grueter
+* Matti Picus
+* Sebastian Berg
+
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#14418 <https://github.com/numpy/numpy/pull/14418>`__: BUG: Fix aradixsort indirect indexing.
+* `#14420 <https://github.com/numpy/numpy/pull/14420>`__: DOC: Fix a minor typo in dispatch documentation.
+* `#14421 <https://github.com/numpy/numpy/pull/14421>`__: BUG: test, fix regression in converting to ctypes
+* `#14430 <https://github.com/numpy/numpy/pull/14430>`__: BUG: Do not show Override module in private error classes.
+* `#14432 <https://github.com/numpy/numpy/pull/14432>`__: BUG: Fixed maximum relative error reporting in assert_allclose.
+* `#14433 <https://github.com/numpy/numpy/pull/14433>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative...
+* `#14436 <https://github.com/numpy/numpy/pull/14436>`__: BUG: Update 1.17.x with 1.18.0-dev pocketfft.py.
+* `#14446 <https://github.com/numpy/numpy/pull/14446>`__: REL: Prepare for NumPy 1.17.2 release.
--- /dev/null
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.17.3 Release Notes
+==========================
+
+This release contains fixes for bugs reported against NumPy 1.17.2 along with
+some documentation improvements. The Python versions supported in this release
+are 3.5-3.8.
+
+Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
+OpenBLAS >= 3.7 to avoid errors on the Skylake architecture.
+
+
+Highlights
+==========
+
+- Wheels for Python 3.8
+- Boolean ``matmul`` fixed to use booleans instead of integers.
+
+
+Compatibility notes
+===================
+
+- The seldom used ``PyArray_DescrCheck`` macro has been changed/fixed.
+
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Kevin Sheppard
+* Matti Picus
+* Ralf Gommers
+* Sebastian Berg
+* Warren Weckesser
+
+
+Pull requests merged
+====================
+
+A total of 12 pull requests were merged for this release.
+
+* `#14456 <https://github.com/numpy/numpy/pull/14456>`__: MAINT: clean up pocketfft modules inside numpy.fft namespace.
+* `#14463 <https://github.com/numpy/numpy/pull/14463>`__: BUG: random.hypergeometric assumes npy_long is npy_int64, hung...
+* `#14502 <https://github.com/numpy/numpy/pull/14502>`__: BUG: random: Revert gh-14458 and refix gh-14557.
+* `#14504 <https://github.com/numpy/numpy/pull/14504>`__: BUG: add a specialized loop for boolean matmul.
+* `#14506 <https://github.com/numpy/numpy/pull/14506>`__: MAINT: Update pytest version for Python 3.8
+* `#14512 <https://github.com/numpy/numpy/pull/14512>`__: DOC: random: fix doc linking, was referencing private submodules.
+* `#14513 <https://github.com/numpy/numpy/pull/14513>`__: BUG,MAINT: Some fixes and minor cleanup based on clang analysis
+* `#14515 <https://github.com/numpy/numpy/pull/14515>`__: BUG: Fix randint when range is 2**32
+* `#14519 <https://github.com/numpy/numpy/pull/14519>`__: MAINT: remove the entropy c-extension module
+* `#14563 <https://github.com/numpy/numpy/pull/14563>`__: DOC: remove note about Pocketfft license file (non-existing here).
+* `#14578 <https://github.com/numpy/numpy/pull/14578>`__: BUG: random: Create a legacy implementation of random.binomial.
+* `#14687 <https://github.com/numpy/numpy/pull/14687>`__: BUG: properly define PyArray_DescrCheck
--- /dev/null
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.17.4 Release Notes
+==========================
+
+This release contains fixes for bugs reported against NumPy 1.17.3 along with
+some build improvements. The Python versions supported in this release
+are 3.5-3.8.
+
+Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
+OpenBLAS >= 3.7 to avoid errors on the Skylake architecture.
+
+
+Highlights
+==========
+
+- Fixed `random.random_integers` biased generation of 8 and 16 bit integers.
+- Fixed `np.einsum` regression on Power9 and z/Linux.
+- Fixed histogram problem with signed integer arrays.
+
+
+Contributors
+============
+
+A total of 5 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Chris Burr +
+* Matti Picus
+* Qiming Sun +
+* Warren Weckesser
+
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#14758 <https://github.com/numpy/numpy/pull/14758>`__: BLD: declare support for python 3.8
+* `#14781 <https://github.com/numpy/numpy/pull/14781>`__: BUG: random: biased samples from integers() with 8 or 16 bit...
+* `#14851 <https://github.com/numpy/numpy/pull/14851>`__: BUG: Fix _ctypes class circular reference. (#13808)
+* `#14852 <https://github.com/numpy/numpy/pull/14852>`__: BLD: add 'apt update' to shippable
+* `#14855 <https://github.com/numpy/numpy/pull/14855>`__: BUG: Fix `np.einsum` errors on Power9 Linux and z/Linux
+* `#14857 <https://github.com/numpy/numpy/pull/14857>`__: BUG: lib: Fix histogram problem with signed integer arrays.
+* `#14858 <https://github.com/numpy/numpy/pull/14858>`__: BLD: Prevent -flto from optimising long double representation...
+* `#14866 <https://github.com/numpy/numpy/pull/14866>`__: MAINT: move buffer.h -> npy_buffer.h to avoid conflicts
+
--- /dev/null
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.18.0 Release Notes
+==========================
+
+In addition to the usual bug fixes, this NumPy release cleans up and documents
+the new random C-API, expires a large number of old deprecations, and improves
+the appearance of the documentation. The Python versions supported are 3.5-3.8.
+This is the last NumPy release series that will support Python 3.5.
+
+Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
+OpenBLAS >= 3.7 to avoid problems on the Skylake
+architecture.
+
+
+Highlights
+==========
+
+* The C-API for ``numpy.random`` has been defined and documented.
+* Basic infrastructure for linking with 64 bit BLAS and LAPACK libraries.
+* Many documentation improvements.
+
+
+New functions
+=============
+
+Multivariate hypergeometric distribution added to ``numpy.random``
+------------------------------------------------------------------
+The method ``multivariate_hypergeometric`` has been added to the class
+`numpy.random.Generator`. This method generates random variates from
+the multivariate hypergeometric probability distribution.
+(`gh-13794 <https://github.com/numpy/numpy/pull/13794>`__)
+
+
+Deprecations
+============
+
+``np.fromfile`` and ``np.fromstring`` will error on bad data
+------------------------------------------------------------
+
+In future numpy releases, the functions ``np.fromfile`` and ``np.fromstring``
+will throw an error when parsing bad data.
+This will now give a ``DeprecationWarning`` where previously partial or
+even invalid data was silently returned. This deprecation also affects
+the C defined functions ``PyArray_FromString`` and ``PyArray_FromFile``.
+(`gh-13605 <https://github.com/numpy/numpy/pull/13605>`__)
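+
+For example, a sketch of input that now triggers the warning (previously the
+partial result was returned silently)::
+
+  np.fromstring('1,2,junk', dtype=float, sep=',')   # DeprecationWarning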
+
+Deprecate non-scalar arrays as fill values in ``ma.fill_value``
+---------------------------------------------------------------
+Setting a ``MaskedArray.fill_value`` to a non-scalar array is deprecated
+since the logic to broadcast the fill value to the array is fragile,
+especially when slicing.
+(`gh-13698 <https://github.com/numpy/numpy/pull/13698>`__)
+
+Deprecate ``PyArray_As1D``, ``PyArray_As2D``
+--------------------------------------------
+``PyArray_As1D`` and ``PyArray_As2D`` are deprecated; use
+``PyArray_AsCArray`` instead.
+(`gh-14036 <https://github.com/numpy/numpy/pull/14036>`__)
+
+Deprecate ``np.alen``
+---------------------
+``np.alen`` was deprecated. Use ``len`` instead.
+(`gh-14181 <https://github.com/numpy/numpy/pull/14181>`__)
+
+Deprecate the financial functions
+---------------------------------
+In accordance with
+`NEP-32 <https://numpy.org/neps/nep-0032-remove-financial-functions.html>`_,
+the financial functions ``fv``, ``ipmt``, ``irr``, ``mirr``, ``nper``,
+``npv``, ``pmt``, ``ppmt``, ``pv`` and ``rate`` are deprecated, and will be
+removed in NumPy 1.20. The replacement for these functions is the Python package
+`numpy-financial <https://pypi.org/project/numpy-financial>`_.
+(`gh-14720 <https://github.com/numpy/numpy/pull/14720>`__)
+
+The ``axis`` argument to ``numpy.ma.mask_cols`` and ``numpy.ma.mask_rows`` is deprecated
+------------------------------------------------------------------------------------------
+This argument was always ignored.
+(`gh-14996 <https://github.com/numpy/numpy/pull/14996>`__)
+
+
+Expired deprecations
+====================
+
+* ``PyArray_As1D`` and ``PyArray_As2D`` have been removed in favor of
+ ``PyArray_AsCArray``
+ (`gh-14036 <https://github.com/numpy/numpy/pull/14036>`__)
+
+* ``np.rank`` has been removed. This was deprecated in NumPy 1.10
+ and has been replaced by ``np.ndim``.
+ (`gh-14039 <https://github.com/numpy/numpy/pull/14039>`__)
+
+* The deprecation of ``expand_dims`` out-of-range axes in 1.13.0 has
+ expired.
+ (`gh-14051 <https://github.com/numpy/numpy/pull/14051>`__)
+
+* ``PyArray_FromDimsAndDataAndDescr`` and ``PyArray_FromDims`` have been
+ removed (they will always raise an error). Use ``PyArray_NewFromDescr``
+ and ``PyArray_SimpleNew`` instead.
+ (`gh-14100 <https://github.com/numpy/numpy/pull/14100>`__)
+
+* ``numeric.loads``, ``numeric.load``, ``np.ma.dump``,
+ ``np.ma.dumps``, ``np.ma.load``, ``np.ma.loads`` are removed,
+ use ``pickle`` methods instead
+ (`gh-14256 <https://github.com/numpy/numpy/pull/14256>`__)
+
+* ``arrayprint.FloatFormat`` and ``arrayprint.LongFloatFormat`` have been
+  removed; use ``FloatingFormat`` instead.
+
+* ``arrayprint.ComplexFormat`` and ``arrayprint.LongComplexFormat`` have been
+  removed; use ``ComplexFloatingFormat`` instead.
+
+* ``arrayprint.StructureFormat`` has been removed; use ``StructureVoidFormat``
+  instead.
+ (`gh-14259 <https://github.com/numpy/numpy/pull/14259>`__)
+
+* ``np.testing.rand`` has been removed. This was deprecated in NumPy 1.11
+ and has been replaced by ``np.random.rand``.
+ (`gh-14325 <https://github.com/numpy/numpy/pull/14325>`__)
+
+* Class ``SafeEval`` in ``numpy/lib/utils.py`` has been removed.
+ This was deprecated in NumPy 1.10. Use ``np.safe_eval`` instead.
+ (`gh-14335 <https://github.com/numpy/numpy/pull/14335>`__)
+
+* Remove deprecated support for boolean and empty condition lists in
+ ``np.select``
+ (`gh-14583 <https://github.com/numpy/numpy/pull/14583>`__)
+
+* Array order only accepts 'C', 'F', 'A', and 'K'. More permissive options
+ were deprecated in NumPy 1.11.
+ (`gh-14596 <https://github.com/numpy/numpy/pull/14596>`__)
+
+* ``np.linspace`` parameter ``num`` must be an integer. Deprecated in NumPy 1.12.
+ (`gh-14620 <https://github.com/numpy/numpy/pull/14620>`__)
+
+* UFuncs with multiple outputs must use a tuple for the ``out`` kwarg. This
+ finishes a deprecation started in NumPy 1.10.
+ (`gh-14682 <https://github.com/numpy/numpy/pull/14682>`__)
+
+The files ``numpy/testing/decorators.py``, ``numpy/testing/noseclasses.py``
+and ``numpy/testing/nosetester.py`` have been removed. They were never
+meant to be public (all relevant objects are present in the
+``numpy.testing`` namespace), and importing them has given a deprecation
+warning since NumPy 1.15.0.
+(`gh-14567 <https://github.com/numpy/numpy/pull/14567>`__)
+
+
+Compatibility notes
+===================
+
+`numpy.lib.recfunctions.drop_fields` can no longer return None
+--------------------------------------------------------------
+If ``drop_fields`` is used to drop all fields, previously the array would
+be completely discarded and None returned. Now it returns an array of the
+same shape as the input, but with no fields. The old behavior can be retained
+with::
+
+ dropped_arr = drop_fields(arr, ['a', 'b'])
+ if dropped_arr.dtype.names == ():
+ dropped_arr = None
+
+which converts the empty recarray to ``None``.
+(`gh-14510 <https://github.com/numpy/numpy/pull/14510>`__)
+
+``numpy.argmin/argmax/min/max`` returns ``NaT`` if it exists in the array
+---------------------------------------------------------------------------
+``numpy.argmin``, ``numpy.argmax``, ``numpy.min``, and ``numpy.max`` will return
+``NaT`` if it exists in the array.
+(`gh-14717 <https://github.com/numpy/numpy/pull/14717>`__)
+
+``np.can_cast(np.uint64, np.timedelta64, casting='safe')`` is now ``False``
+---------------------------------------------------------------------------
+Previously this was ``True``; however, this was inconsistent with ``uint64``
+not being safely castable to ``int64``, and resulted in strange type
+resolution.
+
+If this impacts your code, cast ``uint64`` to ``int64`` first.
+(`gh-14718 <https://github.com/numpy/numpy/pull/14718>`__)
+
+Changed random variate stream from ``numpy.random.Generator.integers``
+----------------------------------------------------------------------
+There was a bug in ``numpy.random.Generator.integers`` that caused biased
+sampling of 8 and 16 bit integer types. Fixing that bug has changed the
+output stream from what it was in previous releases.
+(`gh-14777 <https://github.com/numpy/numpy/pull/14777>`__)
+
+Add more ufunc loops for ``datetime64``, ``timedelta64``
+--------------------------------------------------------
+``np.datetime64('NaT')`` should behave more like ``float('nan')``. The needed
+infrastructure was added so ``np.isinf(a)`` and ``np.isnan(a)`` will run on
+``datetime64`` and ``timedelta64`` dtypes. Also added specific loops for
+``numpy.fmin`` and ``numpy.fmax`` that mask ``NaT``. This may require
+adjustment to user-facing code. Specifically, code that either disallowed the
+calls to ``numpy.isinf`` or ``numpy.isnan`` or checked that they raised an
+exception will require adaptation, and code that mistakenly called
+``numpy.fmax`` and ``numpy.fmin`` instead of ``numpy.maximum`` or
+``numpy.minimum`` respectively will require adjustment. This also affects
+``numpy.nanmax`` and ``numpy.nanmin``.
+(`gh-14841 <https://github.com/numpy/numpy/pull/14841>`__)
+
+
+C API changes
+=============
+
+``PyDataType_ISUNSIZED(descr)`` now returns False for structured datatypes
+--------------------------------------------------------------------------
+Previously this returned ``True`` for any datatype of itemsize 0, but now this
+returns ``False`` for the non-flexible datatype with itemsize 0, ``np.dtype([])``.
+(`gh-14393 <https://github.com/numpy/numpy/pull/14393>`__)
+
+
+New Features
+============
+
+Add our own ``*.pxd`` cython import file
+----------------------------------------
+Added a ``numpy/__init__.pxd`` file. It will be used for ``cimport numpy``.
+(`gh-12284 <https://github.com/numpy/numpy/pull/12284>`__)
+
+A tuple of axes can now be input to ``expand_dims``
+---------------------------------------------------
+The ``numpy.expand_dims`` ``axis`` keyword can now accept a tuple of
+axes. Previously, ``axis`` was required to be an integer.
+(`gh-14051 <https://github.com/numpy/numpy/pull/14051>`__)
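+
+For example::
+
+  >>> np.expand_dims(np.ones((2, 2)), axis=(0, 3)).shape
+  (1, 2, 2, 1)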
+
+Support for 64-bit OpenBLAS
+---------------------------
+Added support for 64-bit (ILP64) OpenBLAS. See ``site.cfg.example``
+for details.
+(`gh-15012 <https://github.com/numpy/numpy/pull/15012>`__)
+
+Add ``--f2cmap`` option to F2PY
+-------------------------------
+Allow specifying a file to load Fortran-to-C type map
+customizations from.
+(`gh-15113 <https://github.com/numpy/numpy/pull/15113>`__)
+
+
+Improvements
+============
+
+Different C numeric types of the same size have unique names
+------------------------------------------------------------
+On any given platform, two of ``np.intc``, ``np.int_``, and ``np.longlong``
+would previously appear indistinguishable through their ``repr``, despite
+their corresponding ``dtype`` having different properties.
+A similar problem existed for the unsigned counterparts to these types, and on
+some platforms for ``np.double`` and ``np.longdouble``.
+
+These types now always print with a unique ``__name__``.
+(`gh-10151 <https://github.com/numpy/numpy/pull/10151>`__)
+
+``argwhere`` now produces a consistent result on 0d arrays
+----------------------------------------------------------
+On N-d arrays, ``numpy.argwhere`` now always produces an array of shape
+``(n_non_zero, arr.ndim)``, even when ``arr.ndim == 0``. Previously, the
+last axis would have a dimension of 1 in this case.
+(`gh-13610 <https://github.com/numpy/numpy/pull/13610>`__)
+
+Add ``axis`` argument for ``random.permutation`` and ``random.shuffle``
+-----------------------------------------------------------------------
+
+Previously the ``random.permutation`` and ``random.shuffle`` functions
+could only shuffle an array along the first axis; they now have a
+new argument ``axis`` which allows shuffling along a specified axis.
+(`gh-13829 <https://github.com/numpy/numpy/pull/13829>`__)
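+
+For example, a sketch using the ``Generator`` interface::
+
+  rng = np.random.default_rng()
+  arr = np.arange(6).reshape(2, 3)
+  rng.permutation(arr, axis=1)   # permute the columns instead of the rows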
+
+``method`` keyword argument for ``np.random.multivariate_normal``
+-----------------------------------------------------------------
+A ``method`` keyword argument is now available for
+``np.random.multivariate_normal`` with possible values
+``{'svd', 'eigh', 'cholesky'}``. To use it, write
+``np.random.multivariate_normal(..., method=<method>)``.
+(`gh-14197 <https://github.com/numpy/numpy/pull/14197>`__)
+
+Add complex number support for ``numpy.fromstring``
+---------------------------------------------------
+Now ``numpy.fromstring`` can read complex numbers.
+(`gh-14227 <https://github.com/numpy/numpy/pull/14227>`__)
+
+``numpy.unique`` has consistent axes order when ``axis`` is not None
+--------------------------------------------------------------------
+``numpy.unique`` now uses ``moveaxis`` instead of ``swapaxes``, so that the
+ordering of the axes other than the ``axis`` argument is preserved.
+(`gh-14255 <https://github.com/numpy/numpy/pull/14255>`__)
+
+``numpy.matmul`` with boolean output now converts to boolean values
+-------------------------------------------------------------------
+Calling ``numpy.matmul`` where the output is a boolean array would fill the array
+with uint8 equivalents of the result, rather than 0/1. Now it forces the output
+to 0 or 1 (``NPY_TRUE`` or ``NPY_FALSE``).
+(`gh-14464 <https://github.com/numpy/numpy/pull/14464>`__)
+
+``numpy.random.randint`` produced incorrect value when the range was ``2**32``
+------------------------------------------------------------------------------
+The implementation introduced in 1.17.0 had an incorrect check when
+determining whether to use the 32-bit path or the full 64-bit path, which
+redirected random integer generation with a ``high - low`` range of ``2**32``
+to the 64-bit generator.
+(`gh-14501 <https://github.com/numpy/numpy/pull/14501>`__)
+
+Add complex number support for ``numpy.fromfile``
+-------------------------------------------------
+Now ``numpy.fromfile`` can read complex numbers.
+(`gh-14730 <https://github.com/numpy/numpy/pull/14730>`__)
+
+``std=c99`` added if compiler is named ``gcc``
+----------------------------------------------
+GCC before version 5 requires the ``-std=c99`` command line argument. Newer
+compilers automatically turn on C99 mode. The compiler setup code will
+automatically add the flag if the compiler name has ``gcc`` in it.
+(`gh-14771 <https://github.com/numpy/numpy/pull/14771>`__)
+
+
+Changes
+=======
+
+
+``NaT`` now sorts to the end of arrays
+--------------------------------------
+``NaT`` is now effectively treated as the largest integer for sorting
+purposes, so that it sorts to the end of arrays. This change is for consistency
+with ``NaN`` sorting behavior.
+(`gh-12658 <https://github.com/numpy/numpy/pull/12658>`__)
+(`gh-15068 <https://github.com/numpy/numpy/pull/15068>`__)
+
+Incorrect ``threshold`` in ``np.set_printoptions`` raises ``TypeError`` or ``ValueError``
+-----------------------------------------------------------------------------------------
+Previously an incorrect ``threshold`` raised ``ValueError``; it now raises ``TypeError``
+for non-numeric types and ``ValueError`` for ``nan`` values.
+(`gh-13899 <https://github.com/numpy/numpy/pull/13899>`__)
+
+Warn when saving a dtype with metadata
+--------------------------------------
+A ``UserWarning`` will be emitted when saving an array via ``numpy.save`` with
+``metadata``. Saving such an array may not preserve metadata, and if metadata
+is preserved, loading it will cause a ``ValueError``. This shortcoming in save
+and load will be addressed in a future release.
+(`gh-14142 <https://github.com/numpy/numpy/pull/14142>`__)
+
+``numpy.distutils`` append behavior changed for LDFLAGS and similar
+-------------------------------------------------------------------
+`numpy.distutils` has always overridden rather than appended to ``LDFLAGS`` and
+other similar environment variables for compiling Fortran extensions. Now
+the default behavior has changed to appending - which is the expected behavior
+in most situations. To preserve the old (overwriting) behavior, set the
+``NPY_DISTUTILS_APPEND_FLAGS`` environment variable to 0. This applies to:
+``LDFLAGS``, ``F77FLAGS``, ``F90FLAGS``, ``FREEFLAGS``, ``FOPT``, ``FDEBUG``,
+and ``FFLAGS``. NumPy 1.16 and 1.17 gave build warnings in situations where this
+change in behavior would have affected the compile flags used.
+(`gh-14248 <https://github.com/numpy/numpy/pull/14248>`__)
+
+Remove ``numpy.random.entropy`` without a deprecation
+-----------------------------------------------------
+
+``numpy.random.entropy`` was added to the ``numpy.random`` namespace in 1.17.0.
+It was meant to be a private c-extension module, but was exposed as public.
+It has been replaced by ``numpy.random.SeedSequence`` so the module was
+completely removed.
+(`gh-14498 <https://github.com/numpy/numpy/pull/14498>`__)
+
+Add options to quiet build configuration and build with ``-Werror``
+-------------------------------------------------------------------
+Added two new configuration options. During the ``build_src`` subcommand, as
+part of configuring NumPy, the files ``_numpyconfig.h`` and ``config.h`` are
+created by probing support for various runtime functions and routines.
+Previously, the very verbose compiler output during this stage clouded more
+important information. By default the output is silenced. Running ``runtests.py
+--debug-info`` will add ``--verbose-cfg`` to the ``build_src`` subcommand,
+which will restore the previous behaviour.
+
+Adding ``CFLAGS=-Werror`` to turn warnings into errors would trigger errors
+during the configuration. Now ``runtests.py --warn-error`` will add
+``--warn-error`` to the ``build`` subcommand, which will percolate to the
+``build_ext`` and ``build_lib`` subcommands. This will add the compiler flag
+to those stages and turn compiler warnings into errors while actually building
+NumPy itself, avoiding the ``build_src`` subcommand compiler calls.
+
+(`gh-14527 <https://github.com/numpy/numpy/pull/14527>`__)
+(`gh-14518 <https://github.com/numpy/numpy/pull/14518>`__)
--- /dev/null
+=========================
+NumPy 1.3.0 Release Notes
+=========================
+
+This minor release includes numerous bug fixes, official python 2.6 support,
+and several new features such as generalized ufuncs.
+
+Highlights
+==========
+
+Python 2.6 support
+------------------
+
+Python 2.6 is now supported on all previously supported platforms, including
+windows.
+
+https://www.python.org/dev/peps/pep-0361/
+
+Generalized ufuncs
+------------------
+
+There is a general need for looping over not only functions on scalars but also
+over functions on vectors (or arrays), as explained on
+http://scipy.org/scipy/numpy/wiki/GeneralLoopingFunctions. We propose to
+realize this concept by generalizing the universal functions (ufuncs), and
+provide a C implementation that adds ~500 lines to the numpy code base. In
+current (specialized) ufuncs, the elementary function is limited to
+element-by-element operations, whereas the generalized version supports
+"sub-array" by "sub-array" operations. The Perl vector library PDL provides a
+similar functionality and its terms are re-used in the following.
+
+Each generalized ufunc has information associated with it that states what the
+"core" dimensionality of the inputs is, as well as the corresponding
+dimensionality of the outputs (the element-wise ufuncs have zero core
+dimensions). The list of the core dimensions for all arguments is called the
+"signature" of a ufunc. For example, the ufunc numpy.add has signature
+"(),()->()" defining two scalar inputs and one scalar output.
+
+Another example is (see the GeneralLoopingFunctions page) the function
+inner1d(a,b) with a signature of "(i),(i)->()". This applies the inner product
+along the last axis of each input, but keeps the remaining indices intact. For
+example, where a is of shape (3,5,N) and b is of shape (5,N), this will return
+an output of shape (3,5). The underlying elementary function is called 3*5
+times. In the signature, we specify one core dimension "(i)" for each input and
+zero core dimensions "()" for the output, since it takes two 1-d arrays and
+returns a scalar. By using the same name "i", we specify that the two
+corresponding dimensions should be of the same size (or one of them is of size
+1 and will be broadcasted).
+
+The dimensions beyond the core dimensions are called "loop" dimensions. In the
+above example, this corresponds to (3,5).
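+
+A minimal sketch using the ``inner1d`` test gufunc shipped in
+``numpy.core.umath_tests``::
+
+  >>> from numpy.core.umath_tests import inner1d
+  >>> a = np.ones((3, 5, 4))
+  >>> b = np.ones((5, 4))
+  >>> inner1d(a, b).shape   # core dims consumed, loop dims remain
+  (3, 5)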
+
+The usual numpy "broadcasting" rules apply, where the signature determines how
+the dimensions of each input/output object are split into core and loop
+dimensions:
+
+While an input array has a smaller dimensionality than the corresponding number
+of core dimensions, 1's are prepended to its shape. The core dimensions are
+removed from all inputs and the remaining dimensions are broadcast, defining
+the loop dimensions. The output is given by the loop dimensions plus the
+output core dimensions.
+
+Experimental Windows 64 bits support
+------------------------------------
+
+Numpy can now be built on windows 64 bits (amd64 only, not IA64), with both MS
+compilers and mingw-w64 compilers:
+
+This is *highly experimental*: DO NOT USE FOR PRODUCTION. See INSTALL.txt,
+Windows 64 bits section for more information on limitations and how to build it
+by yourself.
+
+New features
+============
+
+Formatting issues
+-----------------
+
+Float formatting is now handled by numpy instead of the C runtime: this enables
+locale-independent formatting and more robust ``fromstring`` and related
+methods.
+Special values (inf and nan) are also more consistent across platforms (nan vs
+IND/NaN, etc...), and more consistent with recent python formatting work (in
+2.6 and later).
+
+Nan handling in max/min
+-----------------------
+
+The maximum/minimum ufuncs now reliably propagate nans. If one of the
+arguments is a nan, then nan is returned. This affects np.min/np.max, amin/amax
+and the array methods max/min. New ufuncs fmax and fmin have been added to deal
+with non-propagating nans.
+
+Nan handling in sign
+--------------------
+
+The ufunc sign now returns nan for the sign of a nan.
+
+
+New ufuncs
+----------
+
+#. fmax - same as maximum for integer types and non-nan floats. Returns the
+ non-nan argument if one argument is nan and returns nan if both arguments
+ are nan.
+#. fmin - same as minimum for integer types and non-nan floats. Returns the
+ non-nan argument if one argument is nan and returns nan if both arguments
+ are nan.
+#. deg2rad - converts degrees to radians, same as the radians ufunc.
+#. rad2deg - converts radians to degrees, same as the degrees ufunc.
+#. log2 - base 2 logarithm.
+#. exp2 - base 2 exponential.
+#. trunc - truncate floats to nearest integer towards zero.
+#. logaddexp - add numbers stored as logarithms and return the logarithm
+ of the result.
+#. logaddexp2 - add numbers stored as base 2 logarithms and return the base 2
+ logarithm of the result.
+
+Masked arrays
+-------------
+
+Several new features and bug fixes, including:
+
+ * structured arrays should now be fully supported by MaskedArray
+ (r6463, r6324, r6305, r6300, r6294...)
+ * Minor bug fixes (r6356, r6352, r6335, r6299, r6298)
+ * Improved support for __iter__ (r6326)
+ * made baseclass, sharedmask and hardmask accessible to the user (but
+ read-only)
+ * doc update
+
+gfortran support on windows
+---------------------------
+
+Gfortran can now be used as a fortran compiler for numpy on windows, even when
+the C compiler is Visual Studio (VS 2005 and above; VS 2003 will NOT work).
+Gfortran + Visual Studio does not work on windows 64 bits (but gcc + gfortran
+does). It is unclear whether it will be possible to use gfortran and Visual
+Studio at all on x64.
+
+Arch option for windows binary
+------------------------------
+
+Automatic arch detection can now be bypassed from the command line for the
+superpack installer::
+
+ numpy-1.3.0-superpack-win32.exe /arch=nosse
+
+will install a numpy which works on any x86, even if the running computer
+supports the SSE instruction set.
+
+Deprecated features
+===================
+
+Histogram
+---------
+
+The semantics of histogram have been modified to fix long-standing issues
+with outlier handling. The main changes concern:
+
+#. the definition of the bin edges, now including the rightmost edge, and
+#. the handling of upper outliers, now ignored rather than tallied in the
+ rightmost bin.
+
+The previous behavior is still accessible using `new=False`, but this is
+deprecated, and will be removed entirely in 1.4.0.
+
+Documentation changes
+=====================
+
+A lot of documentation has been added. Both user guide and references can be
+built from sphinx.
+
+New C API
+=========
+
+Multiarray API
+--------------
+
+The following functions have been added to the multiarray C API:
+
+ * PyArray_GetEndianness: to get runtime endianness
+
+Ufunc API
+---------
+
+The following functions have been added to the ufunc API:
+
+ * PyUFunc_FromFuncAndDataAndSignature: to declare a more general ufunc
+ (generalized ufunc).
+
+
+New defines
+-----------
+
+New public C defines are available for ARCH specific code through numpy/npy_cpu.h:
+
+ * NPY_CPU_X86: x86 arch (32 bits)
+ * NPY_CPU_AMD64: amd64 arch (x86_64, NOT Itanium)
+ * NPY_CPU_PPC: 32 bits ppc
+ * NPY_CPU_PPC64: 64 bits ppc
+ * NPY_CPU_SPARC: 32 bits sparc
+ * NPY_CPU_SPARC64: 64 bits sparc
+ * NPY_CPU_S390: S390
+ * NPY_CPU_IA64: ia64
+ * NPY_CPU_PARISC: PARISC
+
+New macros for CPU endianness have been added as well (see internal changes
+below for details):
+
+ * NPY_BYTE_ORDER: integer
+ * NPY_LITTLE_ENDIAN/NPY_BIG_ENDIAN defines
+
+Those provide portable alternatives to glibc endian.h macros for platforms
+without it.
+
+Portable NAN, INFINITY, etc...
+------------------------------
+
+npy_math.h now makes available several portable macros to get NAN, INFINITY:
+
+ * NPY_NAN: equivalent to NAN, which is a GNU extension
+ * NPY_INFINITY: equivalent to C99 INFINITY
+ * NPY_PZERO, NPY_NZERO: positive and negative zero respectively
+
+Corresponding single and extended precision macros are available as well. All
+references to NAN, or home-grown computation of NAN on the fly have been
+removed for consistency.
+
+Internal changes
+================
+
+numpy.core math configuration revamp
+------------------------------------
+
+This should make the porting to new platforms easier, and more robust. In
+particular, the configuration stage does not need to execute any code on the
+target platform, which is a first step toward cross-compilation.
+
+https://www.numpy.org/neps/nep-0003-math_config_clean.html
+
+umath refactor
+--------------
+
+A lot of code cleanup for umath/ufunc code (charris).
+
+Improvements to build warnings
+------------------------------
+
+Numpy can now build with -W -Wall without warnings.
+
+https://www.numpy.org/neps/nep-0002-warnfix.html
+
+Separate core math library
+--------------------------
+
+The core math functions (sin, cos, etc... for basic C types) have been put into
+a separate library; it acts as a compatibility layer, to support most C99 maths
+functions (real only for now). The library includes platform-specific fixes for
+various maths functions, such that using those versions should be more robust
+than using your platform's functions directly. The API for existing functions is
+exactly the same as the C99 math functions API; the only difference is the npy
+prefix (npy_cos vs cos).
+
+The core library will be made available to any extension in 1.4.0.
+
+CPU arch detection
+------------------
+
+npy_cpu.h defines numpy specific CPU defines, such as NPY_CPU_X86, etc...
+Those are portable across OS and toolchains, and set up when the header is
+parsed, so that they can be safely used even in the case of cross-compilation
+(the values are not set when numpy is built), or for multi-arch binaries (e.g.
+fat binaries on Mac OS X).
+
+npy_endian.h defines numpy specific endianness defines, modeled on the glibc
+endian.h. NPY_BYTE_ORDER is equivalent to BYTE_ORDER, and one of
+NPY_LITTLE_ENDIAN or NPY_BIG_ENDIAN is defined. As for CPU archs, those are set
+when the header is parsed by the compiler, and as such can be used for
+cross-compilation and multi-arch binaries.
--- /dev/null
+=========================
+NumPy 1.4.0 Release Notes
+=========================
+
+This minor release includes numerous bug fixes, as well as a few new features.
+It is backward compatible with the 1.3.0 release.
+
+Highlights
+==========
+
+* New datetime dtype support to deal with dates in arrays
+
+* Faster import time
+
+* Extended array wrapping mechanism for ufuncs
+
+* New Neighborhood iterator (C-level only)
+
+* C99-like complex functions in npymath
+
+New features
+============
+
+Extended array wrapping mechanism for ufuncs
+--------------------------------------------
+
+An __array_prepare__ method has been added to ndarray to provide subclasses
+greater flexibility to interact with ufuncs and ufunc-like functions. ndarray
+already provided __array_wrap__, which allowed subclasses to set the array type
+for the result and populate metadata on the way out of the ufunc (as seen in
+the implementation of MaskedArray). For some applications it is necessary to
+provide checks and populate metadata *on the way in*. __array_prepare__ is
+therefore called just after the ufunc has initialized the output array but
+before computing the results and populating it. This way, checks can be made
+and errors raised before operations which may modify data in place.
+
+Automatic detection of forward incompatibilities
+------------------------------------------------
+
+Previously, if an extension was built against version N of NumPy and used on
+a system with NumPy M < N, import_array succeeded, which could cause crashes
+because version M may lack functions that exist in N. Starting from NumPy
+1.4.0, this will cause a failure in import_array, so the error will be caught
+early on.
+
+New iterators
+-------------
+
+A new neighborhood iterator has been added to the C API. It can be used to
+iterate over the items in a neighborhood of an array, and can handle boundary
+conditions automatically. Zero and one padding are available, as well as
+arbitrary constant value, mirror and circular padding.
+
+New polynomial support
+----------------------
+
+New modules chebyshev and polynomial have been added. The new polynomial module
+is not compatible with the current polynomial support in numpy, but is much
+like the new chebyshev module. The most noticeable differences for most users will
+be that coefficients are specified from low to high power, that the low
+level functions do *not* work with the Chebyshev and Polynomial classes as
+arguments, and that the Chebyshev and Polynomial classes include a domain.
+Mapping between domains is a linear substitution and the two classes can be
+converted one to the other, allowing, for instance, a Chebyshev series in
+one domain to be expanded as a polynomial in another domain. The new classes
+should generally be used instead of the low level functions; the latter are
+provided for those who wish to build their own classes.
+
+The new modules are not automatically imported into the numpy namespace;
+they must be explicitly brought in with an "import numpy.polynomial"
+statement.
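+
+A minimal sketch of the class interface (the method set has grown in later
+releases; exact printing may vary)::
+
+    >>> from numpy.polynomial import Polynomial, Chebyshev
+    >>> p = Polynomial([1, 2, 3])       # low to high: 1 + 2*x + 3*x**2
+    >>> c = p.convert(kind=Chebyshev)   # same function in the Chebyshev basis
+    >>> print(p(0.5), c(0.5))
+    2.75 2.75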
+
+New C API
+---------
+
+The following C functions have been added to the C API:
+
+ #. PyArray_GetNDArrayCFeatureVersion: return the *API* version of the
+ loaded numpy.
+ #. PyArray_Correlate2 - like PyArray_Correlate, but implements the usual
+ definition of correlation. Inputs are not swapped, and conjugate is
+ taken for complex arrays.
+ #. PyArray_NeighborhoodIterNew - a new iterator to iterate over a
+ neighborhood of a point, with automatic boundaries handling. It is
+ documented in the iterators section of the C-API reference, and you can
+ find some examples in the multiarray_test.c.src file in numpy.core.
+
+New ufuncs
+----------
+
+The following ufuncs have been added to the C API:
+
+ #. copysign - return the value of the first argument with the sign copied
+ from the second argument.
+ #. nextafter - return the next representable floating point value of the
+ first argument toward the second argument.
+
+New defines
+-----------
+
+The alpha processor is now defined and available in numpy/npy_cpu.h. The
+failed detection of the PARISC processor has been fixed. The defines are:
+
+ #. NPY_CPU_HPPA: PARISC
+ #. NPY_CPU_ALPHA: Alpha
+
+Testing
+-------
+
+ #. deprecated decorator: this decorator may be used to avoid cluttering
+ testing output while testing that a DeprecationWarning is actually raised
+ by the decorated test.
+ #. assert_array_almost_equal_nulp: new function to compare two arrays of
+ floating point values. With this function, two values are considered
+ close if there are not many representable floating point values in
+ between, thus being more robust than assert_array_almost_equal when the
+ values fluctuate a lot.
+ #. assert_array_max_ulp: raise an assertion if there are more than N
+ representable numbers between two floating point values.
+ #. assert_warns: raise an AssertionError if a callable does not generate a
+ warning of the appropriate class, without altering the warning state.
+
+Reusing npymath
+---------------
+
+In 1.3.0, we started putting portable C math routines in npymath library, so
+that people can use those to write portable extensions. Unfortunately, it was
+not possible to easily link against this library: in 1.4.0, support has been
+added to numpy.distutils so that 3rd party extensions can reuse this library.
+See the coremath documentation for more information.
+
+Improved set operations
+-----------------------
+
+In previous versions of NumPy some set functions (intersect1d,
+setxor1d, setdiff1d and setmember1d) could return incorrect results if
+the input arrays contained duplicate items. These now work correctly
+for input arrays with duplicates. setmember1d has been renamed to
+in1d, as with the change to accept arrays with duplicates it is
+no longer a set operation, and is conceptually similar to an
+elementwise version of the Python operator 'in'. All of these
+functions now accept the boolean keyword assume_unique. This is False
+by default, but can be set True if the input arrays are known not
+to contain duplicates, which can increase the functions' execution
+speed.
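+
+For example (a small doctest sketch; boolean array formatting differs across
+numpy versions)::
+
+    >>> import numpy as np
+    >>> np.in1d([1, 2, 2, 5], [2, 5, 9])   # elementwise "in", duplicates kept
+    array([False,  True,  True,  True])
+    >>> np.intersect1d([1, 2, 2], [2, 2, 3])
+    array([2])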
+
+Improvements
+============
+
+ #. numpy import is noticeably faster (20 to 30% depending on the
+ platform and computer)
+
+ #. The sort functions now sort nans to the end.
+
+ * Real sort order is [R, nan]
+ * Complex sort order is [R + Rj, R + nanj, nan + Rj, nan + nanj]
+
+ Complex numbers with the same nan placements are sorted according to
+ the non-nan part if it exists.
+ #. The type comparison functions have been made consistent with the new
+ sort order of nans. Searchsorted now works with sorted arrays
+ containing nan values.
+ #. Complex division has been made more resistant to overflow.
+ #. Complex floor division has been made more resistant to overflow.
+
+Deprecations
+============
+
+The following functions are deprecated:
+
+ #. correlate: it takes a new keyword argument old_behavior. When True (the
+ default), it returns the same result as before. When False, it computes the
+ conventional correlation and takes the conjugate for complex arrays. The
+ old behavior will be removed in NumPy 1.5, and raises a
+ DeprecationWarning in 1.4.
+
+ #. unique1d: use unique instead. unique1d raises a deprecation
+ warning in 1.4, and will be removed in 1.5.
+
+ #. intersect1d_nu: use intersect1d instead. intersect1d_nu raises
+ a deprecation warning in 1.4, and will be removed in 1.5.
+
+ #. setmember1d: use in1d instead. setmember1d raises a deprecation
+ warning in 1.4, and will be removed in 1.5.
+
+The following raise errors:
+
+ #. When operating on 0-d arrays, ``numpy.max`` and other functions accept
+ only ``axis=0``, ``axis=-1`` and ``axis=None``. Using an out-of-bounds
+ axis is an indication of a bug, so Numpy now raises an error for these
+ cases.
+
+ #. Specifying ``axis > MAX_DIMS`` is no longer allowed; Numpy now raises an
+ error instead of behaving as it does for ``axis=None``.
+
+Internal changes
+================
+
+Use C99 complex functions when available
+----------------------------------------
+
+The numpy complex types are now guaranteed to be ABI compatible with C99
+complex type, if available on the platform. Moreover, the complex ufuncs now
+use the platform C99 functions instead of our own.
+
+split multiarray and umath source code
+--------------------------------------
+
+The source code of multiarray and umath has been split into separate logical
+compilation units. This should make the source code more approachable for
+newcomers.
+
+Separate compilation
+--------------------
+
+By default, every file of multiarray (and umath) is merged into one for
+compilation as was the case before, but if the NPY_SEPARATE_COMPILATION env
+variable is set to a non-zero value, experimental individual compilation of
+each file is enabled. This makes the compile/debug cycle much faster when
+working on core numpy.
+
+Separate core math library
+--------------------------
+
+New functions which have been added:
+
+ * npy_copysign
+ * npy_nextafter
+ * npy_cpack
+ * npy_creal
+ * npy_cimag
+ * npy_cabs
+ * npy_cexp
+ * npy_clog
+ * npy_cpow
+ * npy_csqrt
+ * npy_ccos
+ * npy_csin
--- /dev/null
+=========================
+NumPy 1.5.0 Release Notes
+=========================
+
+
+Highlights
+==========
+
+Python 3 compatibility
+----------------------
+
+This is the first NumPy release which is compatible with Python 3. Support for
+Python 3 and Python 2 is done from a single code base. Extensive notes on
+changes can be found at
+`<http://projects.scipy.org/numpy/browser/trunk/doc/Py3K.txt>`_.
+
+Note that the Numpy testing framework relies on nose, which does not have a
+Python 3 compatible release yet. A working Python 3 branch of nose can be found
+at `<http://bitbucket.org/jpellerin/nose3/>`_ however.
+
+Porting of SciPy to Python 3 is expected to be completed soon.
+
+:pep:`3118` compatibility
+-------------------------
+
+The new buffer protocol described by PEP 3118 is fully supported in this
+version of Numpy. On Python versions >= 2.6 Numpy arrays expose the buffer
+interface, and array(), asarray() and other functions accept new-style buffers
+as input.
+
+
+New features
+============
+
+Warning on casting complex to real
+----------------------------------
+
+Numpy now emits a `numpy.ComplexWarning` when a complex number is cast
+into a real number. For example:
+
+ >>> x = np.array([1,2,3])
+ >>> x[:2] = np.array([1+2j, 1-2j])
+ ComplexWarning: Casting complex values to real discards the imaginary part
+
+The cast indeed discards the imaginary part, and this may not be the
+intended behavior in all cases, hence the warning. This warning can be
+turned off in the standard way:
+
+ >>> import warnings
+ >>> warnings.simplefilter("ignore", np.ComplexWarning)
+
+Dot method for ndarrays
+-----------------------
+
+Ndarrays now have the dot product also as a method, which allows writing
+chains of matrix products as
+
+ >>> a.dot(b).dot(c)
+
+instead of the longer alternative
+
+ >>> np.dot(a, np.dot(b, c))
+
+linalg.slogdet function
+-----------------------
+
+The slogdet function returns the sign and logarithm of the determinant
+of a matrix. Because the determinant may involve the product of many
+small/large values, the result is often more accurate than that obtained
+by simple multiplication.
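+
+For example (a minimal sketch)::
+
+    >>> import numpy as np
+    >>> sign, logdet = np.linalg.slogdet(np.eye(3) * 100.0)
+    >>> det = sign * np.exp(logdet)   # ~1e6, recovered without overflow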
+
+new header
+----------
+
+The new header file ndarraytypes.h contains the symbols from
+ndarrayobject.h that do not depend on the PY_ARRAY_UNIQUE_SYMBOL and
+NO_IMPORT/_ARRAY macros. Broadly, these symbols are types, typedefs,
+and enumerations; the array function calls are left in
+ndarrayobject.h. This allows users to include array-related types and
+enumerations without needing to concern themselves with the macro
+expansions and their side-effects.
+
+
+Changes
+=======
+
+polynomial.polynomial
+---------------------
+
+* The polyint and polyder functions now check that the specified number of
+ integrations or derivations is a non-negative integer. The number 0 is
+ a valid value for both functions.
+* A degree method has been added to the Polynomial class.
+* A trimdeg method has been added to the Polynomial class. It operates like
+ truncate except that the argument is the desired degree of the result,
+ not the number of coefficients.
+* Polynomial.fit now uses None as the default domain for the fit. The default
+ Polynomial domain can be specified by using [] as the domain value.
+* Weights can be used in both polyfit and Polynomial.fit
+* A linspace method has been added to the Polynomial class to ease plotting.
+* The polymulx function was added.
+
+polynomial.chebyshev
+--------------------
+
+* The chebint and chebder functions now check that the specified number of
+ integrations or derivations is a non-negative integer. The number 0 is
+ a valid value for both functions.
+* A degree method has been added to the Chebyshev class.
+* A trimdeg method has been added to the Chebyshev class. It operates like
+ truncate except that the argument is the desired degree of the result,
+ not the number of coefficients.
+* Chebyshev.fit now uses None as the default domain for the fit. The default
+ Chebyshev domain can be specified by using [] as the domain value.
+* Weights can be used in both chebfit and Chebyshev.fit
+* A linspace method has been added to the Chebyshev class to ease plotting.
+* The chebmulx function was added.
+* Added functions for the Chebyshev points of the first and second kind.
+
+
+histogram
+---------
+
+After a two-year transition period, the old behavior of the histogram function
+has been phased out, and the "new" keyword has been removed.
+
+correlate
+---------
+
+The old behavior of correlate was deprecated in 1.4.0; the new behavior (the
+usual definition of cross-correlation) is now the default.
--- /dev/null
+=========================
+NumPy 1.6.0 Release Notes
+=========================
+
+This release includes several new features as well as numerous bug fixes and
+improved documentation. It is backward compatible with the 1.5.0 release, and
+supports Python 2.4 - 2.7 and 3.1 - 3.2.
+
+
+Highlights
+==========
+
+* Re-introduction of datetime dtype support to deal with dates in arrays.
+
+* A new 16-bit floating point type.
+
+* A new iterator, which improves performance of many functions.
+
+
+New features
+============
+
+New 16-bit floating point type
+------------------------------
+
+This release adds support for the IEEE 754-2008 binary16 format, available as
+the data type ``numpy.half``. Within Python, the type behaves similarly to
+`float` or `double`, and C extensions can add support for it with the exposed
+half-float API.
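+
+For example (a minimal sketch)::
+
+    >>> import numpy as np
+    >>> x = np.array([1.0, 65504.0], dtype=np.half)  # 65504 is the largest finite half
+    >>> x.dtype.itemsize   # two bytes per element
+    2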
+
+
+New iterator
+------------
+
+A new iterator has been added, replacing the functionality of the
+existing iterator and multi-iterator with a single object and API.
+This iterator works well with general memory layouts different from
+C or Fortran contiguous, and handles both standard NumPy and
+customized broadcasting. The buffering, automatic data type
+conversion, and optional output parameters, offered by
+ufuncs but difficult to replicate elsewhere, are now exposed by this
+iterator.
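+
+A minimal sketch of the Python-side binding, ``np.nditer`` (the C API mirrors
+it); here the second operand is allocated as an output automatically::
+
+    >>> import numpy as np
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> it = np.nditer([a, None])
+    >>> for x, y in it:
+    ...     y[...] = 2 * x
+    >>> it.operands[1]
+    array([[ 0,  2,  4],
+           [ 6,  8, 10]])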
+
+
+Legendre, Laguerre, Hermite, HermiteE polynomials in ``numpy.polynomial``
+-------------------------------------------------------------------------
+
+Extend the number of polynomials available in the polynomial package. In
+addition, a new ``window`` attribute has been added to the classes in
+order to specify the range the ``domain`` maps to. This is mostly useful
+for the Laguerre, Hermite, and HermiteE polynomials whose natural domains
+are infinite and provides a more intuitive way to get the correct mapping
+of values without playing unnatural tricks with the domain.
+
+
+Fortran assumed shape array and size function support in ``numpy.f2py``
+-----------------------------------------------------------------------
+
+F2py now supports wrapping Fortran 90 routines that use assumed shape
+arrays. Previously, such routines could be called from Python, but the
+corresponding Fortran routines received assumed shape arrays as zero
+length arrays, which caused unpredictable results. Thanks to Lorenz
+Hüdepohl for pointing out the correct way to interface routines with
+assumed shape arrays.
+
+In addition, f2py now supports automatic wrapping of Fortran routines
+that use the two argument ``size`` function in dimension specifications.
+
+
+Other new functions
+-------------------
+
+``numpy.ravel_multi_index`` : Converts a multi-index tuple into
+an array of flat indices, applying boundary modes to the indices.
+
+``numpy.einsum`` : Evaluate the Einstein summation convention. Using the
+Einstein summation convention, many common multi-dimensional array operations
+can be represented in a simple fashion. This function provides a way to
+compute such summations.
+
+``numpy.count_nonzero`` : Counts the number of non-zero elements in an array.
+
+``numpy.result_type`` and ``numpy.min_scalar_type`` : These functions expose
+the underlying type promotion used by the ufuncs and other operations to
+determine the types of outputs. These improve upon the ``numpy.common_type``
+and ``numpy.mintypecode`` which provide similar functionality but do
+not match the ufunc implementation.
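+
+Two quick doctest sketches::
+
+    >>> import numpy as np
+    >>> np.einsum('ij->i', np.arange(6).reshape(2, 3))   # row sums
+    array([ 3, 12])
+    >>> np.result_type(np.int8, np.float32)   # ufunc-style promotion
+    dtype('float32')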
+
+
+Changes
+=======
+
+``default error handling``
+--------------------------
+
+The default error handling has been changed from ``print`` to ``warn`` for
+all cases except ``underflow``, which remains ``ignore``.
+
+
+``numpy.distutils``
+-------------------
+
+Several new compilers are supported for building Numpy: the Portland Group
+Fortran compiler on OS X, the PathScale compiler suite and the 64-bit Intel C
+compiler on Linux.
+
+
+``numpy.testing``
+-----------------
+
+The testing framework gained ``numpy.testing.assert_allclose``, which provides
+a more convenient way to compare floating point arrays than
+`assert_almost_equal`, `assert_approx_equal` and `assert_array_almost_equal`.
+
+
+``C API``
+---------
+
+In addition to the APIs for the new iterator and half data type, a number
+of other additions have been made to the C API. The type promotion
+mechanism used by ufuncs is exposed via ``PyArray_PromoteTypes``,
+``PyArray_ResultType``, and ``PyArray_MinScalarType``. A new enumeration
+``NPY_CASTING`` has been added which controls what types of casts are
+permitted. This is used by the new functions ``PyArray_CanCastArrayTo``
+and ``PyArray_CanCastTypeTo``. A more flexible way to handle
+conversion of arbitrary python objects into arrays is exposed by
+``PyArray_GetArrayParamsFromObject``.
+
+
+Deprecated features
+===================
+
+The "normed" keyword in ``numpy.histogram`` is deprecated. Its functionality
+will be replaced by the new "density" keyword.
+
+
+Removed features
+================
+
+``numpy.fft``
+-------------
+
+The functions `refft`, `refft2`, `refftn`, `irefft`, `irefft2`, `irefftn`,
+which were aliases for the same functions without the 'e' in the name, were
+removed.
+
+
+``numpy.memmap``
+----------------
+
+The `sync()` and `close()` methods of memmap were removed. Use `flush()` and
+"del memmap" instead.
+
+
+``numpy.lib``
+-------------
+
+The deprecated functions ``numpy.unique1d``, ``numpy.setmember1d``,
+``numpy.intersect1d_nu`` and ``numpy.lib.ufunclike.log2`` were removed.
+
+
+``numpy.ma``
+------------
+
+Several deprecated items were removed from the ``numpy.ma`` module::
+
+ * ``numpy.ma.MaskedArray`` "raw_data" method
+ * ``numpy.ma.MaskedArray`` constructor "flag" keyword
+ * ``numpy.ma.make_mask`` "flag" keyword
+ * ``numpy.ma.allclose`` "fill_value" keyword
+
+
+``numpy.distutils``
+-------------------
+
+The ``numpy.get_numpy_include`` function was removed, use ``numpy.get_include``
+instead.
--- /dev/null
+=========================
+NumPy 1.6.1 Release Notes
+=========================
+
+This is a bugfix only release in the 1.6.x series.
+
+
+Issues Fixed
+============
+
+* #1834: einsum fails for specific shapes
+* #1837: einsum throws nan or freezes python for specific array shapes
+* #1838: object <-> structured type arrays regression
+* #1851: regression for SWIG based code in 1.6.0
+* #1863: Buggy results when operating on array copied with astype()
+* #1870: Fix corner case of object array assignment
+* #1843: Py3k: fix error with recarray
+* #1885: nditer: Error in detecting double reduction loop
+* #1874: f2py: fix --include_paths bug
+* #1749: Fix ctypes.load_library()
+* #1895/1896: iter: writeonly operands weren't always being buffered correctly
--- /dev/null
+=========================
+NumPy 1.6.2 Release Notes
+=========================
+
+This is a bugfix release in the 1.6.x series. Due to the delay of the NumPy
+1.7.0 release, this release contains far more fixes than a regular NumPy bugfix
+release. It also includes a number of documentation and build improvements.
+
+Issues fixed
+============
+
+``numpy.core``
+--------------
+
+* #2063: make unique() return consistent index
+* #1138: allow creating arrays from empty buffers or empty slices
+* #1446: correct note about correspondence vstack and concatenate
+* #1149: make argmin() work for datetime
+* #1672: fix allclose() to work for scalar inf
+* #1747: make np.median() work for 0-D arrays
+* #1776: make complex division by zero yield inf properly
+* #1675: add scalar support for the format() function
+* #1905: explicitly check for NaNs in allclose()
+* #1952: allow floating ddof in std() and var()
+* #1948: fix regression for indexing chararrays with empty list
+* #2017: fix type hashing
+* #2046: deleting array attributes causes segfault
+* #2033: a**2.0 has incorrect type
+* #2045: make attribute/iterator_element deletions not segfault
+* #2021: fix segfault in searchsorted()
+* #2073: fix float16 __array_interface__ bug
+
+
+``numpy.lib``
+-------------
+
+* #2048: break reference cycle in NpzFile
+* #1573: savetxt() now handles complex arrays
+* #1387: allow bincount() to accept empty arrays
+* #1899: fixed histogramdd() bug with empty inputs
+* #1793: fix failing npyio test under py3k
+* #1936: fix extra nesting for subarray dtypes
+* #1848: make tril/triu return the same dtype as the original array
+* #1918: use Py_TYPE to access ob_type, so it works also on Py3
+
+
+``numpy.distutils``
+-------------------
+
+* #1261: change compile flag on AIX from -O5 to -O3
+* #1377: update HP compiler flags
+* #1383: provide better support for C++ code on HPUX
+* #1857: fix build for py3k + pip
+* BLD: raise a clearer warning in case of building without cleaning up first
+* BLD: follow build_ext coding convention in build_clib
+* BLD: fix up detection of Intel CPU on OS X in system_info.py
+* BLD: add support for the new X11 directory structure on Ubuntu & co.
+* BLD: add ufsparse to the libraries search path.
+* BLD: add 'pgfortran' as a valid compiler in the Portland Group
+* BLD: update version match regexp for IBM AIX Fortran compilers.
+
+
+``numpy.random``
+----------------
+
+* BUG: Use npy_intp instead of long in mtrand
+
+Changes
+=======
+
+``numpy.f2py``
+--------------
+
+* ENH: Introduce new options extra_f77_compiler_args and extra_f90_compiler_args
+* BLD: Improve reporting of fcompiler value
+* BUG: Fix f2py test_kind.py test
+
+
+``numpy.poly``
+--------------
+
+* ENH: Add some tests for polynomial printing
+* ENH: Add companion matrix functions
+* DOC: Rearrange the polynomial documents
+* BUG: Fix up links to classes
+* DOC: Add version added to some of the polynomial package modules
+* DOC: Document xxxfit functions in the polynomial package modules
+* BUG: The polynomial convenience classes let different types interact
+* DOC: Document the use of the polynomial convenience classes
+* DOC: Improve numpy reference documentation of polynomial classes
+* ENH: Improve the computation of polynomials from roots
+* STY: Code cleanup in polynomial [*]fromroots functions
+* DOC: Remove references to cast and NA, which were added in 1.7
--- /dev/null
+=========================
+NumPy 1.7.0 Release Notes
+=========================
+
+This release includes several new features as well as numerous bug fixes and
+refactorings. It supports Python 2.4 - 2.7 and 3.1 - 3.3 and is the last
+release that supports Python 2.4 - 2.5.
+
+Highlights
+==========
+
+* ``where=`` parameter to ufuncs (allows the use of boolean arrays to choose
+ where a computation should be done)
+* ``vectorize`` improvements (added 'excluded' and 'cache' keyword, general
+ cleanup and bug fixes)
+* ``numpy.random.choice`` (random sample generating function)
+
+
+Compatibility notes
+===================
+
+In a future version of numpy, the functions np.diag, np.diagonal, and the
+diagonal method of ndarrays will return a view onto the original array,
+instead of producing a copy as they do now. This makes a difference if you
+write to the array returned by any of these functions. To facilitate this
+transition, numpy 1.7 produces a FutureWarning if it detects that you may
+be attempting to write to such an array. See the documentation for
+np.diagonal for details.
+
+Similar to np.diagonal above, in a future version of numpy, indexing a
+record array by a list of field names will return a view onto the original
+array, instead of producing a copy as they do now. As with np.diagonal,
+numpy 1.7 produces a FutureWarning if it detects that you may be attempting
+to write to such an array. See the documentation for array indexing for
+details.
+
+In a future version of numpy, the default casting rule for UFunc out=
+parameters will be changed from 'unsafe' to 'same_kind'. (This also applies
+to in-place operations like a += b, which is equivalent to np.add(a, b,
+out=a).) Most usages which violate the 'same_kind' rule are likely bugs, so
+this change may expose previously undetected errors in projects that depend
+on NumPy. In this version of numpy, such usages will continue to succeed,
+but will raise a DeprecationWarning.
+
+Full-array boolean indexing has been optimized to use a different,
+specialized code path. This code path should produce the same results,
+but any feedback about changes to your code would be appreciated.
+
+Attempting to write to a read-only array (one with ``arr.flags.writeable``
+set to ``False``) used to raise either a RuntimeError, ValueError, or
+TypeError inconsistently, depending on which code path was taken. It now
+consistently raises a ValueError.
+
+The <ufunc>.reduce functions evaluate some reductions in a different order
+than in previous versions of NumPy, generally providing higher performance.
+Because of the nature of floating-point arithmetic, this may subtly change
+some results, just as linking NumPy to a different BLAS implementations
+such as MKL can.
+
+If upgrading from 1.5, note that in 1.6 and 1.7 substantial code has been
+added and some code paths altered, particularly in the
+areas of type resolution and buffered iteration over universal functions.
+This might have an impact on your code particularly if you relied on
+accidental behavior in the past.
+
+New features
+============
+
+Reduction UFuncs Generalize axis= Parameter
+-------------------------------------------
+
+Any ufunc.reduce function call, as well as other reductions like sum, prod,
+any, all, max and min support the ability to choose a subset of the axes to
+reduce over. Previously, one could say axis=None to mean all the axes or
+axis=# to pick a single axis. Now, one can also say axis=(#,#) to pick a
+list of axes for reduction.
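+
+For example (a minimal sketch)::
+
+    >>> import numpy as np
+    >>> a = np.arange(24).reshape(2, 3, 4)
+    >>> a.sum(axis=(0, 2))   # reduce over the first and last axes
+    array([ 60,  92, 124])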
+
+Reduction UFuncs New keepdims= Parameter
+----------------------------------------
+
+There is a new keepdims= parameter which, if set to True, doesn't throw
+away the reduction axes but instead sets them to have size one. When this
+option is set, the reduction result will broadcast correctly to the
+original operand which was reduced.
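+
+For example, normalizing along an axis (a minimal sketch)::
+
+    >>> a = np.arange(24.).reshape(2, 3, 4)
+    >>> s = a.sum(axis=2, keepdims=True)   # shape (2, 3, 1) rather than (2, 3)
+    >>> (a / s).shape   # broadcasts back against the original operand
+    (2, 3, 4)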
+
+Datetime support
+----------------
+
+.. note:: The datetime API is *experimental* in 1.7.0, and may undergo changes
+ in future versions of NumPy.
+
+There have been a lot of fixes and enhancements to datetime64 compared
+to NumPy 1.6:
+
+* the parser is quite strict about only accepting ISO 8601 dates, with a few
+ convenience extensions
+* converts between units correctly
+* datetime arithmetic works correctly
+* business day functionality (allows the datetime to be used in contexts where
+ only certain days of the week are valid)
+
+The notes in `doc/source/reference/arrays.datetime.rst <https://github.com/numpy/numpy/blob/maintenance/1.7.x/doc/source/reference/arrays.datetime.rst>`_
+(also available in the online docs at `arrays.datetime.html
+<https://docs.scipy.org/doc/numpy/reference/arrays.datetime.html>`_) should be
+consulted for more details.
+
+Custom formatter for printing arrays
+------------------------------------
+
+See the new ``formatter`` parameter of the ``numpy.set_printoptions``
+function.
+
+New function numpy.random.choice
+--------------------------------
+
+A generic sampling function has been added which will generate samples from
+a given array-like. The samples can be with or without replacement, and
+with uniform or given non-uniform probabilities.
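+
+For example (a sketch; the draws are random by construction)::
+
+    >>> draws = np.random.choice(5, size=3, replace=False)   # distinct draws from range(5)
+    >>> flips = np.random.choice(['heads', 'tails'], size=4, p=[0.9, 0.1])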
+
+New function isclose
+--------------------
+
+Returns a boolean array where two arrays are element-wise equal within a
+tolerance. Both relative and absolute tolerance can be specified.
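+
+For example (a minimal sketch; boolean array formatting may vary)::
+
+    >>> np.isclose([1.0, 2.0], [1.0 + 1e-9, 2.1], rtol=1e-5, atol=1e-8)
+    array([ True, False])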
+
+Preliminary multi-dimensional support in the polynomial package
+---------------------------------------------------------------
+
+Axis keywords have been added to the integration and differentiation
+functions and a tensor keyword was added to the evaluation functions.
+These additions allow multi-dimensional coefficient arrays to be used in
+those functions. New functions for evaluating 2-D and 3-D coefficient
+arrays on grids or sets of points were added together with 2-D and 3-D
+pseudo-Vandermonde matrices that can be used for fitting.
+
+
+Ability to pad rank-n arrays
+----------------------------
+
+A pad module containing functions for padding n-dimensional arrays has been
+added. The various private padding functions are exposed as options to a
+public 'pad' function. Example::
+
+ pad(a, 5, mode='mean')
+
+Current modes are ``constant``, ``edge``, ``linear_ramp``, ``maximum``,
+``mean``, ``median``, ``minimum``, ``reflect``, ``symmetric``, ``wrap``, and
+``<function>``.
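+
+A concrete doctest sketch with another mode::
+
+    >>> import numpy as np
+    >>> np.pad(np.array([1, 2, 3]), 2, mode='edge')   # repeat the edge values
+    array([1, 1, 1, 2, 3, 3, 3])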
+
+
+New argument to searchsorted
+----------------------------
+
+The function searchsorted now accepts a 'sorter' argument that is a
+permutation array that sorts the array to search.
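+
+For example (a minimal sketch)::
+
+    >>> a = np.array([3, 1, 2])
+    >>> order = np.argsort(a)   # permutation that sorts a
+    >>> print(np.searchsorted(a, 2, sorter=order))   # index in the sorted order
+    1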
+
+Build system
+------------
+
+Added experimental support for the AArch64 architecture.
+
+C API
+-----
+
+New function ``PyArray_FailUnlessWriteable`` provides a consistent interface
+for checking array writeability -- any C code which works with arrays whose
+WRITEABLE flag is not known to be True a priori, should make sure to call
+this function before writing.
+
+NumPy C Style Guide added (``doc/C_STYLE_GUIDE.rst.txt``).
+
+Changes
+=======
+
+General
+-------
+
+The function np.concatenate tries to match the layout of its input arrays.
+Previously, the layout did not follow any particular rule, and depended
+in an undesirable way on the particular axis chosen for concatenation. A
+bug was also fixed which silently allowed out of bounds axis arguments.
+
+The ufuncs logical_or, logical_and, and logical_not now follow Python's
+behavior with object arrays, instead of trying to call methods on the
+objects. For example the expression (3 and 'test') produces the string
+'test', and now np.logical_and(np.array(3, 'O'), np.array('test', 'O'))
+produces 'test' as well.
+
+The ``.base`` attribute on ndarrays, which is used on views to ensure that the
+underlying array owning the memory is not deallocated prematurely, now
+collapses out references when you have a view-of-a-view. For example::
+
+ a = np.arange(10)
+ b = a[1:]
+ c = b[1:]
+
+In numpy 1.6, ``c.base`` is ``b``, and ``c.base.base`` is ``a``. In numpy 1.7,
+``c.base`` is ``a``.
+
+To increase backwards compatibility for software which relies on the old
+behaviour of ``.base``, we only 'skip over' objects which have exactly the same
+type as the newly created view. This makes a difference if you use ``ndarray``
+subclasses. For example, if we have a mix of ``ndarray`` and ``matrix`` objects
+which are all views on the same original ``ndarray``::
+
+ a = np.arange(10)
+ b = np.asmatrix(a)
+ c = b[0, 1:]
+ d = c[0, 1:]
+
+then ``d.base`` will be ``b``. This is because ``d`` is a ``matrix`` object,
+and so the collapsing process only continues so long as it encounters other
+``matrix`` objects. It considers ``c``, ``b``, and ``a`` in that order, and
+``b`` is the last entry in that list which is a ``matrix`` object.
+
+Casting Rules
+-------------
+
+Casting rules have undergone some changes in corner cases, due to the
+NA-related work. In particular for combinations of scalar+scalar:
+
+* the `longlong` type (`q`) now stays `longlong` for operations with any other
+ number (`? b h i l q p B H I`), previously it was cast as `int_` (`l`). The
+ `ulonglong` type (`Q`) now stays as `ulonglong` instead of `uint` (`L`).
+
+* the `timedelta64` type (`m`) can now be mixed with any integer type (`b h i l
+ q p B H I L Q P`), previously it raised `TypeError`.
+
+For array + scalar, the above rules just broadcast except the case when
+the array and scalars are unsigned/signed integers, then the result gets
+converted to the array type (of possibly larger size) as illustrated by the
+following examples::
+
+ >>> (np.zeros((2,), dtype=np.uint8) + np.int16(257)).dtype
+ dtype('uint16')
+ >>> (np.zeros((2,), dtype=np.int8) + np.uint16(257)).dtype
+ dtype('int16')
+ >>> (np.zeros((2,), dtype=np.int16) + np.uint32(2**17)).dtype
+ dtype('int32')
+
+Whether the size gets increased depends on the size of the scalar, for
+example::
+
+ >>> (np.zeros((2,), dtype=np.uint8) + np.int16(255)).dtype
+ dtype('uint8')
+ >>> (np.zeros((2,), dtype=np.uint8) + np.int16(256)).dtype
+ dtype('uint16')
+
+Also a ``complex128`` scalar + ``float32`` array is cast to ``complex64``.
+
+In NumPy 1.7 the `datetime64` type (`M`) must be constructed by explicitly
+specifying the type as the second argument (e.g. ``np.datetime64(2000, 'Y')``).
+
+
+Deprecations
+============
+
+General
+-------
+
+Specifying a custom string formatter with a `_format` array attribute is
+deprecated. The new `formatter` keyword in ``numpy.set_printoptions`` or
+``numpy.array2string`` can be used instead.
+
+The deprecated imports in the polynomial package have been removed.
+
+``concatenate`` now raises DepractionWarning for 1D arrays if ``axis != 0``.
+Versions of numpy < 1.7.0 ignored axis argument value for 1D arrays. We
+allow this for now, but in due course we will raise an error.
+
+C-API
+-----
+
+Direct access to the fields of PyArrayObject* has been deprecated. Direct
+access has been recommended against for many releases. Expect similar
+deprecations for PyArray_Descr* and other core objects in the future as
+preparation for NumPy 2.0.
+
+The macros in old_defines.h are deprecated and will be removed in the next
+major release (>= 2.0). The sed script tools/replace_old_macros.sed can be
+used to replace these macros with the newer versions.
+
+You can test your code against the deprecated C API by adding a line
+composed of ``#define NPY_NO_DEPRECATED_API`` and the target version number,
+such as ``NPY_1_7_API_VERSION``, before including any NumPy headers.
+
+The ``NPY_CHAR`` member of the ``NPY_TYPES`` enum is deprecated and will be
+removed in NumPy 1.8. See the discussion at
+`gh-2801 <https://github.com/numpy/numpy/issues/2801>`_ for more details.
--- /dev/null
+=========================
+NumPy 1.7.1 Release Notes
+=========================
+
+This is a bugfix only release in the 1.7.x series.
+It supports Python 2.4 - 2.7 and 3.1 - 3.3 and is the last series that
+supports Python 2.4 - 2.5.
+
+
+Issues fixed
+============
+
+* gh-2973: Fix `1` is printed during numpy.test()
+* gh-2983: BUG: gh-2969: Backport memory leak fix 80b3a34.
+* gh-3007: Backport gh-3006
+* gh-2984: Backport fix complex polynomial fit
+* gh-2982: BUG: Make nansum work with booleans.
+* gh-2985: Backport large sort fixes
+* gh-3039: Backport object take
+* gh-3105: Backport nditer fix op axes initialization
+* gh-3108: BUG: npy-pkg-config ini files were missing after Bento build.
+* gh-3124: BUG: PyArray_LexSort allocates too much temporary memory.
+* gh-3131: BUG: Exported f2py_size symbol prevents linking multiple f2py modules.
+* gh-3117: Backport gh-2992
+* gh-3135: DOC: Add mention of PyArray_SetBaseObject stealing a reference
+* gh-3134: DOC: Fix typo in fft docs (the indexing variable is 'm', not 'n').
+* gh-3136: Backport #3128
--- /dev/null
+=========================
+NumPy 1.7.2 Release Notes
+=========================
+
+This is a bugfix only release in the 1.7.x series.
+It supports Python 2.4 - 2.7 and 3.1 - 3.3 and is the last series that
+supports Python 2.4 - 2.5.
+
+
+Issues fixed
+============
+
+* gh-3153: Do not reuse nditer buffers when not filled enough
+* gh-3192: f2py crashes with UnboundLocalError exception
+* gh-442: Concatenate with axis=None now requires equal number of array elements
+* gh-2485: Fix for astype('S') string truncate issue
+* gh-3312: bug in count_nonzero
+* gh-2684: numpy.ma.average casts complex to float under certain conditions
+* gh-2403: masked array with named components does not behave as expected
+* gh-2495: np.ma.compress treated inputs in wrong order
+* gh-576: add __len__ method to ma.mvoid
+* gh-3364: reduce performance regression of mmap slicing
+* gh-3421: fix non-swapping strided copies in GetStridedCopySwap
+* gh-3373: fix small leak in datetime metadata initialization
+* gh-2791: add platform specific python include directories to search paths
+* gh-3168: fix undefined function and add integer divisions
+* gh-3301: memmap does not work with TemporaryFile in python3
+* gh-3057: distutils.misc_util.get_shared_lib_extension returns wrong debug extension
+* gh-3472: add module extensions to load_library search list
+* gh-3324: Make comparison function (gt, ge, ...) respect __array_priority__
+* gh-3497: np.insert behaves incorrectly with argument 'axis=-1'
+* gh-3541: make preprocessor tests consistent in halffloat.c
+* gh-3458: array_ass_boolean_subscript() writes 'non-existent' data to array
+* gh-2892: Regression in ufunc.reduceat with zero-sized index array
+* gh-3608: Regression when filling struct from tuple
+* gh-3701: add support for Python 3.4 ast.NameConstant
+* gh-3712: do not assume that GIL is enabled in xerbla
+* gh-3712: fix LAPACK error handling in lapack_litemodule
+* gh-3728: f2py fix decref on wrong object
+* gh-3743: Hash changed signature in Python 3.3
+* gh-3793: scalar int hashing broken on 64 bit python3
+* gh-3160: SandboxViolation easyinstalling 1.7.0 on Mac OS X 10.8.3
+* gh-3871: npy_math.h has invalid isinf for Solaris with SUNWspro12.2
+* gh-2561: Disable check for oldstyle classes in python3
+* gh-3900: Ensure NotImplemented is passed on in MaskedArray ufunc's
+* gh-2052: del scalar subscript causes segfault
+* gh-3832: fix a few uninitialized uses and memleaks
+* gh-3971: f2py changed string.lowercase to string.ascii_lowercase for python3
+* gh-3480: numpy.random.binomial raised ValueError for n == 0
+* gh-3992: hypot(inf, 0) shouldn't raise a warning, hypot(inf, inf) wrong result
+* gh-4018: Segmentation fault dealing with very large arrays
+* gh-4094: fix NaT handling in _strided_to_strided_string_to_datetime
+* gh-4051: fix uninitialized use in _strided_to_strided_string_to_datetime
+* gh-4123: lexsort segfault
+* gh-4141: Fix a few issues that show up with python 3.4b1
--- /dev/null
+=========================
+NumPy 1.8.0 Release Notes
+=========================
+
+This release supports Python 2.6 - 2.7 and 3.2 - 3.3.
+
+
+Highlights
+==========
+
+
+* New, no 2to3, Python 2 and Python 3 are supported by a common code base.
+* New, gufuncs for linear algebra, enabling operations on stacked arrays.
+* New, inplace fancy indexing for ufuncs with the ``.at`` method.
+* New, ``partition`` function, partial sorting via selection for fast median.
+* New, ``nanmean``, ``nanvar``, and ``nanstd`` functions skipping NaNs.
+* New, ``full`` and ``full_like`` functions to create value initialized arrays.
+* New, ``PyUFunc_RegisterLoopForDescr``, better ufunc support for user dtypes.
+* Numerous performance improvements in many areas.
+
+
+Dropped Support
+===============
+
+
+Support for Python versions 2.4 and 2.5 has been dropped.
+
+Support for SCons has been removed.
+
+
+Future Changes
+==============
+
+
+The Datetime64 type remains experimental in this release. In 1.9 there will
+probably be some changes to make it more usable.
+
+The diagonal method currently returns a new array and raises a
+FutureWarning. In 1.9 it will return a readonly view.
+
+Multiple field selection from an array of structured type currently
+returns a new array and raises a FutureWarning. In 1.9 it will return a
+readonly view.
+
+The numpy/oldnumeric and numpy/numarray compatibility modules will be
+removed in 1.9.
+
+
+Compatibility notes
+===================
+
+
+The doc/sphinxext content has been moved into its own github repository
+(numpydoc_), and is included in numpy as a submodule. See the instructions in
+doc/HOWTO_BUILD_DOCS.rst.txt for how to access the content.
+
+.. _numpydoc: https://github.com/numpy/numpydoc
+
+The hash function of numpy.void scalars has been changed. Previously the
+pointer to the data was hashed as an integer. Now, the hash function uses
+the tuple-hash algorithm to combine the hash functions of the elements of
+the scalar, but only if the scalar is read-only.
+
+Numpy has switched its build system to using 'separate compilation' by
+default. In previous releases this was supported, but not default. This
+should produce the same results as the old system, but if you're trying to
+do something complicated like link numpy statically or using an unusual
+compiler, then it's possible you will encounter problems. If so, please
+file a bug and as a temporary workaround you can re-enable the old build
+system by exporting the shell variable NPY_SEPARATE_COMPILATION=0.
+
+For the AdvancedNew iterator the ``oa_ndim`` flag should now be -1 to indicate
+that no ``op_axes`` and ``itershape`` are passed in. The ``oa_ndim == 0``
+case now indicates a 0-D iteration with ``op_axes`` being NULL, and the old
+usage is deprecated. This does not affect the ``NpyIter_New`` or
+``NpyIter_MultiNew`` functions.
+
+The functions nanargmin and nanargmax now return np.iinfo(np.intp).min for
+the index in all-NaN slices. Previously the functions would raise a ValueError
+for array returns and NaN for scalar returns.
+
+NPY_RELAXED_STRIDES_CHECKING
+----------------------------
+There is a new compile time environment variable
+``NPY_RELAXED_STRIDES_CHECKING``. If this variable is set to 1, then
+numpy will consider more arrays to be C- or F-contiguous -- for
+example, it becomes possible to have a column vector which is
+considered both C- and F-contiguous simultaneously. The new definition
+is more accurate, allows for faster code that makes fewer unnecessary
+copies, and simplifies numpy's code internally. However, it may also
+break third-party libraries that make too-strong assumptions about the
+stride values of C- and F-contiguous arrays. (It is also currently
+known that this breaks Cython code using memoryviews, which will be
+fixed in Cython.) THIS WILL BECOME THE DEFAULT IN A FUTURE RELEASE, SO
+PLEASE TEST YOUR CODE NOW AGAINST NUMPY BUILT WITH::
+
+ NPY_RELAXED_STRIDES_CHECKING=1 python setup.py install
+
+You can check whether NPY_RELAXED_STRIDES_CHECKING is in effect by
+running::
+
+ np.ones((10, 1), order="C").flags.f_contiguous
+
+This will be ``True`` if relaxed strides checking is enabled, and
+``False`` otherwise. The typical problem we've seen so far is C code
+that works with C-contiguous arrays, and assumes that the itemsize can
+be accessed by looking at the last element in the ``PyArray_STRIDES(arr)``
+array. When relaxed strides are in effect, this is not true (and in
+fact, it never was true in some corner cases). Instead, use
+``PyArray_ITEMSIZE(arr)``.
+
+For more information check the "Internal memory layout of an ndarray"
+section in the documentation.
+
+Binary operations with non-arrays as second argument
+----------------------------------------------------
+Binary operations of the form ``<array-or-subclass> * <non-array-subclass>``
+where ``<non-array-subclass>`` declares an ``__array_priority__`` higher than
+that of ``<array-or-subclass>`` will now unconditionally return
+*NotImplemented*, giving ``<non-array-subclass>`` a chance to handle the
+operation. Previously, `NotImplemented` would only be returned if
+``<non-array-subclass>`` actually implemented the reversed operation, and after
+a (potentially expensive) array conversion of ``<non-array-subclass>`` had been
+attempted. (`bug <https://github.com/numpy/numpy/issues/3375>`_, `pull request
+<https://github.com/numpy/numpy/pull/3501>`_)
+
+Function `median` used with `overwrite_input` only partially sorts array
+------------------------------------------------------------------------
+If `median` is used with the `overwrite_input` option, the input array will
+now only be partially sorted instead of fully sorted.
+
+Fix to financial.npv
+--------------------
+The npv function had a bug. Contrary to what the documentation stated, it
+summed from indexes ``1`` to ``M`` instead of from ``0`` to ``M - 1``. The
+fix changes the returned value. The mirr function called the npv function,
+but worked around the problem, so that was also fixed and the return value
+of the mirr function remains unchanged.
+
+Runtime warnings when comparing NaN numbers
+-------------------------------------------
+Comparing ``NaN`` floating point numbers now raises the ``invalid`` runtime
+warning. If a ``NaN`` is expected the warning can be ignored using np.errstate.
+E.g.::
+
+ with np.errstate(invalid='ignore'):
+ operation()
+
+
+New Features
+============
+
+
+Support for linear algebra on stacked arrays
+--------------------------------------------
+The gufunc machinery is now used for np.linalg, allowing operations on
+stacked arrays and vectors. For example::
+
+ >>> a
+ array([[[ 1., 1.],
+ [ 0., 1.]],
+
+ [[ 1., 1.],
+ [ 0., 1.]]])
+
+ >>> np.linalg.inv(a)
+ array([[[ 1., -1.],
+ [ 0., 1.]],
+
+ [[ 1., -1.],
+ [ 0., 1.]]])
+
+In place fancy indexing for ufuncs
+----------------------------------
+The function ``at`` has been added to ufunc objects to allow in place
+ufuncs with no buffering when fancy indexing is used. For example, the
+following will increment the first and second items in the array, and will
+increment the third item twice: ``numpy.add.at(arr, [0, 1, 2, 2], 1)``
+
+This is what many have mistakenly thought ``arr[[0, 1, 2, 2]] += 1`` would do,
+but that does not work as the incremented value of ``arr[2]`` is simply copied
+into the third slot in ``arr`` twice, not incremented twice.
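+
+A minimal doctest sketch::
+
+    >>> import numpy as np
+    >>> arr = np.zeros(4, dtype=int)
+    >>> np.add.at(arr, [0, 1, 2, 2], 1)   # index 2 is incremented twice
+    >>> arr
+    array([1, 1, 2, 0])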
+
+New functions `partition` and `argpartition`
+--------------------------------------------
+New functions to partially sort arrays via a selection algorithm.
+
+A ``partition`` by index ``k`` moves the ``k``-th smallest element into
+position ``k`` of the array. All elements before ``k`` are then smaller than
+or equal to the value in position ``k``, and all elements following ``k`` are
+greater than or equal to it. The ordering of the values within these bounds is
+undefined.
+A sequence of indices can be provided to sort all of them into their sorted
+position at once via iterative partitioning.
+This can be used to efficiently obtain order statistics like median or
+percentiles of samples.
+``partition`` has a linear time complexity of ``O(n)`` while a full sort has
+``O(n log(n))``.
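+
+For example (a minimal sketch; only position ``k`` is guaranteed, the order
+around it is unspecified)::
+
+    >>> a = np.array([7, 1, 5, 3, 9])
+    >>> print(np.partition(a, 2)[2])   # the 3rd smallest value, i.e. the median
+    5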
+
+New functions `nanmean`, `nanvar` and `nanstd`
+----------------------------------------------
+New nan aware statistical functions are added. In these functions the
+results are what would be obtained if nan values were omitted from all
+computations.
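+
+For example (a minimal sketch)::
+
+    >>> print(np.nanmean([1.0, np.nan, 3.0]))   # the nan is omitted
+    2.0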
+
+New functions `full` and `full_like`
+------------------------------------
+New convenience functions to create arrays filled with a specific value;
+complementary to the existing `zeros` and `zeros_like` functions.
+
+IO compatibility with large files
+---------------------------------
+Large NPZ files >2GB can be loaded on 64-bit systems.
+
+Building against OpenBLAS
+-------------------------
+It is now possible to build numpy against OpenBLAS by editing site.cfg.
+
+New constant
+------------
+Euler's constant is now exposed in numpy as euler_gamma.
+
+New modes for qr
+----------------
+New modes 'complete', 'reduced', and 'raw' have been added to the qr
+factorization and the old 'full' and 'economic' modes are deprecated.
+The 'reduced' mode replaces the old 'full' mode and is the default as was
+the 'full' mode, so backward compatibility can be maintained by not
+specifying the mode.
+
+The 'complete' mode returns a full dimensional factorization, which can be
+useful for obtaining a basis for the orthogonal complement of the range
+space. The 'raw' mode returns arrays that contain the Householder
+reflectors and scaling factors that can be used in the future to apply q
+without needing to convert to a matrix. The 'economic' mode is simply
+deprecated, there isn't much use for it and it isn't any more efficient
+than the 'raw' mode.
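+
+A minimal sketch of the new modes::
+
+    >>> import numpy as np
+    >>> a = np.random.rand(6, 4)
+    >>> q, r = np.linalg.qr(a, mode='reduced')     # the default; q is (6, 4)
+    >>> q2, r2 = np.linalg.qr(a, mode='complete')  # q2 is the full (6, 6) basis
+    >>> q.shape, q2.shape
+    ((6, 4), (6, 6))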
+
+New `invert` argument to `in1d`
+-------------------------------
+The function `in1d` now accepts an `invert` argument which, when `True`,
+causes the returned array to be inverted.
+
+Advanced indexing using `np.newaxis`
+------------------------------------
+It is now possible to use `np.newaxis`/`None` together with index
+arrays instead of only in simple indices. This means that
+``array[np.newaxis, [0, 1]]`` will now work as expected and select the first
+two rows while prepending a new axis to the array.
+
+
+C-API
+-----
+New ufuncs can now be registered with builtin input types and a custom
+output type. Before this change, NumPy wouldn't be able to find the right
+ufunc loop function when the ufunc was called from Python, because the ufunc
+loop signature matching logic wasn't looking at the output operand type.
+Now the correct ufunc loop is found, as long as the user provides an output
+argument with the correct output type.
+
+runtests.py
+-----------
+A simple test runner script ``runtests.py`` was added. It also builds Numpy via
+``setup.py build`` and can be used to run tests easily during development.
+
+
+Improvements
+============
+
+IO performance improvements
+---------------------------
+Performance in reading large files was improved by chunking (see also IO compatibility).
+
+Performance improvements to `pad`
+---------------------------------
+The `pad` function has a new implementation, greatly improving performance for
+all inputs except `mode=<function>` (retained for backwards compatibility).
+Scaling with dimensionality is dramatically improved for rank >= 4.
+
+Performance improvements to `isnan`, `isinf`, `isfinite` and `byteswap`
+-----------------------------------------------------------------------
+`isnan`, `isinf`, `isfinite` and `byteswap` have been improved to take
+advantage of compiler builtins to avoid expensive calls to libc.
+This improves performance of these operations by about a factor of two on gnu
+libc systems.
+
+Performance improvements via SSE2 vectorization
+-----------------------------------------------
+Several functions have been optimized to make use of SSE2 CPU SIMD instructions.
+
+* Float32 and float64:
+ * base math (`add`, `subtract`, `divide`, `multiply`)
+ * `sqrt`
+ * `minimum/maximum`
+ * `absolute`
+* Bool:
+ * `logical_or`
+ * `logical_and`
+ * `logical_not`
+
+This improves performance of these operations up to 4x/2x for float32/float64
+and up to 10x for bool depending on the location of the data in the CPU caches.
+The performance gain is greatest for in-place operations.
+
+In order to use the improved functions the SSE2 instruction set must be enabled
+at compile time. It is enabled by default on x86_64 systems. On x86_32 with a
+capable CPU it must be enabled by passing the appropriate flag to the CFLAGS
+build variable (-msse2 with gcc).
+
+Performance improvements to `median`
+------------------------------------
+`median` is now implemented in terms of `partition` instead of `sort` which
+reduces its time complexity from O(n log(n)) to O(n).
+If used with the `overwrite_input` option the array will now only be partially
+sorted instead of fully sorted.
+
+
+Overrideable operand flags in ufunc C-API
+-----------------------------------------
+When creating a ufunc, the default ufunc operand flags can be overridden
+via the new op_flags attribute of the ufunc object. For example, to set
+the operand flag for the first input to read/write::
+
+    PyObject *ufunc = PyUFunc_FromFuncAndData(...);
+    ufunc->op_flags[0] = NPY_ITER_READWRITE;
+
+This allows a ufunc to perform an operation in place. Also, global nditer flags
+can be overridden via the new iter_flags attribute of the ufunc object.
+For example, to set the reduce flag for a ufunc::
+
+    ufunc->iter_flags = NPY_ITER_REDUCE_OK;
+
+
+Changes
+=======
+
+
+General
+-------
+The function np.take now allows 0-d arrays as indices.
+
+The separate compilation mode is now enabled by default.
+
+Several changes to np.insert and np.delete:
+
+* Previously, negative indices and indices that pointed past the end of
+ the array were simply ignored. Now, this will raise a FutureWarning or
+ DeprecationWarning. In the future they will be treated like normal
+ indexing treats them -- negative indices will wrap around, and
+ out-of-bounds indices will generate an error.
+* Previously, boolean indices were treated as if they were integers (always
+ referring to either the 0th or 1st item in the array). In the future, they
+ will be treated as masks. In this release, they raise a FutureWarning
+ warning of this coming change.
+* In Numpy 1.7, np.insert already allowed the syntax
+ `np.insert(arr, 3, [1,2,3])` to insert multiple items at a single position.
+ In Numpy 1.8, this is also possible for `np.insert(arr, [3], [1, 2, 3])`,
+ as illustrated below.
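+
+As an example of the two equivalent spellings mentioned above::
+
+    >>> arr = np.array([0, 0, 0, 0])
+    >>> np.insert(arr, 3, [1, 2, 3])
+    array([0, 0, 0, 1, 2, 3, 0])
+    >>> np.insert(arr, [3], [1, 2, 3])
+    array([0, 0, 0, 1, 2, 3, 0])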
+
+Padded regions from np.pad are now correctly rounded, not truncated.
+
+C-API Array Additions
+---------------------
+Four new functions have been added to the array C-API.
+
+* PyArray_Partition
+* PyArray_ArgPartition
+* PyArray_SelectkindConverter
+* PyDataMem_NEW_ZEROED
+
+C-API Ufunc Additions
+---------------------
+One new function has been added to the ufunc C-API that allows registering
+an inner loop for user types using the descr.
+
+* PyUFunc_RegisterLoopForDescr
+
+C-API Developer Improvements
+----------------------------
+The ``PyArray_Type`` instance creation function ``tp_new`` now
+uses ``tp_basicsize`` to determine how much memory to allocate.
+In previous releases only ``sizeof(PyArrayObject)`` bytes of
+memory were allocated, often requiring C-API subtypes to
+reimplement ``tp_new``.
+
+Deprecations
+============
+
+The 'full' and 'economic' modes of qr factorization are deprecated.
+
+General
+-------
+The use of non-integers for indices and most integer arguments has been
+deprecated. Previously float indices and function arguments such as axes or
+shapes were truncated to integers without warning. For example
+`arr.reshape(3., -1)` or `arr[0.]` will trigger a deprecation warning in
+NumPy 1.8, and in some future version of NumPy they will raise an error.
+
+
+Authors
+=======
+
+This release contains work by the following people who contributed at least
+one patch to this release. The names are in alphabetical order by first name:
+
+* 87
+* Adam Ginsburg +
+* Adam Griffiths +
+* Alexander Belopolsky +
+* Alex Barth +
+* Alex Ford +
+* Andreas Hilboll +
+* Andreas Kloeckner +
+* Andreas Schwab +
+* Andrew Horton +
+* argriffing +
+* Arink Verma +
+* Bago Amirbekian +
+* Bartosz Telenczuk +
+* bebert218 +
+* Benjamin Root +
+* Bill Spotz +
+* Bradley M. Froehle
+* Carwyn Pelley +
+* Charles Harris
+* Chris
+* Christian Brueffer +
+* Christoph Dann +
+* Christoph Gohlke
+* Dan Hipschman +
+* Daniel +
+* Dan Miller +
+* daveydave400 +
+* David Cournapeau
+* David Warde-Farley
+* Denis Laxalde
+* dmuellner +
+* Edward Catmur +
+* Egor Zindy +
+* endolith
+* Eric Firing
+* Eric Fode
+* Eric Moore +
+* Eric Price +
+* Fazlul Shahriar +
+* Félix Hartmann +
+* Fernando Perez
+* Frank B +
+* Frank Breitling +
+* Frederic
+* Gabriel
+* GaelVaroquaux
+* Guillaume Gay +
+* Han Genuit
+* HaroldMills +
+* hklemm +
+* jamestwebber +
+* Jason Madden +
+* Jay Bourque
+* jeromekelleher +
+* Jesús Gómez +
+* jmozmoz +
+* jnothman +
+* Johannes Schönberger +
+* John Benediktsson +
+* John Salvatier +
+* John Stechschulte +
+* Jonathan Waltman +
+* Joon Ro +
+* Jos de Kloe +
+* Joseph Martinot-Lagarde +
+* Josh Warner (Mac) +
+* Jostein Bø Fløystad +
+* Juan Luis Cano Rodríguez +
+* Julian Taylor +
+* Julien Phalip +
+* K.-Michael Aye +
+* Kumar Appaiah +
+* Lars Buitinck
+* Leon Weber +
+* Luis Pedro Coelho
+* Marcin Juszkiewicz
+* Mark Wiebe
+* Marten van Kerkwijk +
+* Martin Baeuml +
+* Martin Spacek
+* Martin Teichmann +
+* Matt Davis +
+* Matthew Brett
+* Maximilian Albert +
+* m-d-w +
+* Michael Droettboom
+* mwtoews +
+* Nathaniel J. Smith
+* Nicolas Scheffer +
+* Nils Werner +
+* ochoadavid +
+* Ondřej Čertík
+* ovillellas +
+* Paul Ivanov
+* Pauli Virtanen
+* peterjc
+* Ralf Gommers
+* Raul Cota +
+* Richard Hattersley +
+* Robert Costa +
+* Robert Kern
+* Rob Ruana +
+* Ronan Lamy
+* Sandro Tosi
+* Sascha Peilicke +
+* Sebastian Berg
+* Skipper Seabold
+* Stefan van der Walt
+* Steve +
+* Takafumi Arakaki +
+* Thomas Robitaille +
+* Tomas Tomecek +
+* Travis E. Oliphant
+* Valentin Haenel
+* Vladimir Rutsky +
+* Warren Weckesser
+* Yaroslav Halchenko
+* Yury V. Zaytsev +
+
+A total of 119 people contributed to this release.
+People with a "+" by their names contributed a patch for the first time.
--- /dev/null
+=========================
+NumPy 1.8.1 Release Notes
+=========================
+
+This is a bugfix only release in the 1.8.x series.
+
+
+Issues fixed
+============
+
+* gh-4276: Fix mean, var, std methods for object arrays
+* gh-4262: remove insecure mktemp usage
+* gh-2385: absolute(complex(inf)) raises invalid warning in python3
+* gh-4024: Sequence assignment doesn't raise exception on shape mismatch
+* gh-4027: Fix chunked reading of strings longer than BUFFERSIZE
+* gh-4109: Fix object scalar return type of 0-d array indices
+* gh-4018: fix missing check for memory allocation failure in ufuncs
+* gh-4156: high order linalg.norm discards imaginary elements of complex arrays
+* gh-4144: linalg: norm fails on longdouble, signed int
+* gh-4094: fix NaT handling in _strided_to_strided_string_to_datetime
+* gh-4051: fix uninitialized use in _strided_to_strided_string_to_datetime
+* gh-4093: Loading compressed .npz file fails under Python 2.6.6
+* gh-4138: segfault with non-native endian memoryview in python 3.4
+* gh-4123: Fix missing NULL check in lexsort
+* gh-4170: fix native-only long long check in memoryviews
+* gh-4187: Fix large file support on 32 bit
+* gh-4152: fromfile: ensure file handle positions are in sync in python3
+* gh-4176: clang compatibility: Typos in conversion_utils
+* gh-4223: Fetching a non-integer item caused array return
+* gh-4197: fix minor memory leak in memoryview failure case
+* gh-4206: fix build with single-threaded python
+* gh-4220: add versionadded:: 1.8.0 to ufunc.at docstring
+* gh-4267: improve handling of memory allocation failure
+* gh-4267: fix use of capi without gil in ufunc.at
+* gh-4261: Detect vendor versions of GNU Compilers
+* gh-4253: IRR was returning nan instead of valid negative answer
+* gh-4254: fix unnecessary byte order flag change for byte arrays
+* gh-3263: numpy.random.shuffle clobbers mask of a MaskedArray
+* gh-4270: np.random.shuffle does not work with flexible dtypes
+* gh-3173: Segmentation fault when passing a 'size' argument to random.multinomial
+* gh-2799: allow using unique with lists of complex
+* gh-3504: fix linspace truncation for integer array scalar
+* gh-4191: get_info('openblas') does not read libraries key
+* gh-3348: Access violation in _descriptor_from_pep3118_format
+* gh-3175: segmentation fault with numpy.array() from bytearray
+* gh-4266: histogramdd - wrong result for entries very close to last boundary
+* gh-4408: Fix stride_stricks.as_strided function for object arrays
+* gh-4225: fix log1p and expm1 return for np.inf on windows compiler builds
+* gh-4359: Fix infinite recursion in str.format of flex arrays
+* gh-4145: Incorrect shape of broadcast result with the exponent operator
+* gh-4483: Fix commutativity of {dot,multiply,inner}(scalar, matrix_of_objs)
+* gh-4466: Delay npyiter size check when size may change
+* gh-4485: Buffered stride was erroneously marked fixed
+* gh-4354: byte_bounds fails with datetime dtypes
+* gh-4486: segfault/error converting from/to high-precision datetime64 objects
+* gh-4428: einsum(None, None, None, None) causes segfault
+* gh-4134: uninitialized use for size 1 object reductions
+
+Changes
+=======
+
+NDIter
+------
+When ``NpyIter_RemoveAxis`` is called, the iterator range will now be reset.
+
+When a multi index is being tracked and an iterator is not buffered, it is
+possible to use ``NpyIter_RemoveAxis``. In this case an iterator can shrink
+in size. Because the total size of an iterator is limited, the iterator
+may be too large before these calls. In this case its size will be set to ``-1``
+and an error issued not at construction time but when removing the multi
+index, setting the iterator range, or getting the next function.
+
+This has no effect on currently working code, but highlights the necessity
+of checking for an error return if these conditions can occur. In most
+cases the arrays being iterated are as large as the iterator so that such
+a problem cannot occur.
+
+Optional reduced verbosity for np.distutils
+-------------------------------------------
+If ``numpy.distutils.system_info.system_info.verbosity = 0`` is set, then
+calls to ``numpy.distutils.system_info.get_info('blas_opt')`` will not
+print anything to the output. This is mostly for other packages using
+numpy.distutils.
+
+Deprecations
+============
+
+C-API
+-----
+
+The utility functions npy_PyFile_Dup and npy_PyFile_DupClose are broken by
+the internal buffering that Python 3 applies to its file objects.
+To fix this, two new functions, npy_PyFile_Dup2 and npy_PyFile_DupClose2,
+are declared in npy_3kcompat.h and the old functions are deprecated.
+Due to the fragile nature of these functions it is recommended to instead
+use the Python API when possible.
--- /dev/null
+=========================
+NumPy 1.8.2 Release Notes
+=========================
+
+This is a bugfix only release in the 1.8.x series.
+
+Issues fixed
+============
+
+* gh-4836: partition produces wrong results for multiple selections in equal ranges
+* gh-4656: Make fftpack._raw_fft threadsafe
+* gh-4628: incorrect argument order to _copyto in np.nanmax, np.nanmin
+* gh-4642: Hold GIL for converting dtypes with fields
+* gh-4733: fix np.linalg.svd(b, compute_uv=False)
+* gh-4853: avoid unaligned simd load on reductions on i386
+* gh-4722: Fix seg fault converting empty string to object
+* gh-4613: Fix lack of NULL check in array_richcompare
+* gh-4774: avoid unaligned access for strided byteswap
+* gh-650: Prevent division by zero when creating arrays from some buffers
+* gh-4602: ifort has issues with optimization flag O2, use O1
--- /dev/null
+=========================
+NumPy 1.9.0 Release Notes
+=========================
+
+This release supports Python 2.6 - 2.7 and 3.2 - 3.4.
+
+
+Highlights
+==========
+* Numerous performance improvements in various areas, most notably indexing and
+ operations on small arrays are significantly faster.
+ Indexing operations now also release the GIL.
+* Addition of `nanmedian` and `nanpercentile` rounds out the nanfunction set.
+
+
+Dropped Support
+===============
+
+* The oldnumeric and numarray modules have been removed.
+* The doc/pyrex and doc/cython directories have been removed.
+* The doc/numpybook directory has been removed.
+* The numpy/testing/numpytest.py file has been removed together with
+ the importall function it contained.
+
+
+Future Changes
+==============
+
+* The numpy/polynomial/polytemplate.py file will be removed in NumPy 1.10.0.
+* Default casting for inplace operations will change to 'same_kind' in
+ Numpy 1.10.0. This will certainly break some code that is currently
+ ignoring the warning.
+* Relaxed stride checking will be the default in 1.10.0
+* String version checks will break because, e.g., '1.9' > '1.10' is True. A
+ NumpyVersion class has been added that can be used for such comparisons.
+* The diagonal and diag functions will return writeable views in 1.10.0
+* The `S` and/or `a` dtypes may be changed to represent Python strings
+ instead of bytes; in Python 3 these two types are very different.
+
+
+Compatibility notes
+===================
+
+The diagonal and diag functions return readonly views.
+------------------------------------------------------
+In NumPy 1.8, the diagonal and diag functions returned readonly copies, in
+NumPy 1.9 they return readonly views, and in 1.10 they will return writeable
+views.
+
+Special scalar float values don't cause upcast to double anymore
+----------------------------------------------------------------
+In previous numpy versions operations involving floating point scalars
+containing special values ``NaN``, ``Inf`` and ``-Inf`` caused the result
+type to be at least ``float64``. As the special values can be represented
+in the smallest available floating point type, the upcast is not performed
+anymore.
+
+For example the dtype of:
+
+ ``np.array([1.], dtype=np.float32) * float('nan')``
+
+now remains ``float32`` instead of being cast to ``float64``.
+Operations involving non-special values have not been changed.
+
+Percentile output changes
+-------------------------
+If given more than one percentile to compute, numpy.percentile returns an
+array instead of a list. A single percentile still returns a scalar. The
+array is equivalent to converting the list returned in older versions
+to an array via ``np.array``.
+
+If the ``overwrite_input`` option is used the input is only partially
+instead of fully sorted.
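+
+For example::
+
+    >>> a = np.arange(10)
+    >>> np.percentile(a, 50)           # single percentile -> scalar
+    4.5
+    >>> np.percentile(a, [25, 75])     # multiple percentiles -> array
+    array([2.25, 6.75])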
+
+ndarray.tofile exception type
+-----------------------------
+All ``tofile`` exceptions are now ``IOError``, some were previously
+``ValueError``.
+
+Invalid fill value exceptions
+-----------------------------
+Two changes to numpy.ma.core._check_fill_value:
+
+* When the fill value is a string and the array type is not one of
+ 'OSUV', TypeError is raised instead of the default fill value being used.
+
+* When the fill value overflows the array type, TypeError is raised instead
+ of OverflowError.
+
+Polynomial Classes no longer derived from PolyBase
+--------------------------------------------------
+This may cause problems with folks who depended on the polynomial classes
+being derived from PolyBase. They are now all derived from the abstract
+base class ABCPolyBase. Strictly speaking, there should be a deprecation
+involved, but no external code making use of the old baseclass could be
+found.
+
+Using numpy.random.binomial may change the RNG state vs. numpy < 1.9
+--------------------------------------------------------------------
+A bug in one of the algorithms to generate a binomial random variate has
+been fixed. This change will likely alter the number of random draws
+performed, and hence the sequence location will be different after a
+call to distribution.c::rk_binomial_btpe. Any tests which rely on the RNG
+being in a known state should be checked and/or updated as a result.
+
+Random seed enforced to be a 32 bit unsigned integer
+----------------------------------------------------
+``np.random.seed`` and ``np.random.RandomState`` now throw a ``ValueError``
+if the seed cannot safely be converted to 32 bit unsigned integers.
+Applications that now fail can be fixed by masking the higher 32 bit values to
+zero: ``seed = seed & 0xFFFFFFFF``. This is what is done silently in older
+versions so the random stream remains the same.
+
+Argmin and argmax out argument
+------------------------------
+The ``out`` argument to ``np.argmin`` and ``np.argmax`` and their
+equivalent C-API functions is now checked to match the desired output shape
+exactly. If the check fails a ``ValueError`` instead of ``TypeError`` is
+raised.
+
+Einsum
+------
+Remove unnecessary broadcasting notation restrictions.
+``np.einsum('ijk,j->ijk', A, B)`` can also be written as
+``np.einsum('ij...,j->ij...', A, B)`` (the ellipsis is no longer required on 'j').
+
+Indexing
+--------
+
+The NumPy indexing has seen a complete rewrite in this version. This makes
+most advanced integer indexing operations much faster and should have no
+other implications. However some subtle changes and deprecations were
+introduced in advanced indexing operations:
+
+* Boolean indexing into scalar arrays will always return a new 1-d array.
+ This means that ``array(1)[array(True)]`` gives ``array([1])`` and
+ not the original array.
+
+* Advanced indexing into one dimensional arrays used to have
+ (undocumented) special handling regarding repeating the value array in
+ assignments when the shape of the value array was too small or did not
+ match. Code using this will raise an error. For compatibility you can
+ use ``arr.flat[index] = values``, which uses the old code branch. (for
+ example ``a = np.ones(10); a[np.arange(10)] = [1, 2, 3]``)
+
+* The iteration order over advanced indexes used to be always C-order.
+ In NumPy 1.9, the iteration order adapts to the inputs and is not
+ guaranteed (with the exception of a *single* advanced index which is
+ never reversed for compatibility reasons). This means that the result
+ is undefined if multiple values are assigned to the same element. An
+ example for this is ``arr[[0, 0], [1, 1]] = [1, 2]``, which may set
+ ``arr[0, 1]`` to either 1 or 2.
+
+* Equivalent to the iteration order, the memory layout of the advanced
+ indexing result is adapted for faster indexing and cannot be predicted.
+
+* All indexing operations return a view or a copy. No indexing operation
+ will return the original array object. (For example ``arr[...]``)
+
+* In the future Boolean array-likes (such as lists of python bools) will
+ always be treated as Boolean indexes and Boolean scalars (including
+ python ``True``) will be a legal *boolean* index. At this time, this is
+ already the case for scalar arrays to allow the general
+ ``positive = a[a > 0]`` to work when ``a`` is zero dimensional.
+
+* In NumPy 1.8 it was possible to use ``array(True)`` and
+ ``array(False)`` equivalent to 1 and 0 if the result of the operation
+ was a scalar. This will raise an error in NumPy 1.9 and, as noted
+ above, will be treated as a boolean index in the future.
+
+* All non-integer array-likes are deprecated, object arrays of custom
+ integer like objects may have to be cast explicitly.
+
+* The error reporting for advanced indexing is more informative, however
+ the error type has changed in some cases. (Broadcasting errors of
+ indexing arrays are reported as ``IndexError``)
+
+* Indexing with more than one ellipsis (``...``) is deprecated.
+
+Non-integer reduction axis indexes are deprecated
+-------------------------------------------------
+Non-integer axis indexes to reduction ufuncs like `add.reduce` or `sum` are
+deprecated.
+
+``promote_types`` and string dtype
+----------------------------------
+``promote_types`` function now returns a valid string length when given an
+integer or float dtype as one argument and a string dtype as another
+argument. Previously it always returned the input string dtype, even if it
+wasn't long enough to store the max integer/float value converted to a
+string.
+
+``can_cast`` and string dtype
+-----------------------------
+``can_cast`` function now returns False in "safe" casting mode for
+integer/float dtype and string dtype if the string dtype length is not long
+enough to store the max integer/float value converted to a string.
+Previously ``can_cast`` in "safe" mode returned True for integer/float
+dtype and a string dtype of any length.
+
+astype and string dtype
+-----------------------
+The ``astype`` method now returns an error if the string dtype to cast to
+is not long enough in "safe" casting mode to hold the max value of the
+integer/float array that is being cast. Previously the casting was
+allowed even if the result was truncated.
+
+`npyio.recfromcsv` keyword arguments change
+-------------------------------------------
+`npyio.recfromcsv` no longer accepts the undocumented `update` keyword,
+which used to override the `dtype` keyword.
+
+The ``doc/swig`` directory moved
+--------------------------------
+The ``doc/swig`` directory has been moved to ``tools/swig``.
+
+The ``npy_3kcompat.h`` header changed
+-------------------------------------
+The unused ``simple_capsule_dtor`` function has been removed from
+``npy_3kcompat.h``. Note that this header is not meant to be used outside
+of numpy; other projects should be using their own copy of this file when
+needed.
+
+Negative indices in C-Api ``sq_item`` and ``sq_ass_item`` sequence methods
+--------------------------------------------------------------------------
+When directly accessing the ``sq_item`` or ``sq_ass_item`` PyObject slots
+to get or set items, negative indices will not be supported anymore.
+``PySequence_GetItem`` and ``PySequence_SetItem`` however fix negative
+indices so that they can be used there.
+
+NDIter
+------
+When ``NpyIter_RemoveAxis`` is called, the iterator range will now be reset.
+
+When a multi index is being tracked and an iterator is not buffered, it is
+possible to use ``NpyIter_RemoveAxis``. In this case an iterator can shrink
+in size. Because the total size of an iterator is limited, the iterator
+may be too large before these calls. In this case its size will be set to ``-1``
+and an error issued not at construction time but when removing the multi
+index, setting the iterator range, or getting the next function.
+
+This has no effect on currently working code, but highlights the necessity
+of checking for an error return if these conditions can occur. In most
+cases the arrays being iterated are as large as the iterator so that such
+a problem cannot occur.
+
+This change was already applied to the 1.8.1 release.
+
+``zeros_like`` for string dtypes now returns empty strings
+----------------------------------------------------------
+To match the `zeros` function `zeros_like` now returns an array initialized
+with empty strings instead of an array filled with `'0'`.
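+
+For example (assuming a little-endian platform for the dtype repr)::
+
+    >>> np.zeros_like(np.array(['abc', 'de']))
+    array(['', ''], dtype='<U3')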
+
+
+New Features
+============
+
+Percentile supports more interpolation options
+----------------------------------------------
+``np.percentile`` now has the interpolation keyword argument to specify in
+which way points should be interpolated if the percentiles fall between two
+values. See the documentation for the available options.
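+
+A small sketch with two of the options (illustrative output)::
+
+    >>> a = np.arange(10)
+    >>> np.percentile(a, 50, interpolation='lower')
+    4
+    >>> np.percentile(a, 50, interpolation='higher')
+    5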
+
+Generalized axis support for median and percentile
+--------------------------------------------------
+``np.median`` and ``np.percentile`` now support generalized axis arguments like
+ufunc reductions do since 1.7. One can now say axis=(index, index) to pick a
+list of axes for the reduction. The ``keepdims`` keyword argument was also
+added to allow convenient broadcasting to arrays of the original shape.
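+
+For example::
+
+    >>> a = np.arange(24).reshape(2, 3, 4)
+    >>> np.median(a, axis=(0, 1))
+    array([10., 11., 12., 13.])
+    >>> np.median(a, axis=(0, 1), keepdims=True).shape
+    (1, 1, 4)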
+
+Dtype parameter added to ``np.linspace`` and ``np.logspace``
+------------------------------------------------------------
+The returned data type from the ``linspace`` and ``logspace`` functions can
+now be specified using the dtype parameter.
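+
+For example (repr spacing as in recent NumPy)::
+
+    >>> np.linspace(0, 1, 5, dtype=np.float32)
+    array([0.  , 0.25, 0.5 , 0.75, 1.  ], dtype=float32)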
+
+More general ``np.triu`` and ``np.tril`` broadcasting
+-----------------------------------------------------
+For arrays with ``ndim`` exceeding 2, these functions will now apply to the
+final two axes instead of raising an exception.
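+
+For example, with a stacked input the triangle is taken over the last two
+axes of each sub-array::
+
+    >>> a = np.ones((2, 3, 3))
+    >>> np.triu(a)[0]
+    array([[1., 1., 1.],
+           [0., 1., 1.],
+           [0., 0., 1.]])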
+
+``tobytes`` alias for ``tostring`` method
+-----------------------------------------
+``ndarray.tobytes`` and ``MaskedArray.tobytes`` have been added as aliases
+for ``tostring`` which exports arrays as ``bytes``. This is more consistent
+in Python 3 where ``str`` and ``bytes`` are not the same.
+
+Build system
+------------
+Added experimental support for the ppc64le and OpenRISC architectures.
+
+Compatibility to python ``numbers`` module
+------------------------------------------
+All numerical numpy types are now registered with the type hierarchy in
+the python ``numbers`` module.
+
+``increasing`` parameter added to ``np.vander``
+-----------------------------------------------
+The ordering of the columns of the Vandermonde matrix can be specified with
+this new boolean argument.
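+
+For example::
+
+    >>> np.vander([1, 2, 3], increasing=True)
+    array([[1, 1, 1],
+           [1, 2, 4],
+           [1, 3, 9]])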
+
+``return_counts`` parameter added to ``np.unique``
+--------------------------------------------------
+The number of times each unique item comes up in the input can now be
+obtained as an optional return value.
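+
+For example::
+
+    >>> np.unique([1, 1, 2, 2, 2, 3], return_counts=True)
+    (array([1, 2, 3]), array([2, 3, 1]))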
+
+Support for median and percentile in nanfunctions
+-------------------------------------------------
+The ``np.nanmedian`` and ``np.nanpercentile`` functions behave like
+the median and percentile functions except that NaNs are ignored.
+
+NumpyVersion class added
+------------------------
+The class may be imported from numpy.lib and can be used for version
+comparison when the numpy version goes to 1.10.devel. For example::
+
+ >>> from numpy.lib import NumpyVersion
+ >>> if NumpyVersion(np.__version__) < '1.10.0':
+ ... print('Wow, that is an old NumPy version!')
+
+Allow saving arrays with large number of named columns
+------------------------------------------------------
+The numpy storage format 1.0 only allowed the array header to have a total size
+of 65535 bytes. This can be exceeded by structured arrays with a large number
+of columns. A new format 2.0 has been added which extends the header size to 4
+GiB. `np.save` will automatically save in 2.0 format if the data requires it,
+else it will always use the more compatible 1.0 format.
+
+Full broadcasting support for ``np.cross``
+------------------------------------------
+``np.cross`` now properly broadcasts its two input arrays, even if they
+have different numbers of dimensions. In earlier versions this would result
+in either an error being raised, or wrong results computed.
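+
+For example, broadcasting a stack of vectors against a single vector::
+
+    >>> a = np.array([[1., 0., 0.], [0., 1., 0.]])   # shape (2, 3)
+    >>> b = np.array([0., 0., 1.])                   # shape (3,)
+    >>> np.cross(a, b)
+    array([[ 0., -1.,  0.],
+           [ 1.,  0.,  0.]])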
+
+
+Improvements
+============
+
+Better numerical stability for sum in some cases
+------------------------------------------------
+Pairwise summation is now used in the sum method, but only along the fast
+axis and for groups of values <= 8192 in length. This should also
+improve the accuracy of var and std in some common cases.
+
+Percentile implemented in terms of ``np.partition``
+---------------------------------------------------
+``np.percentile`` has been implemented in terms of ``np.partition`` which
+only partially sorts the data via a selection algorithm. This improves the
+time complexity from ``O(n log(n))`` to ``O(n)``.
+
+Performance improvement for ``np.array``
+----------------------------------------
+The performance of converting lists containing arrays to arrays using
+``np.array`` has been improved. It is now equivalent in speed to
+``np.vstack(list)``.
+
+Performance improvement for ``np.searchsorted``
+-----------------------------------------------
+For the built-in numeric types, ``np.searchsorted`` no longer relies on the
+data type's ``compare`` function to perform the search, but is now
+implemented by type specific functions. Depending on the size of the
+inputs, this can result in performance improvements over 2x.
+
+Optional reduced verbosity for np.distutils
+-------------------------------------------
+If ``numpy.distutils.system_info.system_info.verbosity = 0`` is set, then
+calls to ``numpy.distutils.system_info.get_info('blas_opt')`` will not
+print anything to the output. This is mostly for other packages using
+numpy.distutils.
+
+Covariance check in ``np.random.multivariate_normal``
+-----------------------------------------------------
+A ``RuntimeWarning`` is raised when the covariance matrix is not
+positive-semidefinite.
+
+Polynomial Classes no longer template based
+-------------------------------------------
+The polynomial classes have been refactored to use an abstract base class
+rather than a template in order to implement a common interface. This makes
+importing the polynomial package faster as the classes do not need to be
+compiled on import.
+
+More GIL releases
+-----------------
+Several more functions now release the Global Interpreter Lock allowing more
+efficient parallelization using the ``threading`` module. Most notably the
+GIL is now released for fancy indexing and ``np.where``, and the ``random``
+module now uses a per-state lock instead of the GIL.
+
+MaskedArray support for more complicated base classes
+-----------------------------------------------------
+Built-in assumptions that the baseclass behaved like a plain array are being
+removed. In particular, ``repr`` and ``str`` should now work more reliably.
+
+
+C-API
+-----
+
+
+Deprecations
+============
+
+Non-integer scalars for sequence repetition
+-------------------------------------------
+Using non-integer numpy scalars to repeat python sequences is deprecated.
+For example ``np.float_(2) * [1]`` will be an error in the future.
+
+``select`` input deprecations
+-----------------------------
+The integer and empty input to ``select`` is deprecated. In the future only
+boolean arrays will be valid conditions and an empty ``condlist`` will be
+considered an input error instead of returning the default.
+
+``rank`` function
+-----------------
+The ``rank`` function has been deprecated to avoid confusion with
+``numpy.linalg.matrix_rank``.
+
+Object array equality comparisons
+---------------------------------
+In the future, object array comparisons with both `==` and `np.equal` will
+no longer make use of identity checks. For example:
+
+>>> a = np.array([np.array([1, 2, 3]), 1])
+>>> b = np.array([np.array([1, 2, 3]), 1])
+>>> a == b
+
+will consistently return False (and in the future an error) even if the
+embedded array in `a` and `b` is the same object.
+
+The equality operator `==` will in the future raise errors like `np.equal`
+if broadcasting or element comparisons, etc. fail.
+
+Comparison with `arr == None` will in the future do an elementwise comparison
+instead of just returning False. Code should be using `arr is None`.
+
+All of these changes will give Deprecation- or FutureWarnings at this time.
+
+C-API
+-----
+
+The utility functions npy_PyFile_Dup and npy_PyFile_DupClose are broken by
+the internal buffering that Python 3 applies to its file objects.
+To fix this, two new functions, npy_PyFile_Dup2 and npy_PyFile_DupClose2,
+are declared in npy_3kcompat.h and the old functions are deprecated.
+Due to the fragile nature of these functions it is recommended to instead
+use the Python API when possible.
+
+This change was already applied to the 1.8.1 release.
--- /dev/null
+=========================
+NumPy 1.9.1 Release Notes
+=========================
+
+This is a bugfix only release in the 1.9.x series.
+
+Issues fixed
+============
+
+* gh-5184: restore the linear edge behaviour of gradient to what it was in < 1.9.
+ The second order behaviour is available via the `edge_order` keyword
+* gh-4007: workaround Accelerate sgemv crash on OSX 10.9
+* gh-5100: restore object dtype inference from iterable objects without `len()`
+* gh-5163: avoid gcc-4.1.2 (red hat 5) miscompilation causing a crash
+* gh-5138: fix nanmedian on arrays containing inf
+* gh-5240: fix not returning out array from ufuncs with subok=False set
+* gh-5203: copy inherited masks in MaskedArray.__array_finalize__
+* gh-2317: genfromtxt did not handle filling_values=0 correctly
+* gh-5067: restore api of npy_PyFile_DupClose in python2
+* gh-5063: cannot convert invalid sequence index to tuple
+* gh-5082: Segmentation fault with argmin() on unicode arrays
+* gh-5095: don't propagate subtypes from np.where
+* gh-5104: np.inner segfaults with SciPy's sparse matrices
+* gh-5251: Issue with fromarrays not using correct format for unicode arrays
+* gh-5136: Import dummy_threading if importing threading fails
+* gh-5148: Make numpy import when run with Python flag '-OO'
+* gh-5147: Einsum double contraction in particular order causes ValueError
+* gh-479: Make f2py work with intent(in out)
+* gh-5170: Make python2 .npy files readable in python3
+* gh-5027: Use 'll' as the default length specifier for long long
+* gh-4896: fix build error with MSVC 2013 caused by C99 complex support
+* gh-4465: Make PyArray_PutTo respect writeable flag
+* gh-5225: fix crash when using arange on datetime without dtype set
+* gh-5231: fix build in c99 mode
--- /dev/null
+=========================
+NumPy 1.9.2 Release Notes
+=========================
+
+This is a bugfix only release in the 1.9.x series.
+
+Issues fixed
+============
+
+* `#5316 <https://github.com/numpy/numpy/issues/5316>`__: fix too large dtype alignment of strings and complex types
+* `#5424 <https://github.com/numpy/numpy/issues/5424>`__: fix ma.median when used on ndarrays
+* `#5481 <https://github.com/numpy/numpy/issues/5481>`__: Fix astype for structured array fields of different byte order
+* `#5354 <https://github.com/numpy/numpy/issues/5354>`__: fix segfault when clipping complex arrays
+* `#5524 <https://github.com/numpy/numpy/issues/5524>`__: allow np.argpartition on non ndarrays
+* `#5612 <https://github.com/numpy/numpy/issues/5612>`__: Fixes ndarray.fill to accept full range of uint64
+* `#5155 <https://github.com/numpy/numpy/issues/5155>`__: Fix loadtxt with comments=None and a string None data
+* `#4476 <https://github.com/numpy/numpy/issues/4476>`__: Masked array view fails if structured dtype has datetime component
+* `#5388 <https://github.com/numpy/numpy/issues/5388>`__: Make RandomState.set_state and RandomState.get_state threadsafe
+* `#5390 <https://github.com/numpy/numpy/issues/5390>`__: make seed, randint and shuffle threadsafe
+* `#5374 <https://github.com/numpy/numpy/issues/5374>`__: Fixed incorrect assert_array_almost_equal_nulp documentation
+* `#5393 <https://github.com/numpy/numpy/issues/5393>`__: Add support for ATLAS > 3.9.33.
+* `#5313 <https://github.com/numpy/numpy/issues/5313>`__: PyArray_AsCArray caused segfault for 3d arrays
+* `#5492 <https://github.com/numpy/numpy/issues/5492>`__: handle out of memory in rfftf
+* `#4181 <https://github.com/numpy/numpy/issues/4181>`__: fix a few bugs in the random.pareto docstring
+* `#5359 <https://github.com/numpy/numpy/issues/5359>`__: minor changes to linspace docstring
+* `#4723 <https://github.com/numpy/numpy/issues/4723>`__: fix compile issues on AIX
--- /dev/null
+:orphan:
+
+==========================
+NumPy 1.xx.x Release Notes
+==========================
+
+
+Highlights
+==========
+
+
+New functions
+=============
+
+
+Deprecations
+============
+
+
+Future Changes
+==============
+
+
+Expired deprecations
+====================
+
+
+Compatibility notes
+===================
+
+
+C API changes
+=============
+
+
+New Features
+============
+
+
+Improvements
+============
+
+
+Changes
+=======
Indexing
********
-.. seealso:: :ref:`Indexing routines <routines.indexing>`
+.. seealso::
+
+ :ref:`Indexing <arrays.indexing>`
+
+ :ref:`Indexing routines <routines.indexing>`
.. automodule:: numpy.doc.indexing
==================
The only mandatory argument of :func:`~numpy.genfromtxt` is the source of
-the data. It can be a string, a list of strings, or a generator. If a
-single string is provided, it is assumed to be the name of a local or
-remote file, or an open file-like object with a :meth:`read` method, for
-example, a file or :class:`io.StringIO` object. If a list of strings
-or a generator returning strings is provided, each string is treated as one
-line in a file. When the URL of a remote file is passed, the file is
-automatically downloaded to the current directory and opened.
+the data. It can be a string, a list of strings, a generator or an open
+file-like object with a :meth:`read` method, for example, a file or
+:class:`io.StringIO` object. If a single string is provided, it is assumed
+to be the name of a local or remote file. If a list of strings or a generator
+returning strings is provided, each string is treated as one line in a file.
+When the URL of a remote file is passed, the file is automatically downloaded
+to the current directory and opened.
Recognized file types are text files and archives. Currently, the function
recognizes :class:`gzip` and :class:`bz2` (`bzip2`) archives. The type of
>>> # Without autostrip
>>> np.genfromtxt(StringIO(data), delimiter=",", dtype="|U5")
array([['1', ' abc ', ' 2'],
- ['3', ' xxx', ' 4']],
- dtype='|U5')
+ ['3', ' xxx', ' 4']], dtype='<U5')
>>> # With autostrip
>>> np.genfromtxt(StringIO(data), delimiter=",", dtype="|U5", autostrip=True)
array([['1', 'abc', '2'],
- ['3', 'xxx', '4']],
- dtype='|U5')
+ ['3', 'xxx', '4']], dtype='<U5')
The ``comments`` argument
... 9, 0
... """
>>> np.genfromtxt(StringIO(data), comments="#", delimiter=",")
- [[ 1. 2.]
- [ 3. 4.]
- [ 5. 6.]
- [ 7. 8.]
- [ 9. 0.]]
+ array([[1., 2.],
+ [3., 4.],
+ [5., 6.],
+ [7., 8.],
+ [9., 0.]])
.. versionadded:: 1.7.0
In the following example, the second column is converted from a string
representing a percentage to a float between 0 and 1::
- >>> convertfunc = lambda x: float(x.strip("%"))/100.
+ >>> convertfunc = lambda x: float(x.strip(b"%"))/100.
>>> data = u"1, 2.3%, 45.\n6, 78.9%, 0"
>>> names = ("i", "p", "n")
>>> # General case .....
>>> np.genfromtxt(StringIO(data), delimiter=",", names=names)
- array([(1.0, nan, 45.0), (6.0, nan, 0.0)],
+ array([(1., nan, 45.), (6., nan, 0.)],
dtype=[('i', '<f8'), ('p', '<f8'), ('n', '<f8')])
We need to keep in mind that by default, ``dtype=float``. A float is
To install NumPy run::
- python setup.py install
+ pip install .
To perform an in-place build that can be run from the source folder run::
*Note: for build instructions to do development work on NumPy itself, see*
:ref:`development-environment`.
+Testing
+-------
+
+Make sure to test your builds. To ensure everything stays in shape, see if all tests pass::
+
+ $ python runtests.py -v -m full
+
+For detailed info on testing, see :ref:`testing-builds`.
+
.. _parallel-builds:
Parallel builds
BLAS=None LAPACK=None ATLAS=None python setup.py build
+64-bit BLAS and LAPACK
+~~~~~~~~~~~~~~~~~~~~~~
+
+You can tell Numpy to use 64-bit BLAS/LAPACK libraries by setting the
+environment variable::
+
+ NPY_USE_BLAS_ILP64=1
+
+when building Numpy. The following 64-bit BLAS/LAPACK libraries are
+supported:
+
+1. OpenBLAS ILP64 with ``64_`` symbol suffix (``openblas64_``)
+2. OpenBLAS ILP64 without symbol suffix (``openblas_ilp64``)
+
+The order in which they are preferred is determined by
+``NPY_BLAS_ILP64_ORDER`` and ``NPY_LAPACK_ILP64_ORDER`` environment
+variables. The default value is ``openblas64_,openblas_ilp64``.
+
+.. note::
+
+ Using non-symbol-suffixed 64-bit BLAS/LAPACK in a program that also
+ uses 32-bit BLAS/LAPACK can cause crashes under certain conditions
+ (e.g. with embedded Python interpreters on Linux).
+
+ The 64-bit OpenBLAS with ``64_`` symbol suffix is obtained by
+ compiling OpenBLAS with settings::
+
+ make INTERFACE64=1 SYMBOLSUFFIX=64_
+
+ The symbol suffix avoids the symbol name clashes between 32-bit and
+ 64-bit BLAS/LAPACK libraries.
+
+
Supplying additional compiler flags
-----------------------------------
1-d loops registered to handle it separately. Also checking for
whether or not other data-types can be cast "safely" to and from this
new type or not will always return "can cast" unless you also register
-which types your new data-type can be cast to and from. Adding
-data-types is one of the less well-tested areas for NumPy 1.0, so
-there may be bugs remaining in the approach. Only add a new data-type
-if you can't do what you want to do using the OBJECT or VOID
-data-types that are already available. As an example of what I
-consider a useful application of the ability to add data-types is the
-possibility of adding a data-type of arbitrary precision floats to
-NumPy.
+which types your new data-type can be cast to and from.
+
+The NumPy source code includes an example of a custom data-type as part
+of its test suite. The file ``_rational_tests.c.src`` in the source code
+directory ``numpy/numpy/core/src/umath/`` contains an implementation of
+a data-type that represents a rational number as the ratio of two 32 bit
+integers.
.. index::
pair: dtype; adding new
static void
double_to_float(double *from, float* to, npy_intp n,
- void* ig1, void* ig2);
- while (n--) {
- (*to++) = (double) *(from++);
+                void* ignore1, void* ignore2) {
+    while (n--) {
+        (*to++) = (float) *(from++);
+    }
}
This could then be registered to convert doubles to floats using the
4. If you are writing the algorithm, then I recommend that you use the
stride information contained in the array to access the elements of
- the array (the :c:func:`PyArray_GETPTR` macros make this painless). Then,
+ the array (the :c:func:`PyArray_GetPtr` macros make this painless). Then,
you can relax your requirements so as not to force a single-segment
array and the data-copying that might result.
This flag is useful to specify an array that will be used for both
input and output. :c:func:`PyArray_ResolveWritebackIfCopy`
- must be called before :func:`Py_DECREF` at
+ must be called before :c:func:`Py_DECREF` at
the end of the interface routine to write back the temporary data
into the original array passed in. Use
of the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or
npy_intp variables, :c:func:`PyArray_STRIDES` (obj). In particular, this
c-array of integers shows how many **bytes** must be added to the
current element pointer to get to the next element in each dimension.
-For arrays less than 4-dimensions there are :c:func:`PyArray_GETPTR{k}`
+For arrays less than 4-dimensions there are ``PyArray_GETPTR{k}``
(obj, ...) macros where {k} is the integer 1, 2, 3, or 4 that make
using the array strides easier. The arguments .... represent {k} non-
negative integer indices into the array. For example, suppose ``E`` is
whether or not the striding pattern of a particular array matches the
C-style contiguous or Fortran-style contiguous or neither. Whether or
not the striding pattern matches a standard C or Fortran one can be
-tested Using :c:func:`PyArray_ISCONTIGUOUS` (obj) and
+tested using :c:func:`PyArray_IS_C_CONTIGUOUS` (obj) and
:c:func:`PyArray_ISFORTRAN` (obj) respectively. Most third-party
libraries expect contiguous arrays. But, often it is not difficult to
support general-purpose striding. I encourage you to use the striding
Installation of the new package is easy using::
- python setup.py install
+ pip install .
assuming you have the proper permissions to write to the main site-
packages directory for the version of Python you are using. For the
objects.
1. Don't set the argtypes attribute of the function object and define an
- :obj:`_as_parameter_` method for the object you want to pass in. The
- :obj:`_as_parameter_` method must return a Python int which will be passed
+ ``_as_parameter_`` method for the object you want to pass in. The
+ ``_as_parameter_`` method must return a Python int which will be passed
directly to the function.
2. Set the argtypes attribute to a list whose entries contain objects
with a classmethod named from_param that knows how to convert your
object to an object that ctypes can understand (an int/long, string,
- unicode, or object with the :obj:`_as_parameter_` attribute).
+ unicode, or object with the ``_as_parameter_`` attribute).
NumPy uses both methods with a preference for the second method
because it can be safer. The ctypes attribute of the ndarray returns
crashes if the data-pointer to inappropriate arrays are passed in.
To implement the second method, NumPy provides the class-factory
-function :func:`ndpointer` in the :mod:`ctypeslib` module. This
+function :func:`ndpointer` in the :mod:`numpy.ctypeslib` module. This
class-factory function produces an appropriate class that can be
placed in an argtypes attribute entry of a ctypes function. The class
will contain a from_param method which ctypes will use to convert any
NumPy's main object is the homogeneous multidimensional array. It is a
table of elements (usually numbers), all of the same type, indexed by a
-tuple of positive integers. In NumPy dimensions are called *axes*.
+tuple of non-negative integers. In NumPy dimensions are called *axes*.
For example, the coordinates of a point in 3D space ``[1, 2, 1]`` has
one axis. That axis has 3 elements in it, so we say it has a length
`empty_like`,
`arange`,
`linspace`,
- `numpy.random.rand`,
- `numpy.random.randn`,
+ `numpy.random.RandomState.rand`,
+ `numpy.random.RandomState.randn`,
`fromfunction`,
`fromfile`
::
- >>> np.set_printoptions(threshold=np.nan)
+ >>> np.set_printoptions(threshold=sys.maxsize) # sys module should be imported
Basic Operations
array([[ 4., 3.],
[ 2., 8.]])
-On the other hand, the function `row_stack` is equivalent to `vstack`
+On the other hand, the function `ma.row_stack` is equivalent to `vstack`
for any input arrays.
-In general, for arrays of with more than two dimensions,
+In general, for arrays with more than two dimensions,
`hstack` stacks along their second
axes, `vstack` stacks along their
first axes, and `concatenate`
--- /dev/null
+# NumPy static imports for Cython
+#
+# If any of the PyArray_* functions are called, import_array must be
+# called first.
+#
+# This also defines backwards-compatibility buffer acquisition
+# code for use in Python 2.x (or Python <= 2.5 when NumPy starts
+# implementing PEP-3118 directly).
+#
+# Because of laziness, the format string of the buffer is statically
+# allocated. Increase the size if this is not enough, or submit a
+# patch to do this properly.
+#
+# Author: Dag Sverre Seljebotn
+#
+
+DEF _buffer_format_string_len = 255
+
+cimport cpython.buffer as pybuf
+from cpython.ref cimport Py_INCREF
+from cpython.mem cimport PyObject_Malloc, PyObject_Free
+from cpython.object cimport PyObject, PyTypeObject
+from cpython.buffer cimport PyObject_GetBuffer
+from cpython.type cimport type
+cimport libc.stdio as stdio
+
+cdef extern from "Python.h":
+ ctypedef int Py_intptr_t
+
+cdef extern from "numpy/arrayobject.h":
+ ctypedef Py_intptr_t npy_intp
+ ctypedef size_t npy_uintp
+
+ cdef enum NPY_TYPES:
+ NPY_BOOL
+ NPY_BYTE
+ NPY_UBYTE
+ NPY_SHORT
+ NPY_USHORT
+ NPY_INT
+ NPY_UINT
+ NPY_LONG
+ NPY_ULONG
+ NPY_LONGLONG
+ NPY_ULONGLONG
+ NPY_FLOAT
+ NPY_DOUBLE
+ NPY_LONGDOUBLE
+ NPY_CFLOAT
+ NPY_CDOUBLE
+ NPY_CLONGDOUBLE
+ NPY_OBJECT
+ NPY_STRING
+ NPY_UNICODE
+ NPY_VOID
+ NPY_DATETIME
+ NPY_TIMEDELTA
+ NPY_NTYPES
+ NPY_NOTYPE
+
+ NPY_INT8
+ NPY_INT16
+ NPY_INT32
+ NPY_INT64
+ NPY_INT128
+ NPY_INT256
+ NPY_UINT8
+ NPY_UINT16
+ NPY_UINT32
+ NPY_UINT64
+ NPY_UINT128
+ NPY_UINT256
+ NPY_FLOAT16
+ NPY_FLOAT32
+ NPY_FLOAT64
+ NPY_FLOAT80
+ NPY_FLOAT96
+ NPY_FLOAT128
+ NPY_FLOAT256
+ NPY_COMPLEX32
+ NPY_COMPLEX64
+ NPY_COMPLEX128
+ NPY_COMPLEX160
+ NPY_COMPLEX192
+ NPY_COMPLEX256
+ NPY_COMPLEX512
+
+ NPY_INTP
+
+ ctypedef enum NPY_ORDER:
+ NPY_ANYORDER
+ NPY_CORDER
+ NPY_FORTRANORDER
+ NPY_KEEPORDER
+
+ ctypedef enum NPY_CASTING:
+ NPY_NO_CASTING
+ NPY_EQUIV_CASTING
+ NPY_SAFE_CASTING
+ NPY_SAME_KIND_CASTING
+ NPY_UNSAFE_CASTING
+
+ ctypedef enum NPY_CLIPMODE:
+ NPY_CLIP
+ NPY_WRAP
+ NPY_RAISE
+
+ ctypedef enum NPY_SCALARKIND:
+ NPY_NOSCALAR,
+ NPY_BOOL_SCALAR,
+ NPY_INTPOS_SCALAR,
+ NPY_INTNEG_SCALAR,
+ NPY_FLOAT_SCALAR,
+ NPY_COMPLEX_SCALAR,
+ NPY_OBJECT_SCALAR
+
+ ctypedef enum NPY_SORTKIND:
+ NPY_QUICKSORT
+ NPY_HEAPSORT
+ NPY_MERGESORT
+
+ ctypedef enum NPY_SEARCHSIDE:
+ NPY_SEARCHLEFT
+ NPY_SEARCHRIGHT
+
+ enum:
+ # DEPRECATED since NumPy 1.7 ! Do not use in new code!
+ NPY_C_CONTIGUOUS
+ NPY_F_CONTIGUOUS
+ NPY_CONTIGUOUS
+ NPY_FORTRAN
+ NPY_OWNDATA
+ NPY_FORCECAST
+ NPY_ENSURECOPY
+ NPY_ENSUREARRAY
+ NPY_ELEMENTSTRIDES
+ NPY_ALIGNED
+ NPY_NOTSWAPPED
+ NPY_WRITEABLE
+ NPY_UPDATEIFCOPY
+ NPY_ARR_HAS_DESCR
+
+ NPY_BEHAVED
+ NPY_BEHAVED_NS
+ NPY_CARRAY
+ NPY_CARRAY_RO
+ NPY_FARRAY
+ NPY_FARRAY_RO
+ NPY_DEFAULT
+
+ NPY_IN_ARRAY
+ NPY_OUT_ARRAY
+ NPY_INOUT_ARRAY
+ NPY_IN_FARRAY
+ NPY_OUT_FARRAY
+ NPY_INOUT_FARRAY
+
+ NPY_UPDATE_ALL
+
+ enum:
+ # Added in NumPy 1.7 to replace the deprecated enums above.
+ NPY_ARRAY_C_CONTIGUOUS
+ NPY_ARRAY_F_CONTIGUOUS
+ NPY_ARRAY_OWNDATA
+ NPY_ARRAY_FORCECAST
+ NPY_ARRAY_ENSURECOPY
+ NPY_ARRAY_ENSUREARRAY
+ NPY_ARRAY_ELEMENTSTRIDES
+ NPY_ARRAY_ALIGNED
+ NPY_ARRAY_NOTSWAPPED
+ NPY_ARRAY_WRITEABLE
+ NPY_ARRAY_UPDATEIFCOPY
+
+ NPY_ARRAY_BEHAVED
+ NPY_ARRAY_BEHAVED_NS
+ NPY_ARRAY_CARRAY
+ NPY_ARRAY_CARRAY_RO
+ NPY_ARRAY_FARRAY
+ NPY_ARRAY_FARRAY_RO
+ NPY_ARRAY_DEFAULT
+
+ NPY_ARRAY_IN_ARRAY
+ NPY_ARRAY_OUT_ARRAY
+ NPY_ARRAY_INOUT_ARRAY
+ NPY_ARRAY_IN_FARRAY
+ NPY_ARRAY_OUT_FARRAY
+ NPY_ARRAY_INOUT_FARRAY
+
+ NPY_ARRAY_UPDATE_ALL
+
+ cdef enum:
+ NPY_MAXDIMS
+
+ npy_intp NPY_MAX_ELSIZE
+
+ ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
+
+ ctypedef struct PyArray_ArrayDescr:
+ # shape is a tuple, but Cython doesn't support "tuple shape"
+ # inside a non-PyObject declaration, so we have to declare it
+ # as just a PyObject*.
+ PyObject* shape
+
+ ctypedef struct PyArray_Descr:
+ pass
+
+ ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]:
+ # Use PyDataType_* macros when possible, however there are no macros
+ # for accessing some of the fields, so some are defined.
+ cdef PyTypeObject* typeobj
+ cdef char kind
+ cdef char type
+ # Numpy sometimes mutates this without warning (e.g. it'll
+ # sometimes change "|" to "<" in shared dtype objects on
+ # little-endian machines). If this matters to you, use
+ # PyArray_IsNativeByteOrder(dtype.byteorder) instead of
+ # directly accessing this field.
+ cdef char byteorder
+ cdef char flags
+ cdef int type_num
+ cdef int itemsize "elsize"
+ cdef int alignment
+ cdef dict fields
+ cdef tuple names
+ # Use PyDataType_HASSUBARRAY to test whether this field is
+ # valid (the pointer can be NULL). Most users should access
+ # this field via the inline helper method PyDataType_SHAPE.
+ cdef PyArray_ArrayDescr* subarray
+
+ ctypedef extern class numpy.flatiter [object PyArrayIterObject, check_size ignore]:
+ # Use through macros
+ pass
+
+ ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]:
+ cdef int numiter
+ cdef npy_intp size, index
+ cdef int nd
+ cdef npy_intp *dimensions
+ cdef void **iters
+
+ ctypedef struct PyArrayObject:
+ # For use in situations where ndarray can't replace PyArrayObject*,
+ # like PyArrayObject**.
+ pass
+
+ ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]:
+ cdef __cythonbufferdefaults__ = {"mode": "strided"}
+
+ cdef:
+ # Only taking a few of the most commonly used and stable fields.
+ # One should use PyArray_* macros instead to access the C fields.
+ char *data
+ int ndim "nd"
+ npy_intp *shape "dimensions"
+ npy_intp *strides
+ dtype descr # deprecated since NumPy 1.7 !
+ PyObject* base
+
+ # Note: This syntax (function definition in pxd files) is an
+ # experimental exception made for __getbuffer__ and __releasebuffer__
+ # -- the details of this may change.
+ def __getbuffer__(ndarray self, Py_buffer* info, int flags):
+ PyObject_GetBuffer(<object>self, info, flags);
+
+ def __releasebuffer__(ndarray self, Py_buffer* info):
+ # We should call a possible tp_bufferrelease(self, info) but no
+ # interface to that is exposed by cython or python. And currently
+ # the function is NULL in numpy, we rely on refcounting to release
+ # info when self is collected
+ pass
+
+
+ ctypedef unsigned char npy_bool
+
+ ctypedef signed char npy_byte
+ ctypedef signed short npy_short
+ ctypedef signed int npy_int
+ ctypedef signed long npy_long
+ ctypedef signed long long npy_longlong
+
+ ctypedef unsigned char npy_ubyte
+ ctypedef unsigned short npy_ushort
+ ctypedef unsigned int npy_uint
+ ctypedef unsigned long npy_ulong
+ ctypedef unsigned long long npy_ulonglong
+
+ ctypedef float npy_float
+ ctypedef double npy_double
+ ctypedef long double npy_longdouble
+
+ ctypedef signed char npy_int8
+ ctypedef signed short npy_int16
+ ctypedef signed int npy_int32
+ ctypedef signed long long npy_int64
+ ctypedef signed long long npy_int96
+ ctypedef signed long long npy_int128
+
+ ctypedef unsigned char npy_uint8
+ ctypedef unsigned short npy_uint16
+ ctypedef unsigned int npy_uint32
+ ctypedef unsigned long long npy_uint64
+ ctypedef unsigned long long npy_uint96
+ ctypedef unsigned long long npy_uint128
+
+ ctypedef float npy_float32
+ ctypedef double npy_float64
+ ctypedef long double npy_float80
+ ctypedef long double npy_float96
+ ctypedef long double npy_float128
+
+ ctypedef struct npy_cfloat:
+ double real
+ double imag
+
+ ctypedef struct npy_cdouble:
+ double real
+ double imag
+
+ ctypedef struct npy_clongdouble:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex64:
+ float real
+ float imag
+
+ ctypedef struct npy_complex128:
+ double real
+ double imag
+
+ ctypedef struct npy_complex160:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex192:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex256:
+ long double real
+ long double imag
+
+ ctypedef struct PyArray_Dims:
+ npy_intp *ptr
+ int len
+
+ int _import_array() except -1
+
+ #
+ # Macros from ndarrayobject.h
+ #
+ bint PyArray_CHKFLAGS(ndarray m, int flags)
+ bint PyArray_IS_C_CONTIGUOUS(ndarray arr)
+ bint PyArray_IS_F_CONTIGUOUS(ndarray arr)
+ bint PyArray_ISCONTIGUOUS(ndarray m)
+ bint PyArray_ISWRITEABLE(ndarray m)
+ bint PyArray_ISALIGNED(ndarray m)
+
+ int PyArray_NDIM(ndarray)
+ bint PyArray_ISONESEGMENT(ndarray)
+ bint PyArray_ISFORTRAN(ndarray)
+ int PyArray_FORTRANIF(ndarray)
+
+ void* PyArray_DATA(ndarray)
+ char* PyArray_BYTES(ndarray)
+ npy_intp* PyArray_DIMS(ndarray)
+ npy_intp* PyArray_STRIDES(ndarray)
+ npy_intp PyArray_DIM(ndarray, size_t)
+ npy_intp PyArray_STRIDE(ndarray, size_t)
+
+ PyObject *PyArray_BASE(ndarray) # returns borrowed reference!
+ PyArray_Descr *PyArray_DESCR(ndarray) # returns borrowed reference to dtype!
+ int PyArray_FLAGS(ndarray)
+ npy_intp PyArray_ITEMSIZE(ndarray)
+ int PyArray_TYPE(ndarray arr)
+
+ object PyArray_GETITEM(ndarray arr, void *itemptr)
+ int PyArray_SETITEM(ndarray arr, void *itemptr, object obj)
+
+ bint PyTypeNum_ISBOOL(int)
+ bint PyTypeNum_ISUNSIGNED(int)
+ bint PyTypeNum_ISSIGNED(int)
+ bint PyTypeNum_ISINTEGER(int)
+ bint PyTypeNum_ISFLOAT(int)
+ bint PyTypeNum_ISNUMBER(int)
+ bint PyTypeNum_ISSTRING(int)
+ bint PyTypeNum_ISCOMPLEX(int)
+ bint PyTypeNum_ISPYTHON(int)
+ bint PyTypeNum_ISFLEXIBLE(int)
+ bint PyTypeNum_ISUSERDEF(int)
+ bint PyTypeNum_ISEXTENDED(int)
+ bint PyTypeNum_ISOBJECT(int)
+
+ bint PyDataType_ISBOOL(dtype)
+ bint PyDataType_ISUNSIGNED(dtype)
+ bint PyDataType_ISSIGNED(dtype)
+ bint PyDataType_ISINTEGER(dtype)
+ bint PyDataType_ISFLOAT(dtype)
+ bint PyDataType_ISNUMBER(dtype)
+ bint PyDataType_ISSTRING(dtype)
+ bint PyDataType_ISCOMPLEX(dtype)
+ bint PyDataType_ISPYTHON(dtype)
+ bint PyDataType_ISFLEXIBLE(dtype)
+ bint PyDataType_ISUSERDEF(dtype)
+ bint PyDataType_ISEXTENDED(dtype)
+ bint PyDataType_ISOBJECT(dtype)
+ bint PyDataType_HASFIELDS(dtype)
+ bint PyDataType_HASSUBARRAY(dtype)
+
+ bint PyArray_ISBOOL(ndarray)
+ bint PyArray_ISUNSIGNED(ndarray)
+ bint PyArray_ISSIGNED(ndarray)
+ bint PyArray_ISINTEGER(ndarray)
+ bint PyArray_ISFLOAT(ndarray)
+ bint PyArray_ISNUMBER(ndarray)
+ bint PyArray_ISSTRING(ndarray)
+ bint PyArray_ISCOMPLEX(ndarray)
+ bint PyArray_ISPYTHON(ndarray)
+ bint PyArray_ISFLEXIBLE(ndarray)
+ bint PyArray_ISUSERDEF(ndarray)
+ bint PyArray_ISEXTENDED(ndarray)
+ bint PyArray_ISOBJECT(ndarray)
+ bint PyArray_HASFIELDS(ndarray)
+
+ bint PyArray_ISVARIABLE(ndarray)
+
+ bint PyArray_SAFEALIGNEDCOPY(ndarray)
+ bint PyArray_ISNBO(char) # works on ndarray.byteorder
+ bint PyArray_IsNativeByteOrder(char) # works on ndarray.byteorder
+ bint PyArray_ISNOTSWAPPED(ndarray)
+ bint PyArray_ISBYTESWAPPED(ndarray)
+
+ bint PyArray_FLAGSWAP(ndarray, int)
+
+ bint PyArray_ISCARRAY(ndarray)
+ bint PyArray_ISCARRAY_RO(ndarray)
+ bint PyArray_ISFARRAY(ndarray)
+ bint PyArray_ISFARRAY_RO(ndarray)
+ bint PyArray_ISBEHAVED(ndarray)
+ bint PyArray_ISBEHAVED_RO(ndarray)
+
+
+ bint PyDataType_ISNOTSWAPPED(dtype)
+ bint PyDataType_ISBYTESWAPPED(dtype)
+
+ bint PyArray_DescrCheck(object)
+
+ bint PyArray_Check(object)
+ bint PyArray_CheckExact(object)
+
+ # Cannot be supported due to out arg:
+ # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&)
+ # bint PyArray_HasArrayInterface(op, out)
+
+
+ bint PyArray_IsZeroDim(object)
+ # Cannot be supported due to ## ## in macro:
+ # bint PyArray_IsScalar(object, verbatim work)
+ bint PyArray_CheckScalar(object)
+ bint PyArray_IsPythonNumber(object)
+ bint PyArray_IsPythonScalar(object)
+ bint PyArray_IsAnyScalar(object)
+ bint PyArray_CheckAnyScalar(object)
+ ndarray PyArray_GETCONTIGUOUS(ndarray)
+ bint PyArray_SAMESHAPE(ndarray, ndarray)
+ npy_intp PyArray_SIZE(ndarray)
+ npy_intp PyArray_NBYTES(ndarray)
+
+ object PyArray_FROM_O(object)
+ object PyArray_FROM_OF(object m, int flags)
+ object PyArray_FROM_OT(object m, int type)
+ object PyArray_FROM_OTF(object m, int type, int flags)
+ object PyArray_FROMANY(object m, int type, int min, int max, int flags)
+ object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran)
+ object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran)
+ void PyArray_FILLWBYTE(object, int val)
+ npy_intp PyArray_REFCOUNT(object)
+ object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth)
+ unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2)
+ bint PyArray_EquivByteorders(int b1, int b2)
+ object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
+ object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data)
+ #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr)
+ object PyArray_ToScalar(void* data, ndarray arr)
+
+ void* PyArray_GETPTR1(ndarray m, npy_intp i)
+ void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j)
+ void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k)
+ void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l)
+
+ void PyArray_XDECREF_ERR(ndarray)
+ # Cannot be supported due to out arg
+ # void PyArray_DESCR_REPLACE(descr)
+
+
+ object PyArray_Copy(ndarray)
+ object PyArray_FromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth)
+
+ object PyArray_Cast(ndarray mp, int type_num)
+ object PyArray_Take(ndarray ap, object items, int axis)
+ object PyArray_Put(ndarray ap, object items, object values)
+
+ void PyArray_ITER_RESET(flatiter it) nogil
+ void PyArray_ITER_NEXT(flatiter it) nogil
+ void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil
+ void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil
+ void* PyArray_ITER_DATA(flatiter it) nogil
+ bint PyArray_ITER_NOTDONE(flatiter it) nogil
+
+ void PyArray_MultiIter_RESET(broadcast multi) nogil
+ void PyArray_MultiIter_NEXT(broadcast multi) nogil
+ void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil
+ void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil
+ void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil
+ void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil
+ bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil
+
+ # Functions from __multiarray_api.h
+
+ # Functions taking dtype and returning object/ndarray are disabled
+ # for now as they steal dtype references. I'm conservative and disable
+ # more than is probably needed until it can be checked further.
+ int PyArray_SetNumericOps (object)
+ object PyArray_GetNumericOps ()
+ int PyArray_INCREF (ndarray)
+ int PyArray_XDECREF (ndarray)
+ void PyArray_SetStringFunction (object, int)
+ dtype PyArray_DescrFromType (int)
+ object PyArray_TypeObjectFromType (int)
+ char * PyArray_Zero (ndarray)
+ char * PyArray_One (ndarray)
+ #object PyArray_CastToType (ndarray, dtype, int)
+ int PyArray_CastTo (ndarray, ndarray)
+ int PyArray_CastAnyTo (ndarray, ndarray)
+ int PyArray_CanCastSafely (int, int)
+ npy_bool PyArray_CanCastTo (dtype, dtype)
+ int PyArray_ObjectType (object, int)
+ dtype PyArray_DescrFromObject (object, dtype)
+ #ndarray* PyArray_ConvertToCommonType (object, int *)
+ dtype PyArray_DescrFromScalar (object)
+ dtype PyArray_DescrFromTypeObject (object)
+ npy_intp PyArray_Size (object)
+ #object PyArray_Scalar (void *, dtype, object)
+ #object PyArray_FromScalar (object, dtype)
+ void PyArray_ScalarAsCtype (object, void *)
+ #int PyArray_CastScalarToCtype (object, void *, dtype)
+ #int PyArray_CastScalarDirect (object, dtype, void *, int)
+ object PyArray_ScalarFromObject (object)
+ #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int)
+ object PyArray_FromDims (int, int *, int)
+ #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *)
+ #object PyArray_FromAny (object, dtype, int, int, int, object)
+ object PyArray_EnsureArray (object)
+ object PyArray_EnsureAnyArray (object)
+ #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *)
+ #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *)
+ #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp)
+ #object PyArray_FromIter (object, dtype, npy_intp)
+ object PyArray_Return (ndarray)
+ #object PyArray_GetField (ndarray, dtype, int)
+ #int PyArray_SetField (ndarray, dtype, int, object)
+ object PyArray_Byteswap (ndarray, npy_bool)
+ object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER)
+ int PyArray_MoveInto (ndarray, ndarray)
+ int PyArray_CopyInto (ndarray, ndarray)
+ int PyArray_CopyAnyInto (ndarray, ndarray)
+ int PyArray_CopyObject (ndarray, object)
+ object PyArray_NewCopy (ndarray, NPY_ORDER)
+ object PyArray_ToList (ndarray)
+ object PyArray_ToString (ndarray, NPY_ORDER)
+ int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *)
+ int PyArray_Dump (object, object, int)
+ object PyArray_Dumps (object, int)
+ int PyArray_ValidType (int)
+ void PyArray_UpdateFlags (ndarray, int)
+ object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object)
+ #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object)
+ #dtype PyArray_DescrNew (dtype)
+ dtype PyArray_DescrNewFromType (int)
+ double PyArray_GetPriority (object, double)
+ object PyArray_IterNew (object)
+ object PyArray_MultiIterNew (int, ...)
+
+ int PyArray_PyIntAsInt (object)
+ npy_intp PyArray_PyIntAsIntp (object)
+ int PyArray_Broadcast (broadcast)
+ void PyArray_FillObjectArray (ndarray, object)
+ int PyArray_FillWithScalar (ndarray, object)
+ npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)
+ dtype PyArray_DescrNewByteorder (dtype, char)
+ object PyArray_IterAllButAxis (object, int *)
+ #object PyArray_CheckFromAny (object, dtype, int, int, int, object)
+ #object PyArray_FromArray (ndarray, dtype, int)
+ object PyArray_FromInterface (object)
+ object PyArray_FromStructInterface (object)
+ #object PyArray_FromArrayAttr (object, dtype, object)
+ #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*)
+ int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND)
+ object PyArray_NewFlagsObject (object)
+ npy_bool PyArray_CanCastScalar (type, type)
+ #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t)
+ int PyArray_RemoveSmallest (broadcast)
+ int PyArray_ElementStrides (object)
+ void PyArray_Item_INCREF (char *, dtype)
+ void PyArray_Item_XDECREF (char *, dtype)
+ object PyArray_FieldNames (object)
+ object PyArray_Transpose (ndarray, PyArray_Dims *)
+ object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE)
+ object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE)
+ object PyArray_PutMask (ndarray, object, object)
+ object PyArray_Repeat (ndarray, object, int)
+ object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
+ int PyArray_Sort (ndarray, int, NPY_SORTKIND)
+ object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE)
+ object PyArray_ArgMax (ndarray, int, ndarray)
+ object PyArray_ArgMin (ndarray, int, ndarray)
+ object PyArray_Reshape (ndarray, object)
+ object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER)
+ object PyArray_Squeeze (ndarray)
+ #object PyArray_View (ndarray, dtype, type)
+ object PyArray_SwapAxes (ndarray, int, int)
+ object PyArray_Max (ndarray, int, ndarray)
+ object PyArray_Min (ndarray, int, ndarray)
+ object PyArray_Ptp (ndarray, int, ndarray)
+ object PyArray_Mean (ndarray, int, int, ndarray)
+ object PyArray_Trace (ndarray, int, int, int, int, ndarray)
+ object PyArray_Diagonal (ndarray, int, int, int)
+ object PyArray_Clip (ndarray, object, object, ndarray)
+ object PyArray_Conjugate (ndarray, ndarray)
+ object PyArray_Nonzero (ndarray)
+ object PyArray_Std (ndarray, int, int, ndarray, int)
+ object PyArray_Sum (ndarray, int, int, ndarray)
+ object PyArray_CumSum (ndarray, int, int, ndarray)
+ object PyArray_Prod (ndarray, int, int, ndarray)
+ object PyArray_CumProd (ndarray, int, int, ndarray)
+ object PyArray_All (ndarray, int, ndarray)
+ object PyArray_Any (ndarray, int, ndarray)
+ object PyArray_Compress (ndarray, object, int, ndarray)
+ object PyArray_Flatten (ndarray, NPY_ORDER)
+ object PyArray_Ravel (ndarray, NPY_ORDER)
+ npy_intp PyArray_MultiplyList (npy_intp *, int)
+ int PyArray_MultiplyIntList (int *, int)
+ void * PyArray_GetPtr (ndarray, npy_intp*)
+ int PyArray_CompareLists (npy_intp *, npy_intp *, int)
+ #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype)
+ #int PyArray_As1D (object*, char **, int *, int)
+ #int PyArray_As2D (object*, char ***, int *, int *, int)
+ int PyArray_Free (object, void *)
+ #int PyArray_Converter (object, object*)
+ int PyArray_IntpFromSequence (object, npy_intp *, int)
+ object PyArray_Concatenate (object, int)
+ object PyArray_InnerProduct (object, object)
+ object PyArray_MatrixProduct (object, object)
+ object PyArray_CopyAndTranspose (object)
+ object PyArray_Correlate (object, object, int)
+ int PyArray_TypestrConvert (int, int)
+ #int PyArray_DescrConverter (object, dtype*)
+ #int PyArray_DescrConverter2 (object, dtype*)
+ int PyArray_IntpConverter (object, PyArray_Dims *)
+ #int PyArray_BufferConverter (object, chunk)
+ int PyArray_AxisConverter (object, int *)
+ int PyArray_BoolConverter (object, npy_bool *)
+ int PyArray_ByteorderConverter (object, char *)
+ int PyArray_OrderConverter (object, NPY_ORDER *)
+ unsigned char PyArray_EquivTypes (dtype, dtype)
+ #object PyArray_Zeros (int, npy_intp *, dtype, int)
+ #object PyArray_Empty (int, npy_intp *, dtype, int)
+ object PyArray_Where (object, object, object)
+ object PyArray_Arange (double, double, double, int)
+ #object PyArray_ArangeObj (object, object, object, dtype)
+ int PyArray_SortkindConverter (object, NPY_SORTKIND *)
+ object PyArray_LexSort (object, int)
+ object PyArray_Round (ndarray, int, ndarray)
+ unsigned char PyArray_EquivTypenums (int, int)
+ int PyArray_RegisterDataType (dtype)
+ int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *)
+ int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND)
+ #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
+ object PyArray_IntTupleFromIntp (int, npy_intp *)
+ int PyArray_TypeNumFromName (char *)
+ int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *)
+ #int PyArray_OutputConverter (object, ndarray*)
+ object PyArray_BroadcastToShape (object, npy_intp *, int)
+ void _PyArray_SigintHandler (int)
+ void* _PyArray_GetSigintBuf ()
+ #int PyArray_DescrAlignConverter (object, dtype*)
+ #int PyArray_DescrAlignConverter2 (object, dtype*)
+ int PyArray_SearchsideConverter (object, void *)
+ object PyArray_CheckAxis (ndarray, int *, int)
+ npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
+ int PyArray_CompareString (char *, char *, size_t)
+ int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead.
+
+
+# Typedefs that match the runtime dtype objects in
+# the numpy module.
+
+# The ones that are commented out need an IFDEF function
+# in Cython to enable them only on the right systems.
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+#ctypedef npy_int96 int96_t
+#ctypedef npy_int128 int128_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+#ctypedef npy_uint96 uint96_t
+#ctypedef npy_uint128 uint128_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+# The int types are mapped in a somewhat surprising way --
+# numpy.int corresponds to 'l' and numpy.long to 'q'
+ctypedef npy_long int_t
+ctypedef npy_longlong long_t
+ctypedef npy_longlong longlong_t
+
+ctypedef npy_ulong uint_t
+ctypedef npy_ulonglong ulong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef npy_cfloat cfloat_t
+ctypedef npy_cdouble cdouble_t
+ctypedef npy_clongdouble clongdouble_t
+
+ctypedef npy_cdouble complex_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+ return PyArray_MultiIterNew(1, <void*>a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+ return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+ return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
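+# The MultiIterNew helpers above forward to the varargs
+# PyArray_MultiIterNew declared earlier; e.g. PyArray_MultiIterNew2(a, b)
+# returns a ``broadcast`` object stepping over ``a`` and ``b`` in lockstep
+# under the usual broadcasting rules.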
+
+cdef inline tuple PyDataType_SHAPE(dtype d):
+ if PyDataType_HASSUBARRAY(d):
+ return <tuple>d.subarray.shape
+ else:
+ return ()
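+
+# PyDataType_SHAPE above returns (2, 3) for a subarray dtype such as
+# np.dtype((np.int32, (2, 3))), and () for a plain dtype like
+# np.dtype(np.int32).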
+
+cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
+ # Recursive utility function used in __getbuffer__ to get format
+ # string. The new location in the format string is returned.
+
+ cdef dtype child
+ cdef int endian_detector = 1
+ cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
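+ # (endian_detector holds 1, so on a little-endian machine the first
+ # byte in memory is nonzero)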
+ cdef tuple fields
+
+ for childname in descr.names:
+ fields = descr.fields[childname]
+ child, new_offset = fields
+
+ if (end - f) - <int>(new_offset - offset[0]) < 15:
+ raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+
+ if ((child.byteorder == c'>' and little_endian) or
+ (child.byteorder == c'<' and not little_endian)):
+ raise ValueError(u"Non-native byte order not supported")
+ # One could encode it in the format string and have Cython
+ # complain instead, BUT: < and > in format strings also imply
+ # standardized sizes for datatypes, and we rely on native in
+ # order to avoid reencoding data types based on their size.
+ #
+ # A proper PEP 3118 exporter for other clients than Cython
+ # must deal properly with this!
+
+ # Output padding bytes
+ while offset[0] < new_offset:
+ f[0] = 120 # "x"; pad byte
+ f += 1
+ offset[0] += 1
+
+ offset[0] += child.itemsize
+
+ if not PyDataType_HASFIELDS(child):
+ t = child.type_num
+ if end - f < 5:
+ raise RuntimeError(u"Format string allocated too short.")
+
+ # Until ticket #99 is fixed, use integers to avoid warnings
+ if t == NPY_BYTE: f[0] = 98 #"b"
+ elif t == NPY_UBYTE: f[0] = 66 #"B"
+ elif t == NPY_SHORT: f[0] = 104 #"h"
+ elif t == NPY_USHORT: f[0] = 72 #"H"
+ elif t == NPY_INT: f[0] = 105 #"i"
+ elif t == NPY_UINT: f[0] = 73 #"I"
+ elif t == NPY_LONG: f[0] = 108 #"l"
+ elif t == NPY_ULONG: f[0] = 76 #"L"
+ elif t == NPY_LONGLONG: f[0] = 113 #"q"
+ elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
+ elif t == NPY_FLOAT: f[0] = 102 #"f"
+ elif t == NPY_DOUBLE: f[0] = 100 #"d"
+ elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
+ elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
+ elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
+ elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ elif t == NPY_OBJECT: f[0] = 79 #"O"
+ else:
+ raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ f += 1
+ else:
+ # Cython ignores struct boundary information ("T{...}"),
+ # so don't output it
+ f = _util_dtypestring(child, f, end, offset)
+ return f
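+
+# As a rough illustration (not a doctest): for the packed little-endian
+# dtype np.dtype([('a', '<i4'), ('b', '<f8')]) the loop above emits "id",
+# with "x" pad bytes appearing only where field offsets leave gaps.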
+
+
+#
+# ufunc API
+#
+
+cdef extern from "numpy/ufuncobject.h":
+
+ ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
+
+ ctypedef extern class numpy.ufunc [object PyUFuncObject, check_size ignore]:
+ cdef:
+ int nin, nout, nargs
+ int identity
+ PyUFuncGenericFunction *functions
+ void **data
+ int ntypes
+ int check_return
+ char *name
+ char *types
+ char *doc
+ void *ptr
+ PyObject *obj
+ PyObject *userloops
+
+ cdef enum:
+ PyUFunc_Zero
+ PyUFunc_One
+ PyUFunc_None
+ UFUNC_ERR_IGNORE
+ UFUNC_ERR_WARN
+ UFUNC_ERR_RAISE
+ UFUNC_ERR_CALL
+ UFUNC_ERR_PRINT
+ UFUNC_ERR_LOG
+ UFUNC_MASK_DIVIDEBYZERO
+ UFUNC_MASK_OVERFLOW
+ UFUNC_MASK_UNDERFLOW
+ UFUNC_MASK_INVALID
+ UFUNC_SHIFT_DIVIDEBYZERO
+ UFUNC_SHIFT_OVERFLOW
+ UFUNC_SHIFT_UNDERFLOW
+ UFUNC_SHIFT_INVALID
+ UFUNC_FPE_DIVIDEBYZERO
+ UFUNC_FPE_OVERFLOW
+ UFUNC_FPE_UNDERFLOW
+ UFUNC_FPE_INVALID
+ UFUNC_ERR_DEFAULT
+ UFUNC_ERR_DEFAULT2
+
+ object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+ void **, char *, int, int, int, int, char *, char *, int)
+ int PyUFunc_RegisterLoopForType(ufunc, int,
+ PyUFuncGenericFunction, int *, void *)
+ int PyUFunc_GenericFunction \
+ (ufunc, PyObject *, PyObject *, PyArrayObject **)
+ void PyUFunc_f_f_As_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_f_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_g_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F_As_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_G_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f_As_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_gg_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F_As_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_GG_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_On_Om \
+ (char **, npy_intp *, npy_intp *, void *)
+ int PyUFunc_GetPyValues \
+ (char *, int *, int *, PyObject **)
+ int PyUFunc_checkfperr \
+ (int, PyObject *, int *)
+ void PyUFunc_clearfperr()
+ int PyUFunc_getfperr()
+ int PyUFunc_handlefperr \
+ (int, PyObject *, int, int *)
+ int PyUFunc_ReplaceLoopBySignature \
+ (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+ object PyUFunc_FromFuncAndDataAndSignature \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int,
+ int, char *, char *, int, char *)
+
+ int _import_umath() except -1
+
+cdef inline void set_array_base(ndarray arr, object base):
+ Py_INCREF(base) # important to do this before stealing the reference below!
+ PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+ base = PyArray_BASE(arr)
+ if base is NULL:
+ return None
+ return <object>base
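+
+# A minimal illustrative sketch (not part of this file): wrapping external
+# memory and keeping its owner alive via set_array_base. The names below
+# are hypothetical.
+#
+#     cdef object wrap(void *data, npy_intp n, object owner):
+#         arr = PyArray_SimpleNewFromData(1, &n, NPY_FLOAT64, data)
+#         set_array_base(<ndarray>arr, owner)
+#         return arr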
+
+# Versions of the import_* functions which are more suitable for
+# Cython code.
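+# Any Cython module that uses the C-API declarations above must call
+# import_array() (and import_umath()/import_ufunc() where ufuncs are used)
+# once at module initialisation, otherwise the API function pointers are
+# left unset.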
+cdef inline int import_array() except -1:
+ try:
+ _import_array()
+ except Exception:
+ raise ImportError("numpy.core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy.core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy.core.umath failed to import")
from .core import *
from . import compat
from . import lib
+ # FIXME: why have numpy.lib if everything is imported here??
from .lib import *
+
from . import linalg
from . import fft
from . import polynomial
# Make these accessible from the numpy namespace
# but not imported by `from numpy import *`
+ # TODO[gh-6103]: Deprecate these
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
unicode = str
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
+ # These are added by `from .core import *` and `core.__all__`, but we
+ # overwrite them above with builtins we do _not_ want to export.
+ __all__.remove('long')
+ __all__.remove('unicode')
+
+ # Remove things that are in numpy.lib but not in the numpy namespace.
+ # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
+ # that prevents adding more things to the main namespace by accident.
+ # The list below will grow until the `from .lib import *` fixme above is
+ # taken care of.
+ __all__.remove('Arrayterator')
+ del Arrayterator
+
# Filter out Cython harmless warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
oldnumeric = 'removed'
numarray = 'removed'
- # We don't actually use this ourselves anymore, but I'm not 100% sure that
- # no-one else in the world is using it (though I hope not)
- from .testing import Tester
+ if sys.version_info[:2] >= (3, 7):
+ # Importing Tester requires importing all of unittest, which is not a
+ # cheap import. Since it is mainly used in test suites, we lazily
+ # import it here to save on the order of 10 ms of import time for most
+ # users.
+ #
+ # The previous way Tester was imported also had the side effect of
+ # adding the full `numpy.testing` namespace.
+ #
+ # Module-level __getattr__ is only supported in Python 3.7 onwards:
+ # https://www.python.org/dev/peps/pep-0562/
+ def __getattr__(attr):
+ if attr == 'testing':
+ import numpy.testing as testing
+ return testing
+ elif attr == 'Tester':
+ from .testing import Tester
+ return Tester
+ else:
+ raise AttributeError("module {!r} has no attribute "
+ "{!r}".format(__name__, attr))
+
+ def __dir__():
+ return list(globals().keys()) + ['Tester', 'testing']
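+
+ # With __getattr__/__dir__ above, ``numpy.testing`` is imported only
+ # on first attribute access, yet dir(numpy) still advertises 'Tester'
+ # and 'testing'.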
+
+ else:
+ # We don't actually use this ourselves anymore, but I'm not 100% sure that
+ # no-one else in the world is using it (though I hope not)
+ from .testing import Tester
# Pytest testing
from numpy._pytesttester import PytestTester
"""
Pytest test runner.
- This class is made available in ``numpy.testing``, and a test function
- is typically added to a package's __init__.py like so::
+ A test function is typically added to a package's __init__.py like so::
- from numpy.testing import PytestTester
+ from numpy._pytesttester import PytestTester
test = PytestTester(__name__).test
del PytestTester
module_name : module name
The name of the module to test.
+ Notes
+ -----
+ Unlike the previous ``nose``-based implementation, this class is not
+ publicly exposed as it performs some ``numpy``-specific warning
+ suppression.
+
"""
def __init__(self, module_name):
self.module_name = module_name
"""
-Python 3 compatibility tools.
+Python 3.X compatibility tools.
-"""
-from __future__ import division, absolute_import, print_function
+While this file was originally intended for the Python 2 -> 3 transition,
+it is now used to create a compatibility layer between different
+minor versions of Python 3.
+Even though the active version of numpy may no longer support a given
+version of Python, we keep these shims so that downstream libraries can
+continue to use them for forward compatibility with numpy while they
+transition their code to newer versions of Python.
+"""
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
"""
from __future__ import division, absolute_import, print_function
+import os
+
import pytest
import numpy
def pytest_configure(config):
config.addinivalue_line("markers",
"valgrind_error: Tests that are known to error under valgrind.")
+ config.addinivalue_line("markers",
+ "leaks_references: Tests that are known to leak references.")
config.addinivalue_line("markers",
"slow: Tests that are very slow.")
+def pytest_addoption(parser):
+ parser.addoption("--available-memory", action="store", default=None,
+ help=("Set amount of memory available for running the "
+ "test suite. This can result to tests requiring "
+ "especially large amounts of memory to be skipped. "
+ "Equivalent to setting environment variable "
+ "NPY_AVAILABLE_MEM. Default: determined"
+ "automatically."))
+
+
+def pytest_sessionstart(session):
+ available_mem = session.config.getoption('available_memory')
+ if available_mem is not None:
+ os.environ['NPY_AVAILABLE_MEM'] = available_mem
+
+
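+# Illustrative invocation (figures hypothetical): cap the suite at 4 GiB
+# so that memory-hungry tests skip themselves:
+#
+#     pytest numpy/ --available-memory=4GB
+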
#FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
+"""
+Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
+
+Please note that this module is private. All functions and objects
+are available in the main ``numpy`` namespace - use that instead.
+
+"""
+
from __future__ import division, absolute_import, print_function
-from .info import __doc__
from numpy.version import version as __version__
import os
>>> fl = x.flat
>>> fl.coords
(0, 0)
- >>> fl.next()
+ >>> next(fl)
0
>>> fl.coords
(0, 1)
>>> fl = x.flat
>>> fl.index
0
- >>> fl.next()
+ >>> next(fl)
0
>>> fl.index
1
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
- If operand flags `"writeonly"` or `"readwrite"` are used the operands may
- be views into the original data with the `WRITEBACKIFCOPY` flag. In this case
- nditer must be used as a context manager or the nditer.close
- method must be called before using the result. The temporary
- data will be written back to the original data when the `__exit__`
- function is called but not before:
+ If operand flags `"writeonly"` or `"readwrite"` are used the
+ operands may be views into the original data with the
+ `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
+ context manager or the `nditer.close` method must be called before
+ using the result. The temporary data will be written back to the
+ original data when the `__exit__` function is called but not before:
>>> a = np.arange(6, dtype='i4')[::-2]
>>> with np.nditer(a, [],
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
+ Context management and the `close` method appeared in version 1.15.0.
+
""")
# nditer methods
Resolve all writeback semantics in writeable operands.
+ .. versionadded:: 1.15.0
+
See Also
--------
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
- >>> row.next(), col.next()
+ >>> next(row), next(col)
(1, 4)
"""))
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
- sequence. This argument can only be used to 'upcast' the array. For
- downcasting, use the .astype(t) method.
+ sequence.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
- the data must be in exactly this format.
+ the data must be in exactly this format. Most builtin numeric types are
+ supported and extension types may be supported.
+
+ .. versionadded:: 1.18.0
+ Complex dtypes.
+
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
+ Most builtin numeric types are supported and extension types may be supported.
+
+ .. versionadded:: 1.18.0
+ Complex dtypes.
+
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
- data storage, as the binary files generated are are not platform
+ data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
See Also
--------
- linspace : Evenly spaced numbers with careful handling of endpoints.
- ogrid: Arrays of evenly spaced numbers in N-dimensions.
- mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
+ numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
+ numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions.
+ numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
- Return the compile time NDARRAY_VERSION number.
+ Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
""")
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
+ Arrays of byte-strings are not swapped. The real and imaginary
+ parts of a complex number are swapped individually.
Parameters
----------
>>> list(map(hex, A))
['0x100', '0x1', '0x3322']
- Arrays of strings are not swapped
+ Arrays of byte-strings are not swapped
- >>> A = np.array(['ceg', 'fac'])
+ >>> A = np.array([b'ceg', b'fac'])
>>> A.byteswap()
- Traceback (most recent call last):
- ...
- UnicodeDecodeError: ...
+ array([b'ceg', b'fac'], dtype='|S3')
+
+ ``A.newbyteorder().byteswap()`` produces an array with the same values
+ but different representation in memory
+
+ >>> A = np.array([1, 2, 3])
+ >>> A.view(np.uint8)
+ array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
+ 0, 0], dtype=uint8)
+ >>> A.newbyteorder().byteswap(inplace=True)
+ array([1, 2, 3])
+ >>> A.view(np.uint8)
+ array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
+ 0, 3], dtype=uint8)
"""))
See Also
--------
numpy.sort : Return a sorted copy of an array.
- argsort : Indirect sort.
- lexsort : Indirect stable sort on multiple keys.
- searchsorted : Find elements in sorted array.
- partition: Partial sort.
+ numpy.argsort : Indirect sort.
+ numpy.lexsort : Indirect stable sort on multiple keys.
+ numpy.searchsorted : Find elements in sorted array.
+ numpy.partition: Partial sort.
Notes
-----
Examples
--------
- For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``:
+ For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
+ except that ``tolist`` changes numpy scalars to Python scalars:
- >>> a = np.array([1, 2])
- >>> list(a)
+ >>> a = np.uint32([1, 2])
+ >>> a_list = list(a)
+ >>> a_list
[1, 2]
- >>> a.tolist()
+ >>> type(a_list[0])
+ <class 'numpy.uint32'>
+ >>> a_tolist = a.tolist()
+ >>> a_tolist
[1, 2]
+ >>> type(a_tolist[0])
+ <class 'int'>
- However, for a 2D array, ``tolist`` applies recursively:
+ Additionally, for a 2D array, ``tolist`` applies recursively:
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
See Also
--------
- vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
+ vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
Notes
-----
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
- number of outputs; use `None` for uninitialized outputs to be
+ number of outputs; use None for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
This condition is broadcast over the input. At locations where the
-----
Generalized ufuncs are used internally in many linalg functions, and in
the testing suite; the examples below are taken from these.
- For ufuncs that operate on scalars, the signature is `None`, which is
+ For ufuncs that operate on scalars, the signature is None, which is
equivalent to '()' for every argument.
Examples
.. versionadded:: 1.7.0
- If this is `None`, a reduction is performed over all the axes.
+ If this is None, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
- A location into which the result is stored. If not provided or `None`,
+ A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
to the data-type of the output array if such is provided, or the
the data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
- A location into which the result is stored. If not provided or `None`,
+ A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
- A location into which the result is stored. If not provided or `None`,
+ A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
`__array_interface__` attribute.
Warning: This attribute exists specifically for `__array_interface__`,
- and is not a datatype description compatible with `np.dtype`.
+ and passing it directly to `np.dtype` will not accurately reconstruct
+ some dtypes (e.g., scalar and subarray dtypes).
Examples
--------
>>> np.{ftype}(-.25).as_integer_ratio()
(-1, 4)
""".format(ftype=float_name)))
+
from a list of the field names and dtypes with no additional
dtype parameters.
- Duplicates the C `is_dtype_struct_simple_unaligned_layout` functio.
+ Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
"""
total_offset = 0
for name in dtype.names:
)
+def _name_includes_bit_suffix(dtype):
+ if dtype.type == np.object_:
+ # pointer size varies by system, best to omit it
+ return False
+ elif dtype.type == np.bool_:
+ # implied
+ return False
+ elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
+ # unspecified
+ return False
+ else:
+ return True
+
+
def _name_get(dtype):
- # provides dtype.name.__get__
+ # provides dtype.name.__get__, documented as returning a "bit name"
if dtype.isbuiltin == 2:
# user dtypes don't promise to do anything special
return dtype.type.__name__
- # Builtin classes are documented as returning a "bit name"
- name = dtype.type.__name__
-
- # handle bool_, str_, etc
- if name[-1] == '_':
- name = name[:-1]
+ if issubclass(dtype.type, np.void):
+ # historically, void subclasses preserve their name, eg `record64`
+ name = dtype.type.__name__
+ else:
+ name = _kind_name(dtype)
- # append bit counts to str, unicode, and void
- if np.issubdtype(dtype, np.flexible) and not _isunsized(dtype):
+ # append bit counts
+ if _name_includes_bit_suffix(dtype):
name += "{}".format(dtype.itemsize * 8)
# append metadata to datetimes
- elif dtype.type in (np.datetime64, np.timedelta64):
+ if dtype.type in (np.datetime64, np.timedelta64):
name += _datetime_metadata_str(dtype)
return name
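+
+# Informal expectations for the logic above: np.dtype(np.float64).name ==
+# 'float64' (bit suffix kept); np.dtype(object).name == 'object' and
+# np.dtype(bool).name == 'bool' (suffix omitted); np.dtype('M8[ns]').name
+# == 'datetime64[ns]' (metadata appended).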
self.shape = shape
self.dtype = dtype
- def __str__(self):
- return "Unable to allocate array with shape {} and data type {}".format(self.shape, self.dtype)
+ @property
+ def _total_size(self):
+ num_bytes = self.dtype.itemsize
+ for dim in self.shape:
+ num_bytes *= dim
+ return num_bytes
+
+ @staticmethod
+ def _size_to_string(num_bytes):
+ """ Convert a number of bytes into a binary size string """
+
+ # https://en.wikipedia.org/wiki/Binary_prefix
+ LOG2_STEP = 10
+ STEP = 1024
+ units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']
+
+ unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
+ unit_val = 1 << (unit_i * LOG2_STEP)
+ n_units = num_bytes / unit_val
+ del unit_val
+
+ # ensure we pick a unit that is correct after rounding
+ if round(n_units) == STEP:
+ unit_i += 1
+ n_units /= STEP
+
+ # deal with sizes so large that we don't have units for them
+ if unit_i >= len(units):
+ new_unit_i = len(units) - 1
+ n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
+ unit_i = new_unit_i
+
+ unit_name = units[unit_i]
+ # format with a sensible number of digits
+ if unit_i == 0:
+ # no decimal point on bytes
+ return '{:.0f} {}'.format(n_units, unit_name)
+ elif round(n_units) < 1000:
+ # 3 significant figures, if none are dropped to the left of the .
+ return '{:#.3g} {}'.format(n_units, unit_name)
+ else:
+ # just give all the digits otherwise
+ return '{:#.0f} {}'.format(n_units, unit_name)
+ def __str__(self):
+ size_str = self._size_to_string(self._total_size)
+ return (
+ "Unable to allocate {} for an array with shape {} and data type {}"
+ .format(size_str, self.shape, self.dtype)
+ )
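+
+ # Informal examples of _size_to_string (not doctests):
+ # 1023 -> '1023 bytes', 1024 -> '1.00 KiB', 1234567 -> '1.18 MiB'.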
crashing. User Beware! The value of this attribute is exactly the same
as ``self._array_interface_['data'][0]``.
- Note that unlike `data_as`, a reference will not be kept to the array:
+ Note that unlike ``data_as``, a reference will not be kept to the array:
code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
pointer to a deallocated array, and should be spelt
``(a + b).ctypes.data_as(ctypes.c_void_p)``
"`False`", stacklevel=3)
if threshold is not None:
# forbid the bad threshold arg suggested by stack overflow, gh-12351
- if not isinstance(threshold, numbers.Number) or np.isnan(threshold):
- raise ValueError("threshold must be numeric and non-NAN, try "
+ if not isinstance(threshold, numbers.Number):
+ raise TypeError("threshold must be numeric")
+ if np.isnan(threshold):
+ raise ValueError("threshold must be non-NAN, try "
"sys.maxsize for untruncated representation")
return options
----------
precision : int or None, optional
Number of digits of precision for floating point output (default 8).
- May be `None` if `floatmode` is not `fixed`, to print as many digits as
+ May be None if `floatmode` is not `fixed`, to print as many digits as
necessary to uniquely specify the value.
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr (default 1000).
+ To always use the full repr without summarization, pass `sys.maxsize`.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension (default 3).
See Also
--------
- get_printoptions, set_string_function, array2string
+ get_printoptions, printoptions, set_string_function, array2string
Notes
-----
`formatter` is always reset with a call to `set_printoptions`.
+ Use `printoptions` as a context manager to set the values temporarily.
+
Examples
--------
Floating point precision can be set:
To put back the default options, you can use:
- >>> np.set_printoptions(edgeitems=3,infstr='inf',
+ >>> np.set_printoptions(edgeitems=3, infstr='inf',
... linewidth=75, nanstr='nan', precision=8,
... suppress=False, threshold=1000, formatter=None)
+
+ Also, to temporarily override options, use `printoptions` as a context manager:
+
+ >>> with np.printoptions(precision=2, suppress=True, threshold=5):
+ ... np.linspace(0, 10, 10)
+ array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ])
+
"""
legacy = kwarg.pop('legacy', None)
if kwarg:
See Also
--------
- set_printoptions, set_string_function
+ set_printoptions, printoptions, set_string_function
"""
return _format_options.copy()
pad_left=self.pad_left,
pad_right=self.pad_right)
-# for back-compatibility, we keep the classes for each float type too
-class FloatFormat(FloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn("FloatFormat has been replaced by FloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(FloatFormat, self).__init__(*args, **kwargs)
-
-
-class LongFloatFormat(FloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn("LongFloatFormat has been replaced by FloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(LongFloatFormat, self).__init__(*args, **kwargs)
-
@set_module('numpy')
def format_float_scientific(x, precision=None, unique=True, trim='k',
return r + i
-# for back-compatibility, we keep the classes for each complex type too
-class ComplexFormat(ComplexFloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn(
- "ComplexFormat has been replaced by ComplexFloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(ComplexFormat, self).__init__(*args, **kwargs)
-
-class LongComplexFormat(ComplexFloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn(
- "LongComplexFormat has been replaced by ComplexFloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(LongComplexFormat, self).__init__(*args, **kwargs)
-
class _TimelikeFormat(object):
def __init__(self, data):
return "({})".format(", ".join(str_fields))
-# for backwards compatibility
-class StructureFormat(StructuredVoidFormat):
- def __init__(self, *args, **kwargs):
- # NumPy 1.14, 2018-02-14
- warnings.warn(
- "StructureFormat has been replaced by StructuredVoidFormat",
- DeprecationWarning, stacklevel=2)
- super(StructureFormat, self).__init__(*args, **kwargs)
-
-
def _void_scalar_repr(x):
"""
Implements the repr for structured-void scalars. It is called from the
arr, max_line_width, precision, suppress_small)
-_guarded_str = _recursive_guard()(str)
+@_recursive_guard()
+def _guarded_repr_or_str(v):
+ if isinstance(v, bytes):
+ return repr(v)
+ return str(v)
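+
+# Using repr for bytes avoids the implicit str(bytes) conversion, which
+# emits BytesWarning under ``python -b`` (an error under ``-bb``); the
+# recursive guard still prevents infinite recursion on self-containing
+# 0d object arrays.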
def _array_str_implementation(
# obtain a scalar and call str on it, avoiding problems for subclasses
# for which indexing with () returns a 0d instead of a scalar by using
# ndarray's getindex. Also guard against recursive 0d object arrays.
- return _guarded_str(np.ndarray.__getitem__(a, ()))
+ return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))
return array2string(a, max_line_width, precision, suppress_small, ' ', "")
else:
return multiarray.set_string_function(f, repr)
-set_string_function(_default_array_str, 0)
-set_string_function(_default_array_repr, 1)
+set_string_function(_default_array_str, False)
+set_string_function(_default_array_repr, True)
# Deprecate PyArray_SetNumericOps and PyArray_GetNumericOps,
# Add fields core_dim_flags and core_dim_sizes to PyUFuncObject.
# Add PyUFunc_FromFuncAndDataAndSignatureAndIdentity to ufunc_funcs_api.
+# Version 13 (NumPy 1.17) No change.
+# Version 13 (NumPy 1.18) No change.
0x0000000d = 5b0e8bbded00b166125974fc71e80a33
"""
from __future__ import division, absolute_import, print_function
+from numpy.distutils.conv_template import process_file as process_c_file
+
import sys, os, re
import hashlib
+import io
import textwrap
This function does foo...
*/
"""
- fo = open(filename, 'r')
+ if filename.endswith(('.c.src', '.h.src')):
+ fo = io.StringIO(process_c_file(filename))
+ else:
+ fo = open(filename, 'r')
functions = []
return_type = None
function_name = None
elif state == STATE_ARGS:
if line.startswith('{'):
# finished
- fargs_str = ' '.join(function_args).rstrip(' )')
+ # remove any white space and the closing bracket:
+ fargs_str = ' '.join(function_args).rstrip()[:-1].rstrip()
fargs = split_arguments(fargs_str)
f = Function(function_name, return_type, fargs,
'\n'.join(doclist))
'P': 'OBJECT',
}
-all = '?bBhHiIlLqQefdgFDGOMm'
+noobj = '?bBhHiIlLqQefdgFDGmM'
+all = '?bBhHiIlLqQefdgFDGOmM'
+
O = 'O'
P = 'P'
ints = 'bBhHiIlLqQ'
noint = inexact+O
nointP = inexact+P
allP = bints+times+flts+cmplxP
-nobool = all[1:]
-noobj = all[:-3]+all[-2:]
-nobool_or_obj = all[1:-3]+all[-2:]
-nobool_or_datetime = all[1:-2]+all[-1:]
+nobool_or_obj = noobj[1:]
+nobool_or_datetime = noobj[1:-1] + O # includes m - timedelta64
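+# Spelled out: nobool_or_obj == 'bBhHiIlLqQefdgFDGmM' and
+# nobool_or_datetime == 'bBhHiIlLqQefdgFDGmO' (drops 'M', keeps 'm' and 'O').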
intflt = ints+flts
intfltcmplx = ints+flts+cmplx
nocmplx = bints+times+flts
Ufunc(2, 1, None, # Zero is only a unit to the right, not the left
docstrings.get('numpy.core.umath.subtract'),
'PyUFunc_SubtractionTypeResolver',
- TD(notimes_or_obj, simd=[('avx2', ints)]),
+ TD(ints + inexact, simd=[('avx2', ints)]),
[TypeDescription('M', FullTypeDescr, 'Mm', 'M'),
TypeDescription('m', FullTypeDescr, 'mm', 'm'),
TypeDescription('M', FullTypeDescr, 'MM', 'm'),
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.square'),
None,
- TD(ints+inexact, simd=[('avx2', ints)]),
+ TD(ints+inexact, simd=[('avx2', ints), ('fma', 'fd'), ('avx512f', 'fd')]),
TD(O, f='Py_square'),
),
'reciprocal':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.reciprocal'),
None,
- TD(ints+inexact, simd=[('avx2', ints)]),
+ TD(ints+inexact, simd=[('avx2', ints), ('fma', 'fd'), ('avx512f','fd')]),
TD(O, f='Py_reciprocal'),
),
# This is no longer used as numpy.ones_like, however it is
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.absolute'),
'PyUFunc_AbsoluteTypeResolver',
- TD(bints+flts+timedeltaonly),
+ TD(bints+flts+timedeltaonly, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD(cmplx, out=('f', 'd', 'g')),
TD(O, f='PyNumber_Absolute'),
),
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.negative'),
'PyUFunc_NegativeTypeResolver',
- TD(bints+flts+timedeltaonly, simd=[('avx2', ints)]),
+ TD(ints+flts+timedeltaonly, simd=[('avx2', ints)]),
TD(cmplx, f='neg'),
TD(O, f='PyNumber_Negative'),
),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'greater_equal':
Ufunc(2, 1, None,
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'less':
Ufunc(2, 1, None,
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'less_equal':
Ufunc(2, 1, None,
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'equal':
Ufunc(2, 1, None,
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'not_equal':
Ufunc(2, 1, None,
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'logical_and':
Ufunc(2, 1, True_,
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalAnd'),
+ TD(O, f='npy_ObjectLogicalAnd', out='?'),
),
'logical_not':
Ufunc(1, 1, None,
None,
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalNot'),
+ TD(O, f='npy_ObjectLogicalNot', out='?'),
),
'logical_or':
Ufunc(2, 1, False_,
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalOr'),
+ TD(O, f='npy_ObjectLogicalOr', out='?'),
),
'logical_xor':
Ufunc(2, 1, False_,
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cos'),
None,
- TD(inexact, f='cos', astype={'e':'f'}),
+ TD('e', f='cos', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='cos'),
TD(P, f='cos'),
),
'sin':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sin'),
None,
- TD(inexact, f='sin', astype={'e':'f'}),
+ TD('e', f='sin', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='sin'),
TD(P, f='sin'),
),
'tan':
docstrings.get('numpy.core.umath.exp'),
None,
TD('e', f='exp', astype={'e':'f'}),
- TD('f', simd=[('avx2', 'f'), ('avx512f', 'f')]),
- TD(inexact, f='exp', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='exp'),
TD(P, f='exp'),
),
'exp2':
docstrings.get('numpy.core.umath.log'),
None,
TD('e', f='log', astype={'e':'f'}),
- TD('f', simd=[('avx2', 'f'), ('avx512f', 'f')]),
- TD(inexact, f='log', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='log'),
TD(P, f='log'),
),
'log2':
docstrings.get('numpy.core.umath.sqrt'),
None,
TD('e', f='sqrt', astype={'e':'f'}),
- TD(inexactvec),
- TD(inexact, f='sqrt', astype={'e':'f'}),
+ TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
+ TD('fdg' + cmplx, f='sqrt'),
TD(P, f='sqrt'),
),
'cbrt':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.ceil'),
None,
- TD(flts, f='ceil', astype={'e':'f'}),
+ TD('e', f='ceil', astype={'e':'f'}),
+ TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
+ TD('fdg', f='ceil'),
TD(O, f='npy_ObjectCeil'),
),
'trunc':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.trunc'),
None,
- TD(flts, f='trunc', astype={'e':'f'}),
+ TD('e', f='trunc', astype={'e':'f'}),
+ TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
+ TD('fdg', f='trunc'),
TD(O, f='npy_ObjectTrunc'),
),
'fabs':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.floor'),
None,
- TD(flts, f='floor', astype={'e':'f'}),
+ TD('e', f='floor', astype={'e':'f'}),
+ TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
+ TD('fdg', f='floor'),
TD(O, f='npy_ObjectFloor'),
),
'rint':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rint'),
None,
- TD(inexact, f='rint', astype={'e':'f'}),
+ TD('e', f='rint', astype={'e':'f'}),
+ TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
+ TD('fdg' + cmplx, f='rint'),
TD(P, f='rint'),
),
'arctan2':
'isnan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isnan'),
- None,
- TD(nodatetime_or_obj, out='?'),
+ 'PyUFunc_IsFiniteTypeResolver',
+ TD(noobj, out='?'),
),
'isnat':
Ufunc(1, 1, None,
'isinf':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isinf'),
- None,
- TD(nodatetime_or_obj, out='?'),
+ 'PyUFunc_IsFiniteTypeResolver',
+ TD(noobj, out='?'),
),
'isfinite':
Ufunc(1, 1, None,
'PARAMS': textwrap.dedent("""
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
- a shape that the inputs broadcast to. If not provided or `None`,
+ a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
Returns
-------
y : ndarray or bool
- Boolean result of the logical OR operation applied to the elements
+ Boolean result of the logical AND operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not
- provided or `None`, a freshly-allocated array is returned.
+ provided or None, a freshly-allocated array is returned.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
Many of the Python string operations that have optional arguments
do not use 'None' to indicate a default value. In these cases,
- we need to remove all `None` arguments, and those following them.
+ we need to remove all None arguments, and those following them.
"""
newargs = []
for chk in args:
a : array_like of str or unicode
sep : str or unicode, optional
- If `sep` is not specified or `None`, any whitespace string
+ If `sep` is not specified or None, any whitespace string
is a separator.
maxsplit : int, optional
If `maxsplit` is given, at most `maxsplit` splits are done,
a : array_like of str or unicode
sep : str or unicode, optional
- If `sep` is not specified or `None`, any whitespace string is a
+ If `sep` is not specified or None, any whitespace string is a
separator.
maxsplit : int, optional
This constructor creates the array, using `buffer` (with `offset`
and `strides`) if it is not ``None``. If `buffer` is ``None``, then
constructs a new array with `strides` in "C order", unless both
- ``len(shape) >= 2`` and ``order='Fortran'``, in which case `strides`
+ ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
is in "Fortran order".
Methods
unicode : bool, optional
When true, the resulting `chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
- `None` and `obj` is one of the following:
+ None and `obj` is one of the following:
- a `chararray`,
- an ndarray of type `str` or `unicode`
unicode : bool, optional
When true, the resulting `chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
- `None` and `obj` is one of the following:
+ None and `obj` is one of the following:
- a `chararray`,
- an ndarray of type `str` or `unicode`
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
- 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
+ 'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
See Also
--------
ndarray.choose : equivalent method
+ numpy.take_along_axis : Preferable if `choices` is an array
Notes
-----
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
- argsort : Full indirect sort
+ argsort : Full indirect sort.
+ take_along_axis : Apply ``index_array`` from argpartition
+ to an array as if by calling partition.
Notes
-----
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
+ Multi-dimensional array:
+
+ >>> x = np.array([[3, 4, 2], [1, 3, 1]])
+ >>> index_array = np.argpartition(x, kth=1, axis=-1)
+ >>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1)
+ array([[2, 3, 4],
+ [1, 1, 3]])
+
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
.. versionadded:: 1.12.0
- quicksort has been changed to an introsort which will switch
- heapsort when it does not make enough progress. This makes its
- worst case O(n*log(n)).
-
- 'stable' automatically choses the best stable sorting algorithm
- for the data type being sorted. It, along with 'mergesort' is
- currently mapped to timsort or radix sort depending on the
- data type. API forward compatibility currently limits the
+ quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
+ When sorting does not make enough progress it switches to
+ `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
+ This implementation makes quicksort O(n*log(n)) in the worst case.
+
+ 'stable' automatically chooses the best stable sorting algorithm
+ for the data type being sorted.
+ It, along with 'mergesort' is currently mapped to
+ `timsort <https://en.wikipedia.org/wiki/Timsort>`_
+ or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
+ depending on the data type.
+ API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
- default sort if none is chosen. For details of timsort, refer to
+ default sort if none is chosen. For timsort details, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
+ .. versionchanged:: 1.17.0
+
+ NaT now sorts to the end of arrays for consistency with NaN.
+
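A quick doctest-style illustration of selecting a sort kind (a sketch; any of
'quicksort', 'mergesort', 'heapsort', or 'stable' is accepted):

    >>> a = np.array([3, 1, 2, 1])
    >>> np.sort(a, kind='stable')   # mapped to timsort or radix sort by dtype
    array([1, 1, 2, 3])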
Examples
--------
>>> a = np.array([[1,4],[3,1]])
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
+ take_along_axis : Apply ``index_array`` from argsort
+ to an array as if by calling sort.
Notes
-----
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
+ take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+ from argmax to an array as if by calling max.
Notes
-----
>>> np.argmax(b) # Only the first occurrence is returned.
1
+ >>> x = np.array([[4,2,3], [1,0,3]])
+ >>> index_array = np.argmax(x, axis=-1)
+ >>> # Same as np.max(x, axis=-1, keepdims=True)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+ array([[4],
+ [3]])
+ >>> # Same as np.max(x, axis=-1)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+ array([4, 3])
+
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
+ take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+ from argmin to an array as if by calling min.
Notes
-----
>>> np.argmin(b) # Only the first occurrence is returned.
0
+ >>> x = np.array([[4,2,3], [1,0,3]])
+ >>> index_array = np.argmin(x, axis=-1)
+ >>> # Same as np.min(x, axis=-1, keepdims=True)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+ array([[2],
+ [0]])
+ >>> # Same as np.min(x, axis=-1)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+ array([2, 0])
+
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
Raises
------
ValueError
- If `axis` is not `None`, and an axis being squeezed is not of length 1
+ If `axis` is not None, and an axis being squeezed is not of length 1
See Also
--------
which returns a row for each non-zero element.
.. note::
- When called on a zero-d array or scalar, ``nonzero(a)`` is treated
- as ``nonzero(atleast_1d(a))``.
- ..deprecated:: 1.17.0
- Use `atleast_1d` explicitly if this behavior is deliberate.
+ When called on a zero-d array or scalar, ``nonzero(a)`` is treated
+ as ``nonzero(atleast_1d(a))``.
+
+ .. deprecated:: 1.17.0
+
+ Use `atleast_1d` explicitly if this behavior is deliberate.
Parameters
----------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
np.extract: Equivalent method when working on 1-D arrays
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Examples
--------
----------
a : array_like
Array containing elements to clip.
- a_min : scalar or array_like or `None`
- Minimum value. If `None`, clipping is not performed on lower
+ a_min : scalar or array_like or None
+ Minimum value. If None, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
- `None`.
- a_max : scalar or array_like or `None`
- Maximum value. If `None`, clipping is not performed on upper
+ None.
+ a_max : scalar or array_like or None
+ Maximum value. If None, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
- `None`. If `a_min` or `a_max` are array_like, then the three
+ None. If `a_min` or `a_max` are array_like, then the three
arrays will be broadcasted to match their shapes.
out : ndarray, optional
The results will be placed in this array. It may be the input
See Also
--------
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Examples
--------
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
- The default (`axis` = `None`) is to perform a logical OR over all
+ The default (``axis=None``) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
- See `doc.ufuncs` (Section "Output arguments") for details.
+ See `ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
- The default (`axis` = `None`) is to perform a logical AND over all
+ The default (``axis=None``) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
- will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
- "Output arguments") for more details.
+ will consist of 0.0's and 1.0's). See `ufuncs-output-type` for more
+ details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
- but the type will be cast if necessary. See `doc.ufuncs`
- (Section "Output arguments") for more details.
+ but the type will be cast if necessary. See `ufuncs-output-type` for
+ more details.
Returns
-------
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
- See `doc.ufuncs` (Section "Output arguments") for more details.
+ See `ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
- See `doc.ufuncs` (Section "Output arguments") for more details.
+ See `ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
7
"""
+ # NumPy 1.18.0, 2019-08-02
+ warnings.warn(
+ "`np.alen` is deprecated, use `len` instead",
+ DeprecationWarning, stacklevel=2)
try:
return len(a)
except TypeError:
See Also
--------
ndarray.prod : equivalent method
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
See Also
--------
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
- values will be cast if necessary. See `doc.ufuncs` (Section
- "Output arguments") for details.
+ values will be cast if necessary. See `ufuncs-output-type` for more
+ details.
Returns
-------
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
- -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
- to the inexact representation of decimal fractions in the IEEE
- floating point standard [1]_ and errors introduced when scaling
- by powers of ten.
+ -0.5 and 0.5 round to 0.0, etc.
+
+ ``np.around`` uses a fast but sometimes inexact algorithm to round
+ floating-point datatypes. For positive `decimals` it is equivalent to
+ ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has
+ error due to the inexact representation of decimal fractions in the IEEE
+ floating point standard [1]_ and errors introduced when scaling by powers
+ of ten. For instance, note the extra "1" in the following:
+
+ >>> np.round(56294995342131.5, 3)
+ 56294995342131.51
+
+ If your goal is to print such values with a fixed number of decimals, it is
+ preferable to use numpy's float printing routines to limit the number of
+ printed decimals:
+
+ >>> np.format_float_positional(56294995342131.5, precision=3)
+ '56294995342131.5'
+
+ The float printing routines use an accurate but much more computationally
+ demanding algorithm to compute the number of digits after the decimal
+ point.
+
+ Alternatively, Python's builtin `round` function uses a more accurate
+ but slower algorithm for 64-bit floating point values:
+
+ >>> round(56294995342131.5, 3)
+ 56294995342131.5
+ >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997
+ (16.06, 16.05)
+
References
----------
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
- See `doc.ufuncs` for details.
+ See `ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
See Also
--------
var, mean, nanmean, nanstd, nanvar
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
- the default is `float32`; for arrays of float types it is the same as
+ the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
See Also
--------
std, mean, nanmean, nanstd, nanvar
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
numpy.all : Equivalent function; see for details.
"""
return all(*args, **kwargs)
-
-
-@array_function_dispatch(_ndim_dispatcher)
-def rank(a):
- """
- Return the number of dimensions of an array.
-
- .. note::
- This function is deprecated in NumPy 1.9 to avoid confusion with
- `numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
- should be used instead.
-
- See Also
- --------
- ndim : equivalent non-deprecated function
-
- Notes
- -----
- In the old Numeric package, `rank` was the term used for the number of
- dimensions, but in NumPy `ndim` is used instead.
- """
- # 2014-04-12, 1.9
- warnings.warn(
- "`rank` is deprecated; use the `ndim` attribute or function instead. "
- "To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
- VisibleDeprecationWarning, stacklevel=3)
- return ndim(a)
import functools
import warnings
import operator
+import types
from . import numeric as _nx
from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
overrides.array_function_dispatch, module='numpy')
-def _index_deprecate(i, stacklevel=2):
- try:
- i = operator.index(i)
- except TypeError:
- msg = ("object of type {} cannot be safely interpreted as "
- "an integer.".format(type(i)))
- i = int(i)
- stacklevel += 1
- warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel)
- return i
-
-
def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
dtype=None, axis=None):
return (start, stop)
>>> plt.show()
"""
- # 2016-02-25, 1.12
- num = _index_deprecate(num)
+ try:
+ num = operator.index(num)
+ except TypeError:
+ raise TypeError(
+ "object of type {} cannot be safely interpreted as an integer."
+ .format(type(num)))
+
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# from overriding what class is produced, and thus prevents, e.g. use of Quantities,
# see gh-7142. Hence, we multiply in place only for standard scalar types.
_mult_inplace = _nx.isscalar(delta)
- if num > 1:
+ if div > 0:
step = delta / div
if _nx.any(step == 0):
# Special handling for denormal numbers, gh-5437
else:
y = y * step
else:
- # 0 and 1 item long sequences have an undefined step
+ # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
+ # have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
return result.astype(dtype, copy=False)
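With this change a non-integral ``num`` fails immediately instead of emitting a
DeprecationWarning; an illustrative session (behavior as of this patch):

    >>> np.linspace(0, 1, num=2.5)
    Traceback (most recent call last):
        ...
    TypeError: object of type <class 'float'> cannot be safely interpreted as an integer.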
-#always succeed
-def _add_docstring(obj, doc):
+def _needs_add_docstring(obj):
+ """
+ Returns true if the only way to set the docstring of `obj` from python is
+ via add_docstring.
+
+ This function errs on the side of being overly conservative.
+ """
+ Py_TPFLAGS_HEAPTYPE = 1 << 9
+
+ if isinstance(obj, (types.FunctionType, types.MethodType, property)):
+ return False
+
+ if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
+ return False
+
+ return True
+
+
+def _add_docstring(obj, doc, warn_on_python):
+ if warn_on_python and not _needs_add_docstring(obj):
+ warnings.warn(
+ "add_newdoc was used on a pure-python object {}. "
+ "Prefer to attach it directly to the source."
+ .format(obj),
+ UserWarning,
+ stacklevel=3)
try:
add_docstring(obj, doc)
except Exception:
pass
-def add_newdoc(place, obj, doc):
+def add_newdoc(place, obj, doc, warn_on_python=True):
"""
- Adds documentation to obj which is in module place.
+ Add documentation to an existing object, typically one defined in C.
- If doc is a string add it to obj as a docstring
+ The purpose is to allow easier editing of the docstrings without requiring
+ a re-compile. This exists primarily for internal use within numpy itself.
- If doc is a tuple, then the first element is interpreted as
- an attribute of obj and the second as the docstring
- (method, docstring)
-
- If doc is a list, then each element of the list should be a
- sequence of length two --> [(method1, docstring1),
- (method2, docstring2), ...]
+ Parameters
+ ----------
+ place : str
+ The absolute name of the module to import from
+ obj : str
+ The name of the object to add documentation to, typically a class or
+ function name
+ doc : {str, Tuple[str, str], List[Tuple[str, str]]}
+ If a string, the documentation to apply to `obj`
+
+ If a tuple, then the first element is interpreted as an attribute of
+ `obj` and the second as the docstring to apply - ``(method, docstring)``
+
+ If a list, then each element of the list should be a tuple of length
+ two - ``[(method1, docstring1), (method2, docstring2), ...]``
+ warn_on_python : bool
+ If True, the default, emit `UserWarning` if this is used to attach
+ documentation to a pure-python object.
+ Notes
+ -----
This routine never raises an error if the docstring can't be written, but
will raise an error if the object being documented does not exist.
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
+
+ Since this function grabs the ``char *`` from a c-level str object and puts
+ it into the ``tp_doc`` slot of the type of `obj`, it violates a number of
+ C-API best-practices, by:
+
+ - modifying a `PyTypeObject` after calling `PyType_Ready`
+ - calling `Py_INCREF` on the str and losing the reference, so the str
+ will never be released
+
+ If possible it should be avoided.
"""
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
- _add_docstring(new, doc.strip())
+ _add_docstring(new, doc.strip(), warn_on_python)
elif isinstance(doc, tuple):
- _add_docstring(getattr(new, doc[0]), doc[1].strip())
+ attr, docstring = doc
+ _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
elif isinstance(doc, list):
- for val in doc:
- _add_docstring(getattr(new, val[0]), val[1].strip())
+ for attr, docstring in doc:
+ _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
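For reference, a typical call site looks roughly like the following (a sketch;
the module path and attribute names are illustrative):

    add_newdoc('numpy.core.multiarray', 'flatiter',
        """
        Flat iterator object to iterate over arrays.
        """)

    add_newdoc('numpy.core.multiarray', 'flatiter', ('base',
        """
        A reference to the array that is iterated over.
        """))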
* type of the function which translates a set of coordinates to a
* pointer to the data
*/
-typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*);
+typedef char* (*npy_iter_get_dataptr_t)(
+ PyArrayIterObject* iter, const npy_intp*);
struct PyArrayIterObject_tag {
PyObject_HEAD
#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num)
#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL)
#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL)
-#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0)
+#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \
+ !PyDataType_HASFIELDS(dtype))
#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0)
#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj))
#else
#define NPY_GCC_TARGET_AVX
#endif
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS
+#define HAVE_ATTRIBUTE_TARGET_FMA
+#define NPY_GCC_TARGET_FMA __attribute__((target("avx2,fma")))
+#endif
+
#if defined HAVE_ATTRIBUTE_TARGET_AVX2 && defined HAVE_LINK_AVX2
#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2")))
-#elif defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS
-#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2")))
#else
#define NPY_GCC_TARGET_AVX2
#endif
#define NPY_COEFF_Q3_LOGf 9.864942958519418960339e-01f
#define NPY_COEFF_Q4_LOGf 1.546476374983906719538e-01f
#define NPY_COEFF_Q5_LOGf 5.875095403124574342950e-03f
-
+/*
+ * Constants used in vector implementation of sinf/cosf(x)
+ */
+#define NPY_TWO_O_PIf 0x1.45f306p-1f
+#define NPY_CODY_WAITE_PI_O_2_HIGHf -0x1.921fb0p+00f
+#define NPY_CODY_WAITE_PI_O_2_MEDf -0x1.5110b4p-22f
+#define NPY_CODY_WAITE_PI_O_2_LOWf -0x1.846988p-48f
+#define NPY_COEFF_INVF0_COSINEf 0x1.000000p+00f
+#define NPY_COEFF_INVF2_COSINEf -0x1.000000p-01f
+#define NPY_COEFF_INVF4_COSINEf 0x1.55553cp-05f
+#define NPY_COEFF_INVF6_COSINEf -0x1.6c06dcp-10f
+#define NPY_COEFF_INVF8_COSINEf 0x1.98e616p-16f
+#define NPY_COEFF_INVF3_SINEf -0x1.555556p-03f
+#define NPY_COEFF_INVF5_SINEf 0x1.11119ap-07f
+#define NPY_COEFF_INVF7_SINEf -0x1.a06bbap-13f
+#define NPY_COEFF_INVF9_SINEf 0x1.7d3bbcp-19f
/*
* Integer functions.
*/
NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b);
NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b);
+NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b);
+
+NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b);
+NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b);
+NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b);
+
+/*
+ * avx function has a common API for both sin & cos. This enum is used to
+ * distinguish between the two
+ */
+typedef enum {
+ npy_compute_sin,
+ npy_compute_cos
+} NPY_TRIG_OP;
+
/*
* C99 double math funcs
*/
#define NPY_1_13_API_VERSION 0x00000008
#define NPY_1_14_API_VERSION 0x00000008
#define NPY_1_15_API_VERSION 0x00000008
+#define NPY_1_16_API_VERSION 0x00000008
+#define NPY_1_17_API_VERSION 0x00000008
+#define NPY_1_18_API_VERSION 0x00000008
#endif
--- /dev/null
+#ifndef _RANDOM_BITGEN_H
+#define _RANDOM_BITGEN_H
+
+#pragma once
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Must match the declaration in numpy/random/<any>.pxd */
+
+typedef struct bitgen {
+ void *state;
+ uint64_t (*next_uint64)(void *st);
+ uint32_t (*next_uint32)(void *st);
+ double (*next_double)(void *st);
+ uint64_t (*next_raw)(void *st);
+} bitgen_t;
+
+
+#endif
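From Python, this function-pointer table is reachable through a bit
generator's ``ctypes`` (or ``cffi``) interface; a minimal sketch, assuming a
NumPy build that ships ``PCG64``:

    from numpy.random import PCG64

    bg = PCG64(12345)
    iface = bg.ctypes                      # ctypes view of the bitgen_t struct
    raw = iface.next_uint64(iface.state)   # one raw 64-bit draw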
--- /dev/null
+#ifndef _RANDOMDGEN__DISTRIBUTIONS_H_
+#define _RANDOMDGEN__DISTRIBUTIONS_H_
+
+#include "Python.h"
+#include "numpy/npy_common.h"
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "numpy/npy_math.h"
+#include "numpy/random/bitgen.h"
+
+/*
+ * RAND_INT_TYPE is used to share integer generators with RandomState which
+ * used long in place of int64_t. If changing a distribution that uses
+ * RAND_INT_TYPE, then the original unmodified copy must be retained for
+ * use in RandomState by copying to the legacy distributions source file.
+ */
+#ifdef NP_RANDOM_LEGACY
+#define RAND_INT_TYPE long
+#define RAND_INT_MAX LONG_MAX
+#else
+#define RAND_INT_TYPE int64_t
+#define RAND_INT_MAX INT64_MAX
+#endif
+
+#ifdef _MSC_VER
+#define DECLDIR __declspec(dllexport)
+#else
+#define DECLDIR extern
+#endif
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? x : y)
+#define MAX(x, y) (((x) > (y)) ? x : y)
+#endif
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338328
+#endif
+
+typedef struct s_binomial_t {
+ int has_binomial; /* !=0: following parameters initialized for binomial */
+ double psave;
+ RAND_INT_TYPE nsave;
+ double r;
+ double q;
+ double fm;
+ RAND_INT_TYPE m;
+ double p1;
+ double xm;
+ double xl;
+ double xr;
+ double c;
+ double laml;
+ double lamr;
+ double p2;
+ double p3;
+ double p4;
+} binomial_t;
+
+DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state);
+DECLDIR double random_standard_uniform(bitgen_t *bitgen_state);
+DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *);
+
+DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
+DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
+DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
+DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
+
+DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
+DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
+DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *);
+DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *);
+
+DECLDIR double random_standard_normal(bitgen_t *bitgen_state);
+DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state);
+DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *);
+DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
+DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape);
+
+DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale);
+
+DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale);
+DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale);
+
+DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale);
+DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range);
+DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b);
+DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df);
+DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden);
+DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state);
+DECLDIR double random_pareto(bitgen_t *bitgen_state, double a);
+DECLDIR double random_weibull(bitgen_t *bitgen_state, double a);
+DECLDIR double random_power(bitgen_t *bitgen_state, double a);
+DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma);
+DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode);
+DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df);
+DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+ double nonc);
+DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
+ double dfden, double nonc);
+DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale);
+DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa);
+DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+ double right);
+
+DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam);
+DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n,
+ double p);
+
+DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial);
+
+DECLDIR RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a);
+DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state,
+ int64_t good, int64_t bad, int64_t sample);
+DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max);
+
+/* Generate random uint64 numbers in closed interval [off, off + rng]. */
+DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off,
+ uint64_t rng, uint64_t mask,
+ bool use_masked);
+
+/* Generate random uint32 numbers in closed interval [off, off + rng]. */
+DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
+ uint32_t off, uint32_t rng,
+ uint32_t mask, bool use_masked,
+ int *bcnt, uint32_t *buf);
+DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
+ uint16_t off, uint16_t rng,
+ uint16_t mask, bool use_masked,
+ int *bcnt, uint32_t *buf);
+DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off,
+ uint8_t rng, uint8_t mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf);
+DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off,
+ npy_bool rng, npy_bool mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf);
+
+DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off,
+ uint64_t rng, npy_intp cnt,
+ bool use_masked, uint64_t *out);
+DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off,
+ uint32_t rng, npy_intp cnt,
+ bool use_masked, uint32_t *out);
+DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off,
+ uint16_t rng, npy_intp cnt,
+ bool use_masked, uint16_t *out);
+DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off,
+ uint8_t rng, npy_intp cnt,
+ bool use_masked, uint8_t *out);
+DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off,
+ npy_bool rng, npy_intp cnt,
+ bool use_masked, npy_bool *out);
+
+DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix,
+ double *pix, npy_intp d, binomial_t *binomial);
+
+/* multivariate hypergeometric, "count" method */
+DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates);
+
+/* multivariate hypergeometric, "marginals" method */
+DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates);
+
+/* Common to legacy-distributions.c and distributions.c but not exported */
+
+RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+double random_loggam(double x);
+static NPY_INLINE double next_double(bitgen_t *bitgen_state) {
+ return bitgen_state->next_double(bitgen_state->state);
+}
+
+#endif
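These entry points back the methods on ``numpy.random.Generator``; the normal
way to exercise them from Python is through a Generator instance (a usage
sketch, not part of the header):

    from numpy.random import Generator, PCG64

    rng = Generator(PCG64(42))
    rng.standard_normal(3)     # backed by random_standard_normal_fill
    rng.binomial(10, 0.5, 4)   # backed by random_binomial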
#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"
-#define UFUNC_CHECK_ERROR(arg) \
- do {if ((((arg)->obj & UFUNC_OBJ_NEEDS_API) && PyErr_Occurred()) || \
- ((arg)->errormask && \
- PyUFunc_checkfperr((arg)->errormask, \
- (arg)->errobj, \
- &(arg)->first))) \
- goto fail;} while (0)
-
/*
* THESE MACROS ARE DEPRECATED.
* Use npy_set_floatstatus_* in the npymath library.
#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
#define UFUNC_FPE_INVALID NPY_FPE_INVALID
-#define UFUNC_CHECK_STATUS(ret) \
- { \
- ret = npy_clear_floatstatus(); \
- }
#define generate_divbyzero_error() npy_set_floatstatus_divbyzero()
#define generate_overflow_error() npy_set_floatstatus_overflow()
+++ /dev/null
-"""Defines a multi-dimensional array and useful procedures for Numerical computation.
-
-Functions
-
-- array - NumPy Array construction
-- zeros - Return an array of all zeros
-- empty - Return an uninitialized array
-- shape - Return shape of sequence or array
-- rank - Return number of dimensions
-- size - Return number of elements in entire array or a
- certain dimension
-- fromstring - Construct array from (byte) string
-- take - Select sub-arrays using sequence of indices
-- put - Set sub-arrays using sequence of 1-D indices
-- putmask - Set portion of arrays using a mask
-- reshape - Return array with new shape
-- repeat - Repeat elements of array
-- choose - Construct new array from indexed array tuple
-- correlate - Correlate two 1-d arrays
-- searchsorted - Search for element in 1-d array
-- sum - Total sum over a specified dimension
-- average - Average, possibly weighted, over axis or array.
-- cumsum - Cumulative sum over a specified dimension
-- product - Total product over a specified dimension
-- cumproduct - Cumulative product over a specified dimension
-- alltrue - Logical and over an entire axis
-- sometrue - Logical or over an entire axis
-- allclose - Tests if sequences are essentially equal
-
-More Functions:
-
-- arange - Return regularly spaced array
-- asarray - Guarantee NumPy array
-- convolve - Convolve two 1-d arrays
-- swapaxes - Exchange axes
-- concatenate - Join arrays together
-- transpose - Permute axes
-- sort - Sort elements of array
-- argsort - Indices of sorted array
-- argmax - Index of largest value
-- argmin - Index of smallest value
-- inner - Innerproduct of two arrays
-- dot - Dot product (matrix multiplication)
-- outer - Outerproduct of two arrays
-- resize - Return array with arbitrary new shape
-- indices - Tuple of indices
-- fromfunction - Construct array from universal function
-- diagonal - Return diagonal array
-- trace - Trace of array
-- dump - Dump array to file object (pickle)
-- dumps - Return pickled string representing data
-- load - Return array stored in file object
-- loads - Return array from pickled string
-- ravel - Return array as 1-D
-- nonzero - Indices of nonzero elements for 1-D array
-- shape - Shape of array
-- where - Construct array from binary result
-- compress - Elements of array where condition is true
-- clip - Clip array between two values
-- ones - Array of all ones
-- identity - 2-D identity array (matrix)
-
-(Universal) Math Functions
-
- add logical_or exp
- subtract logical_xor log
- multiply logical_not log10
- divide maximum sin
- divide_safe minimum sinh
- conjugate bitwise_and sqrt
- power bitwise_or tan
- absolute bitwise_xor tanh
- negative invert ceil
- greater left_shift fabs
- greater_equal right_shift floor
- less arccos arctan2
- less_equal arcsin fmod
- equal arctan hypot
- not_equal cos around
- logical_and cosh sign
- arccosh arcsinh arctanh
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['testing']
-global_symbols = ['*']
from . import overrides
from . import umath
+from . import shape_base
from .overrides import set_module
from .umath import (multiply, invert, sin, PINF, NAN)
from . import numerictypes
overrides.array_function_dispatch, module='numpy')
-def loads(*args, **kwargs):
- # NumPy 1.15.0, 2017-12-10
- warnings.warn(
- "np.core.numeric.loads is deprecated, use pickle.loads instead",
- DeprecationWarning, stacklevel=2)
- return pickle.loads(*args, **kwargs)
-
-
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',
'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
- 'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones',
+ 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',
'identity', 'allclose', 'compare_chararrays', 'putmask',
'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
fill_value : scalar
Fill value.
dtype : data-type, optional
- The desired data-type for the array The default, `None`, means
+ The desired data-type for the array. The default, None, means
`np.array(fill_value).dtype`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
- >>> np.isfortran(np.array([1, 2], order='FORTRAN'))
+ >>> np.isfortran(np.array([1, 2], order='F'))
False
"""
Returns
-------
- index_array : ndarray
+ index_array : (N, a.ndim) ndarray
Indices of elements that are non-zero. Indices are grouped by element.
+ This array will have shape ``(N, a.ndim)`` where ``N`` is the number of
+ non-zero items.
See Also
--------
Notes
-----
- ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
+ ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,
+ but produces a result of the correct shape for a 0D array.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``nonzero(a)`` instead.
[1, 2]])
"""
+ # nonzero does not behave well on 0d, so promote to 1d
+ if np.ndim(a) == 0:
+ a = shape_base.atleast_1d(a)
+ # then remove the added dimension
+ return argwhere(a)[:,:0]
return transpose(nonzero(a))
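The practical effect on 0-d input (illustrative doctest):

    >>> np.argwhere(1).shape   # one non-zero element, zero index columns
    (1, 0)
    >>> np.argwhere(0).shape
    (0, 0)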
Returns
-------
output : ndarray
- The tensor dot product of the input.
+ The tensor dot product of the input.
See Also
--------
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
+ The shape of the result consists of the non-contracted axes of the
+ first tensor, followed by the non-contracted axes of the second.
+
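For example (an illustrative doctest of the resulting shape):

    >>> a = np.ones((3, 4, 5))
    >>> b = np.ones((4, 3, 2))
    >>> np.tensordot(a, b, axes=([1, 0], [0, 1])).shape
    (5, 2)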
Examples
--------
A "traditional" example:
@set_module('numpy')
-def isscalar(num):
+def isscalar(element):
"""
- Returns True if the type of `num` is a scalar type.
+ Returns True if the type of `element` is a scalar type.
Parameters
----------
- num : any
+ element : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
- True if `num` is a scalar type, False if it is not.
+ True if `element` is a scalar type, False if it is not.
See Also
--------
Notes
-----
- In almost all cases ``np.ndim(x) == 0`` should be used instead of this
- function, as that will also return true for 0d arrays. This is how
- numpy overloads functions in the style of the ``dx`` arguments to `gradient`
- and the ``bins`` argument to `histogram`. Some key differences:
+ If you need a stricter way to identify a *numerical* scalar, use
+ ``isinstance(x, numbers.Number)``, as that returns ``False`` for most
+ non-numerical elements such as strings.
+
+ In most cases ``np.ndim(x) == 0`` should be used instead of this function,
+ as that will also return true for 0d arrays. This is how numpy overloads
+ functions in the style of the ``dx`` arguments to `gradient` and the ``bins``
+ argument to `histogram`. Some key differences:
+--------------------------------------+---------------+-------------------+
| x |``isscalar(x)``|``np.ndim(x) == 0``|
True
"""
- return (isinstance(num, generic)
- or type(num) in ScalarType
- or isinstance(num, numbers.Number))
+ return (isinstance(element, generic)
+ or type(element) in ScalarType
+ or isinstance(element, numbers.Number))
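A quick contrast between the two predicates (doctest sketch):

    >>> np.isscalar(3.1)
    True
    >>> np.isscalar(np.array(3.1))   # 0-d array, not a scalar type
    False
    >>> np.ndim(np.array(3.1)) == 0
    True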
@set_module('numpy')
"will raise an error in the future.", DeprecationWarning,
stacklevel=3)
+ # Ensure that num is a Python integer to avoid overflow or unwanted
+ # casts to floating point.
+ num = operator.index(num)
+
if num == 0:
return '0' * (width or 1)
return ''.join(reversed(res or '0'))
-def load(file):
- """
- Wrapper around cPickle.load which accepts either a file-like object or
- a filename.
-
- Note that the NumPy binary format is not based on pickle/cPickle anymore.
- For details on the preferred way of loading and saving files, see `load`
- and `save`.
-
- See Also
- --------
- load, save
-
- """
- # NumPy 1.15.0, 2017-12-10
- warnings.warn(
- "np.core.numeric.load is deprecated, use pickle.load instead",
- DeprecationWarning, stacklevel=2)
- if isinstance(file, type("")):
- with open(file, "rb") as file_pointer:
- return pickle.load(file_pointer)
- return pickle.load(file)
-
-
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
`atol` are added together to compare against the absolute difference
between `a` and `b`.
- If either array contains one or more NaNs, False is returned.
- Infs are treated as equal if they are in the same place and of the same
- sign in both arrays.
+ NaNs are treated as equal if they are in the same place and if
+ ``equal_nan=True``. Infs are treated as equal if they are in the same
+ place and of the same sign in both arrays.
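Concretely (illustrative doctest for `isclose`):

    >>> np.isclose([np.nan], [np.nan])
    array([False])
    >>> np.isclose([np.nan], [np.nan], equal_nan=True)
    array([ True])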
Parameters
----------
Examples
--------
- >>> for sctype in [np.int32, np.double, np.complex, np.string_, np.ndarray]:
+ >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]:
... print(np.sctype2char(sctype))
l # may vary
d
return decorator
+
+# Call textwrap.dedent here instead of in the function so as to avoid
+# calling dedent multiple times on the same text
+_wrapped_func_source = textwrap.dedent("""
+ @functools.wraps(implementation)
+ def {name}(*args, **kwargs):
+ relevant_args = dispatcher(*args, **kwargs)
+ return implement_array_function(
+ implementation, {name}, relevant_args, args, kwargs)
+ """)
+
+
def array_function_dispatch(dispatcher, module=None, verify=True,
docs_from_dispatcher=False):
"""Decorator for adding dispatch with the __array_function__ protocol.
# more interpretable name. Otherwise, the original function does not
# show up at all in many cases, e.g., if it's written in C or if the
# dispatcher gets an invalid keyword argument.
- source = textwrap.dedent("""
- @functools.wraps(implementation)
- def {name}(*args, **kwargs):
- relevant_args = dispatcher(*args, **kwargs)
- return implement_array_function(
- implementation, {name}, relevant_args, args, kwargs)
- """).format(name=implementation.__name__)
+ source = _wrapped_func_source.format(name=implementation.__name__)
source_object = compile(
source, filename='<__array_function__ internals>', mode='exec')
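For orientation, the decorator is applied in the style below (a hedged sketch;
``_my_func_dispatcher`` and ``my_func`` are hypothetical names):

    def _my_func_dispatcher(a, out=None):
        # return the arguments whose types should participate in dispatch
        return (a, out)

    @array_function_dispatch(_my_func_dispatcher)
    def my_func(a, out=None):
        ...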
self._createdescr(byteorder)
self.dtype = self._descr
- def _parseFormats(self, formats, aligned=0):
+ def _parseFormats(self, formats, aligned=False):
""" Parse the field formats """
if formats is None:
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
- from numpy.distutils.system_info import get_info
+ from numpy.distutils.system_info import get_info, dict_append
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
rep = check_long_double_representation(config_cmd)
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
+ if check_for_right_shift_internal_compiler_error(config_cmd):
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift')
+
# Py3K check
- if sys.version_info[0] == 3:
+ if sys.version_info[0] >= 3:
moredefs.append(('NPY_PY3K', 1))
# Generate the config.h file from moredefs
#endif
"""))
- print('File:', target)
+ log.info('File: %s' % target)
with open(target) as target_f:
- print(target_f.read())
- print('EOF')
+ log.info(target_f.read())
+ log.info('EOF')
else:
mathlibs = []
with open(target) as target_f:
"""))
# Dump the numpyconfig.h header to stdout
- print('File: %s' % target)
+ log.info('File: %s' % target)
with open(target) as target_f:
- print(target_f.read())
- print('EOF')
+ log.info(target_f.read())
+ log.info('EOF')
config.add_data_files((header_dir, target))
return target
join(codegen_dir, 'genapi.py'),
]
- #######################################################################
- # dummy module #
- #######################################################################
-
- # npymath needs the config.h and numpyconfig.h files to be generated, but
- # build_clib cannot handle generate_config_h and generate_numpyconfig_h
- # (don't ask). Because clib are generated before extensions, we have to
- # explicitly add an extension which has generate_config_h and
- # generate_numpyconfig_h as sources *before* adding npymath.
-
- config.add_extension('_dummy',
- sources=[join('src', 'dummymodule.c'),
- generate_config_h,
- generate_numpyconfig_h,
- generate_numpy_api]
- )
-
#######################################################################
# npymath library #
#######################################################################
# compiler does not work).
st = config_cmd.try_link('int main(void) { return 0;}')
if not st:
+ # rerun the failing command in verbose mode
+ config_cmd.compiler.verbose = True
+ config_cmd.try_link('int main(void) { return 0;}')
raise RuntimeError("Broken toolchain: cannot link a simple C program")
mlibs = check_mathlib(config_cmd)
join('src', 'common', 'numpyos.c'),
]
- blas_info = get_info('blas_opt', 0)
- if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
+ if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
+ blas_info = get_info('blas_ilp64_opt', 2)
+ else:
+ blas_info = get_info('blas_opt', 0)
+
+ have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', [])
+
+ if have_blas:
extra_info = blas_info
# These files are also in MANIFEST.in so that they are always in
# the source distribution independently of HAVE_CBLAS.
import warnings
import copy
import binascii
+import textwrap
from numpy.distutils.misc_util import mingw32
#-------------------
# How to change C_API_VERSION ?
# - increase C_API_VERSION value
-# - record the hash for the new C API with the script cversions.py
+# - record the hash for the new C API with the cversions.py script
# and add the hash to cversions.txt
# The hash values are used to remind developers when the C API number was not
# updated - generates a MismatchCAPIWarning warning which is turned into an
# codegen_dir have been updated without the API version being
# updated. Any modification in those .txt files should be reflected
# in the api and eventually abi versions.
- # To compute the checksum of the current API, use
- # code_generators/cversions.py script
+ # To compute the checksum of the current API, use numpy/core/cversions.py
if not curapi_hash == api_hash:
msg = ("API mismatch detected, the C API version "
"numbers have to be updated. Current C api version is %d, "
- "with checksum %s, but recorded checksum for C API version %d in "
- "codegen_dir/cversions.txt is %s. If functions were added in the "
- "C API, you have to update C_API_VERSION in %s."
+ "with checksum %s, but recorded checksum for C API version %d "
+ "in core/codegen_dir/cversions.txt is %s. If functions were "
+ "added in the C API, you have to update C_API_VERSION in %s."
)
warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
__file__),
# gcc 4.8.4 supports attributes but not with intrinsics
# tested via "#include<%s> int %s %s(void *){code; return 0;};" % (header, attribute, name, code)
# function name will be converted to HAVE_<upper-case-name> preprocessor macro
-OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2")))',
+OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2,fma")))',
'attribute_target_avx2_with_intrinsics',
- '__m256 temp = _mm256_set1_ps(1.0)',
+ '__m256 temp = _mm256_set1_ps(1.0); temp = \
+ _mm256_fmadd_ps(temp, temp, temp)',
'immintrin.h'),
('__attribute__((target("avx512f")))',
'attribute_target_avx512f_with_intrinsics',
else:
# We never detected the after_sequence
raise ValueError("Could not lock sequences (%s)" % saw)
+
+
+def check_for_right_shift_internal_compiler_error(cmd):
+ """
+ On our arm CI, this fails with an internal compilation error
+
+ The failure looks like the following, and can be reproduced on ARM64 GCC 5.4:
+
+ <source>: In function 'right_shift':
+ <source>:4:20: internal compiler error: in expand_shift_1, at expmed.c:2349
+ ip1[i] = ip1[i] >> in2;
+ ^
+ Please submit a full bug report,
+ with preprocessed source if appropriate.
+ See <http://gcc.gnu.org/bugs.html> for instructions.
+ Compiler returned: 1
+
+ This function returns True if this compiler bug is present, and we need to
+ turn off optimization for the function
+ """
+ cmd._check_compiler()
+ has_optimize = cmd.try_compile(textwrap.dedent("""\
+ __attribute__((optimize("O3"))) void right_shift() {}
+ """), None, None)
+ if not has_optimize:
+ return False
+
+ no_err = cmd.try_compile(textwrap.dedent("""\
+ typedef long the_type; /* fails also for unsigned and long long */
+ __attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) {
+ for (int i = 0; i < n; i++) {
+ if (in2 < (the_type)sizeof(the_type) * 8) {
+ ip1[i] = ip1[i] >> in2;
+ }
+ }
+ }
+ """), None, None)
+ return not no_err
from . import numeric as _nx
from . import overrides
-from .numeric import array, asanyarray, newaxis
+from ._asarray import array, asanyarray
from .multiarray import normalize_axis_index
+from . import fromnumeric as _from_nx
array_function_dispatch = functools.partial(
if ary.ndim == 0:
result = ary.reshape(1, 1)
elif ary.ndim == 1:
- result = ary[newaxis, :]
+ result = ary[_nx.newaxis, :]
else:
result = ary
res.append(result)
if ary.ndim == 0:
result = ary.reshape(1, 1, 1)
elif ary.ndim == 1:
- result = ary[newaxis, :, newaxis]
+ result = ary[_nx.newaxis, :, _nx.newaxis]
elif ary.ndim == 2:
- result = ary[:, :, newaxis]
+ result = ary[:, :, _nx.newaxis]
else:
result = ary
res.append(result)
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
-_size = getattr(_nx.size, '__wrapped__', _nx.size)
-_ndim = getattr(_nx.ndim, '__wrapped__', _nx.ndim)
-_concatenate = getattr(_nx.concatenate, '__wrapped__', _nx.concatenate)
+_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
+_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
+_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
def _block_format_index(index):
first_index : list of int
The full index of an element from the bottom of the nesting in
`arrays`. If any element at the bottom is an empty list, this will
- refer to it, and the last index along the empty axis will be `None`.
+ refer to it, and the last index along the empty axis will be None.
max_arr_ndim : int
The maximum of the ndims of the arrays nested in `arrays`.
final_size: int
that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
- to existing slice tuple without creating a new tuple everytime.
+ to existing slice tuple without creating a new tuple every time.
"""
# Cache a result that will be reused.
* check whether __array_ufunc__ equals None.
*/
attr = PyArray_LookupSpecial(other, "__array_ufunc__");
- if (attr) {
+ if (attr != NULL) {
defer = !inplace && (attr == Py_None);
Py_DECREF(attr);
return defer;
}
+ else if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
/*
* Otherwise, we need to check for the legacy __array_priority__. But if
* other.__class__ is a subtype of self.__class__, then it's already had
static void
gemm(int typenum, enum CBLAS_ORDER order,
enum CBLAS_TRANSPOSE transA, enum CBLAS_TRANSPOSE transB,
- int m, int n, int k,
- PyArrayObject *A, int lda, PyArrayObject *B, int ldb, PyArrayObject *R)
+ npy_intp m, npy_intp n, npy_intp k,
+ PyArrayObject *A, npy_intp lda, PyArrayObject *B, npy_intp ldb, PyArrayObject *R)
{
const void *Adata = PyArray_DATA(A), *Bdata = PyArray_DATA(B);
void *Rdata = PyArray_DATA(R);
- int ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
+ npy_intp ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
switch (typenum) {
case NPY_DOUBLE:
- cblas_dgemm(order, transA, transB, m, n, k, 1.,
+ CBLAS_FUNC(cblas_dgemm)(order, transA, transB, m, n, k, 1.,
Adata, lda, Bdata, ldb, 0., Rdata, ldc);
break;
case NPY_FLOAT:
- cblas_sgemm(order, transA, transB, m, n, k, 1.f,
+ CBLAS_FUNC(cblas_sgemm)(order, transA, transB, m, n, k, 1.f,
Adata, lda, Bdata, ldb, 0.f, Rdata, ldc);
break;
case NPY_CDOUBLE:
- cblas_zgemm(order, transA, transB, m, n, k, oneD,
+ CBLAS_FUNC(cblas_zgemm)(order, transA, transB, m, n, k, oneD,
Adata, lda, Bdata, ldb, zeroD, Rdata, ldc);
break;
case NPY_CFLOAT:
- cblas_cgemm(order, transA, transB, m, n, k, oneF,
+ CBLAS_FUNC(cblas_cgemm)(order, transA, transB, m, n, k, oneF,
Adata, lda, Bdata, ldb, zeroF, Rdata, ldc);
break;
}
*/
static void
gemv(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
- PyArrayObject *A, int lda, PyArrayObject *X, int incX,
+ PyArrayObject *A, npy_intp lda, PyArrayObject *X, npy_intp incX,
PyArrayObject *R)
{
const void *Adata = PyArray_DATA(A), *Xdata = PyArray_DATA(X);
void *Rdata = PyArray_DATA(R);
- int m = PyArray_DIM(A, 0), n = PyArray_DIM(A, 1);
+ npy_intp m = PyArray_DIM(A, 0), n = PyArray_DIM(A, 1);
switch (typenum) {
case NPY_DOUBLE:
- cblas_dgemv(order, trans, m, n, 1., Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_dgemv)(order, trans, m, n, 1., Adata, lda, Xdata, incX,
0., Rdata, 1);
break;
case NPY_FLOAT:
- cblas_sgemv(order, trans, m, n, 1.f, Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_sgemv)(order, trans, m, n, 1.f, Adata, lda, Xdata, incX,
0.f, Rdata, 1);
break;
case NPY_CDOUBLE:
- cblas_zgemv(order, trans, m, n, oneD, Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_zgemv)(order, trans, m, n, oneD, Adata, lda, Xdata, incX,
zeroD, Rdata, 1);
break;
case NPY_CFLOAT:
- cblas_cgemv(order, trans, m, n, oneF, Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_cgemv)(order, trans, m, n, oneF, Adata, lda, Xdata, incX,
zeroF, Rdata, 1);
break;
}
*/
static void
syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
- int n, int k,
- PyArrayObject *A, int lda, PyArrayObject *R)
+ npy_intp n, npy_intp k,
+ PyArrayObject *A, npy_intp lda, PyArrayObject *R)
{
const void *Adata = PyArray_DATA(A);
void *Rdata = PyArray_DATA(R);
- int ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
+ npy_intp ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
npy_intp i;
npy_intp j;
switch (typenum) {
case NPY_DOUBLE:
- cblas_dsyrk(order, CblasUpper, trans, n, k, 1.,
+ CBLAS_FUNC(cblas_dsyrk)(order, CblasUpper, trans, n, k, 1.,
Adata, lda, 0., Rdata, ldc);
for (i = 0; i < n; i++) {
}
break;
case NPY_FLOAT:
- cblas_ssyrk(order, CblasUpper, trans, n, k, 1.f,
+ CBLAS_FUNC(cblas_ssyrk)(order, CblasUpper, trans, n, k, 1.f,
Adata, lda, 0.f, Rdata, ldc);
for (i = 0; i < n; i++) {
}
break;
case NPY_CDOUBLE:
- cblas_zsyrk(order, CblasUpper, trans, n, k, oneD,
+ CBLAS_FUNC(cblas_zsyrk)(order, CblasUpper, trans, n, k, oneD,
Adata, lda, zeroD, Rdata, ldc);
for (i = 0; i < n; i++) {
}
break;
case NPY_CFLOAT:
- cblas_csyrk(order, CblasUpper, trans, n, k, oneF,
+ CBLAS_FUNC(cblas_csyrk)(order, CblasUpper, trans, n, k, oneF,
Adata, lda, zeroF, Rdata, ldc);
for (i = 0; i < n; i++) {
PyArrayObject *out)
{
PyArrayObject *result = NULL, *out_buf = NULL;
- int j, lda, ldb;
+ npy_intp j, lda, ldb;
npy_intp l;
int nd;
npy_intp ap1stride = 0;
*((double *)PyArray_DATA(ap1));
}
else if (ap1shape != _matrix) {
- cblas_daxpy(l,
+ CBLAS_FUNC(cblas_daxpy)(l,
*((double *)PyArray_DATA(ap2)),
(double *)PyArray_DATA(ap1),
ap1stride/sizeof(double),
(double *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
double val;
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(double);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(double);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_daxpy(l, val, (double *)ptr, a1s,
+ CBLAS_FUNC(cblas_daxpy)(l, val, (double *)ptr, a1s,
(double *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real;
}
else if (ap1shape != _matrix) {
- cblas_zaxpy(l,
+ CBLAS_FUNC(cblas_zaxpy)(l,
(double *)PyArray_DATA(ap2),
(double *)PyArray_DATA(ap1),
ap1stride/sizeof(npy_cdouble),
(double *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
double *pval;
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(npy_cdouble);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(npy_cdouble);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_zaxpy(l, pval, (double *)ptr, a1s,
+ CBLAS_FUNC(cblas_zaxpy)(l, pval, (double *)ptr, a1s,
(double *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
*((float *)PyArray_DATA(ap1));
}
else if (ap1shape != _matrix) {
- cblas_saxpy(l,
+ CBLAS_FUNC(cblas_saxpy)(l,
*((float *)PyArray_DATA(ap2)),
(float *)PyArray_DATA(ap1),
ap1stride/sizeof(float),
(float *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
float val;
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(float);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(float);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_saxpy(l, val, (float *)ptr, a1s,
+ CBLAS_FUNC(cblas_saxpy)(l, val, (float *)ptr, a1s,
(float *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real;
}
else if (ap1shape != _matrix) {
- cblas_caxpy(l,
+ CBLAS_FUNC(cblas_caxpy)(l,
(float *)PyArray_DATA(ap2),
(float *)PyArray_DATA(ap1),
ap1stride/sizeof(npy_cfloat),
(float *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
float *pval;
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(npy_cfloat);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(npy_cfloat);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_caxpy(l, pval, (float *)ptr, a1s,
+ CBLAS_FUNC(cblas_caxpy)(l, pval, (float *)ptr, a1s,
(float *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
/* Matrix vector multiplication -- Level 2 BLAS */
/* lda must be MAX(M,1) */
enum CBLAS_ORDER Order;
- int ap2s;
+ npy_intp ap2s;
if (!PyArray_ISONESEGMENT(ap1)) {
PyObject *new;
else if (ap1shape != _matrix && ap2shape == _matrix) {
/* Vector matrix multiplication -- Level 2 BLAS */
enum CBLAS_ORDER Order;
- int ap1s;
+ npy_intp ap1s;
if (!PyArray_ISONESEGMENT(ap2)) {
PyObject *new;
*/
enum CBLAS_ORDER Order;
enum CBLAS_TRANSPOSE Trans1, Trans2;
- int M, N, L;
+ npy_intp M, N, L;
/* Optimization possible: */
/*
}
/*
- * Stripped down version of PyObject_GetAttrString,
- * avoids lookups for None, tuple, and List objects,
- * and doesn't create a PyErr since this code ignores it.
+ * Stripped down version of PyObject_GetAttrString(obj, name) that does not
+ * raise PyExc_AttributeError.
*
- * This can be much faster then PyObject_GetAttrString where
- * exceptions are not used by caller.
+ * This allows it to avoid creating then discarding exception objects when
+ * performing lookups on objects without any attributes.
*
- * 'obj' is the object to search for attribute.
- *
- * 'name' is the attribute to search for.
- *
- * Returns attribute value on success, NULL on failure.
+ * Returns the attribute value on success, NULL without an exception set if
+ * there is no such attribute, and NULL with an exception set on failure.
*/
static NPY_INLINE PyObject *
maybe_get_attr(PyObject *obj, char *name)
/* Attribute referenced by (char *)name */
if (tp->tp_getattr != NULL) {
res = (*tp->tp_getattr)(obj, name);
- if (res == NULL) {
+ if (res == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
}
}
}
res = (*tp->tp_getattro)(obj, w);
Py_DECREF(w);
- if (res == NULL) {
+ if (res == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
}
}
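+/*
+ * A minimal caller sketch (illustrative only; "some_attr" is a placeholder):
+ *
+ *     PyObject *res = maybe_get_attr(obj, "some_attr");
+ *     if (res == NULL && PyErr_Occurred()) {
+ *         return NULL;  /* a real error, not just a missing attribute */
+ *     }
+ */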
/*
* Enumerated and derived types
*/
-#define CBLAS_INDEX size_t /* this may vary between platforms */
-
enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102};
enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113};
enum CBLAS_UPLO {CblasUpper=121, CblasLower=122};
enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132};
enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};
-/*
- * ===========================================================================
- * Prototypes for level 1 BLAS functions (complex are recast as routines)
- * ===========================================================================
- */
-float cblas_sdsdot(const int N, const float alpha, const float *X,
- const int incX, const float *Y, const int incY);
-double cblas_dsdot(const int N, const float *X, const int incX, const float *Y,
- const int incY);
-float cblas_sdot(const int N, const float *X, const int incX,
- const float *Y, const int incY);
-double cblas_ddot(const int N, const double *X, const int incX,
- const double *Y, const int incY);
-
-/*
- * Functions having prefixes Z and C only
- */
-void cblas_cdotu_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotu);
-void cblas_cdotc_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotc);
-
-void cblas_zdotu_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotu);
-void cblas_zdotc_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotc);
-
-
-/*
- * Functions having prefixes S D SC DZ
- */
-float cblas_snrm2(const int N, const float *X, const int incX);
-float cblas_sasum(const int N, const float *X, const int incX);
-
-double cblas_dnrm2(const int N, const double *X, const int incX);
-double cblas_dasum(const int N, const double *X, const int incX);
-
-float cblas_scnrm2(const int N, const void *X, const int incX);
-float cblas_scasum(const int N, const void *X, const int incX);
-
-double cblas_dznrm2(const int N, const void *X, const int incX);
-double cblas_dzasum(const int N, const void *X, const int incX);
-
-
-/*
- * Functions having standard 4 prefixes (S D C Z)
- */
-CBLAS_INDEX cblas_isamax(const int N, const float *X, const int incX);
-CBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX);
-CBLAS_INDEX cblas_icamax(const int N, const void *X, const int incX);
-CBLAS_INDEX cblas_izamax(const int N, const void *X, const int incX);
-
-/*
- * ===========================================================================
- * Prototypes for level 1 BLAS routines
- * ===========================================================================
- */
-
-/*
- * Routines with standard 4 prefixes (s, d, c, z)
- */
-void cblas_sswap(const int N, float *X, const int incX,
- float *Y, const int incY);
-void cblas_scopy(const int N, const float *X, const int incX,
- float *Y, const int incY);
-void cblas_saxpy(const int N, const float alpha, const float *X,
- const int incX, float *Y, const int incY);
-
-void cblas_dswap(const int N, double *X, const int incX,
- double *Y, const int incY);
-void cblas_dcopy(const int N, const double *X, const int incX,
- double *Y, const int incY);
-void cblas_daxpy(const int N, const double alpha, const double *X,
- const int incX, double *Y, const int incY);
-
-void cblas_cswap(const int N, void *X, const int incX,
- void *Y, const int incY);
-void cblas_ccopy(const int N, const void *X, const int incX,
- void *Y, const int incY);
-void cblas_caxpy(const int N, const void *alpha, const void *X,
- const int incX, void *Y, const int incY);
-
-void cblas_zswap(const int N, void *X, const int incX,
- void *Y, const int incY);
-void cblas_zcopy(const int N, const void *X, const int incX,
- void *Y, const int incY);
-void cblas_zaxpy(const int N, const void *alpha, const void *X,
- const int incX, void *Y, const int incY);
-
-
-/*
- * Routines with S and D prefix only
- */
-void cblas_srotg(float *a, float *b, float *c, float *s);
-void cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P);
-void cblas_srot(const int N, float *X, const int incX,
- float *Y, const int incY, const float c, const float s);
-void cblas_srotm(const int N, float *X, const int incX,
- float *Y, const int incY, const float *P);
-
-void cblas_drotg(double *a, double *b, double *c, double *s);
-void cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P);
-void cblas_drot(const int N, double *X, const int incX,
- double *Y, const int incY, const double c, const double s);
-void cblas_drotm(const int N, double *X, const int incX,
- double *Y, const int incY, const double *P);
-
-
-/*
- * Routines with S D C Z CS and ZD prefixes
- */
-void cblas_sscal(const int N, const float alpha, float *X, const int incX);
-void cblas_dscal(const int N, const double alpha, double *X, const int incX);
-void cblas_cscal(const int N, const void *alpha, void *X, const int incX);
-void cblas_zscal(const int N, const void *alpha, void *X, const int incX);
-void cblas_csscal(const int N, const float alpha, void *X, const int incX);
-void cblas_zdscal(const int N, const double alpha, void *X, const int incX);
-
-/*
- * ===========================================================================
- * Prototypes for level 2 BLAS
- * ===========================================================================
- */
-
-/*
- * Routines with standard 4 prefixes (S, D, C, Z)
- */
-void cblas_sgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const float alpha, const float *A, const int lda,
- const float *X, const int incX, const float beta,
- float *Y, const int incY);
-void cblas_sgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const float alpha,
- const float *A, const int lda, const float *X,
- const int incX, const float beta, float *Y, const int incY);
-void cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *A, const int lda,
- float *X, const int incX);
-void cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const float *A, const int lda,
- float *X, const int incX);
-void cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *Ap, float *X, const int incX);
-void cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *A, const int lda, float *X,
- const int incX);
-void cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const float *A, const int lda,
- float *X, const int incX);
-void cblas_stpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *Ap, float *X, const int incX);
-
-void cblas_dgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const double alpha, const double *A, const int lda,
- const double *X, const int incX, const double beta,
- double *Y, const int incY);
-void cblas_dgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const double alpha,
- const double *A, const int lda, const double *X,
- const int incX, const double beta, double *Y, const int incY);
-void cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *A, const int lda,
- double *X, const int incX);
-void cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const double *A, const int lda,
- double *X, const int incX);
-void cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *Ap, double *X, const int incX);
-void cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *A, const int lda, double *X,
- const int incX);
-void cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const double *A, const int lda,
- double *X, const int incX);
-void cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *Ap, double *X, const int incX);
-
-void cblas_cgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *X, const int incX, const void *beta,
- void *Y, const int incY);
-void cblas_cgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const void *alpha,
- const void *A, const int lda, const void *X,
- const int incX, const void *beta, void *Y, const int incY);
-void cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-void cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda, void *X,
- const int incX);
-void cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-
-void cblas_zgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *X, const int incX, const void *beta,
- void *Y, const int incY);
-void cblas_zgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const void *alpha,
- const void *A, const int lda, const void *X,
- const int incX, const void *beta, void *Y, const int incY);
-void cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ztpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-void cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda, void *X,
- const int incX);
-void cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-
-
-/*
- * Routines with S and D prefixes only
- */
-void cblas_ssymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *A,
- const int lda, const float *X, const int incX,
- const float beta, float *Y, const int incY);
-void cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const float alpha, const float *A,
- const int lda, const float *X, const int incX,
- const float beta, float *Y, const int incY);
-void cblas_sspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *Ap,
- const float *X, const int incX,
- const float beta, float *Y, const int incY);
-void cblas_sger(const enum CBLAS_ORDER order, const int M, const int N,
- const float alpha, const float *X, const int incX,
- const float *Y, const int incY, float *A, const int lda);
-void cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, float *A, const int lda);
-void cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, float *Ap);
-void cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, const float *Y, const int incY, float *A,
- const int lda);
-void cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, const float *Y, const int incY, float *A);
-
-void cblas_dsymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *A,
- const int lda, const double *X, const int incX,
- const double beta, double *Y, const int incY);
-void cblas_dsbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const double alpha, const double *A,
- const int lda, const double *X, const int incX,
- const double beta, double *Y, const int incY);
-void cblas_dspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *Ap,
- const double *X, const int incX,
- const double beta, double *Y, const int incY);
-void cblas_dger(const enum CBLAS_ORDER order, const int M, const int N,
- const double alpha, const double *X, const int incX,
- const double *Y, const int incY, double *A, const int lda);
-void cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, double *A, const int lda);
-void cblas_dspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, double *Ap);
-void cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, const double *Y, const int incY, double *A,
- const int lda);
-void cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, const double *Y, const int incY, double *A);
-
-
-/*
- * Routines with C and Z prefixes only
- */
-void cblas_chemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_chbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_chpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *Ap,
- const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const void *X, const int incX,
- void *A, const int lda);
-void cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const void *X,
- const int incX, void *A);
-void cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *Ap);
-
-void cblas_zhemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_zhbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_zhpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *Ap,
- const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_zher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const void *X, const int incX,
- void *A, const int lda);
-void cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const void *X,
- const int incX, void *A);
-void cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *Ap);
+#define CBLAS_INDEX size_t /* this may vary between platforms */
-/*
- * ===========================================================================
- * Prototypes for level 3 BLAS
- * ===========================================================================
- */
+#ifdef NO_APPEND_FORTRAN
+#define BLAS_FORTRAN_SUFFIX
+#else
+#define BLAS_FORTRAN_SUFFIX _
+#endif
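+/*
+ * e.g. the Fortran entry point for dgemm is dgemm_ by default; defining
+ * NO_APPEND_FORTRAN drops the trailing underscore.
+ */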
-/*
- * Routines with standard 4 prefixes (S, D, C, Z)
- */
-void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const float alpha, const float *A,
- const int lda, const float *B, const int ldb,
- const float beta, float *C, const int ldc);
-void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const float alpha, const float *A, const int lda,
- const float *B, const int ldb, const float beta,
- float *C, const int ldc);
-void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const float alpha, const float *A, const int lda,
- const float beta, float *C, const int ldc);
-void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const float alpha, const float *A, const int lda,
- const float *B, const int ldb, const float beta,
- float *C, const int ldc);
-void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const float alpha, const float *A, const int lda,
- float *B, const int ldb);
-void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const float alpha, const float *A, const int lda,
- float *B, const int ldb);
+#ifndef BLAS_SYMBOL_PREFIX
+#define BLAS_SYMBOL_PREFIX
+#endif
-void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const double alpha, const double *A,
- const int lda, const double *B, const int ldb,
- const double beta, double *C, const int ldc);
-void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const double alpha, const double *A, const int lda,
- const double *B, const int ldb, const double beta,
- double *C, const int ldc);
-void cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const double alpha, const double *A, const int lda,
- const double beta, double *C, const int ldc);
-void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const double alpha, const double *A, const int lda,
- const double *B, const int ldb, const double beta,
- double *C, const int ldc);
-void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const double alpha, const double *A, const int lda,
- double *B, const int ldb);
-void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const double alpha, const double *A, const int lda,
- double *B, const int ldb);
+#ifndef BLAS_SYMBOL_SUFFIX
+#define BLAS_SYMBOL_SUFFIX
+#endif
-void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const void *alpha, const void *A,
- const int lda, const void *B, const int ldb,
- const void *beta, void *C, const int ldc);
-void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *beta, void *C, const int ldc);
-void cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
-void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
+#define BLAS_FUNC_CONCAT(name,prefix,suffix,suffix2) prefix ## name ## suffix ## suffix2
+#define BLAS_FUNC_EXPAND(name,prefix,suffix,suffix2) BLAS_FUNC_CONCAT(name,prefix,suffix,suffix2)
-void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const void *alpha, const void *A,
- const int lda, const void *B, const int ldb,
- const void *beta, void *C, const int ldc);
-void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *beta, void *C, const int ldc);
-void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
-void cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
+#define CBLAS_FUNC(name) BLAS_FUNC_EXPAND(name,BLAS_SYMBOL_PREFIX,,BLAS_SYMBOL_SUFFIX)
+#define BLAS_FUNC(name) BLAS_FUNC_EXPAND(name,BLAS_SYMBOL_PREFIX,BLAS_FORTRAN_SUFFIX,BLAS_SYMBOL_SUFFIX)
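+/*
+ * For example, in a build where BLAS_SYMBOL_PREFIX is empty and
+ * BLAS_SYMBOL_SUFFIX is defined to 64_ (hypothetical values for an ILP64
+ * library), CBLAS_FUNC(cblas_daxpy) expands to cblas_daxpy64_ and
+ * BLAS_FUNC(dgemm) expands to dgemm_64_.
+ */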
+#ifdef HAVE_BLAS_ILP64
+#define CBLAS_INT npy_int64
+#else
+#define CBLAS_INT int
+#endif
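+/*
+ * CBLAS_INT is the integer type the BLAS library expects for dimensions and
+ * strides: 64-bit for ILP64 builds, a plain int otherwise.
+ */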
-/*
- * Routines with prefixes C and Z only
- */
-void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const float alpha, const void *A, const int lda,
- const float beta, void *C, const int ldc);
-void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const float beta,
- void *C, const int ldc);
+#define BLASNAME(name) CBLAS_FUNC(name)
+#define BLASINT CBLAS_INT
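+/*
+ * npy_cblas_base.h declares every CBLAS prototype in terms of the BLASNAME
+ * and BLASINT macros defined above, so a single set of declarations serves
+ * both the plain and the prefixed/suffixed (e.g. ILP64) symbol conventions.
+ */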
-void cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const double alpha, const void *A, const int lda,
- const double beta, void *C, const int ldc);
-void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const double beta,
- void *C, const int ldc);
+#include "npy_cblas_base.h"
-void cblas_xerbla(int p, const char *rout, const char *form, ...);
+#undef BLASINT
+#undef BLASNAME
#ifdef __cplusplus
}
--- /dev/null
+/*
+ * This header provides NumPy with a consistent interface to CBLAS code. It is
+ * needed because not all providers of CBLAS ship a cblas.h. For instance, MKL
+ * provides mkl_cblas.h and also typedefs the CBLAS_XXX enums.
+ */
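+/*
+ * It is intended to be included with BLASNAME and BLASINT defined by the
+ * including header (as npy_cblas.h does); every prototype below is written
+ * in terms of those two macros.
+ */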
+
+/*
+ * ===========================================================================
+ * Prototypes for level 1 BLAS functions (complex are recast as routines)
+ * ===========================================================================
+ */
+float BLASNAME(cblas_sdsdot)(const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, const float *Y, const BLASINT incY);
+double BLASNAME(cblas_dsdot)(const BLASINT N, const float *X, const BLASINT incX, const float *Y,
+ const BLASINT incY);
+float BLASNAME(cblas_sdot)(const BLASINT N, const float *X, const BLASINT incX,
+ const float *Y, const BLASINT incY);
+double BLASNAME(cblas_ddot)(const BLASINT N, const double *X, const BLASINT incX,
+ const double *Y, const BLASINT incY);
+
+/*
+ * Functions having prefixes Z and C only
+ */
+void BLASNAME(cblas_cdotu_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotu);
+void BLASNAME(cblas_cdotc_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotc);
+
+void BLASNAME(cblas_zdotu_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotu);
+void BLASNAME(cblas_zdotc_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotc);
+
+
+/*
+ * Functions having prefixes S D SC DZ
+ */
+float BLASNAME(cblas_snrm2)(const BLASINT N, const float *X, const BLASINT incX);
+float BLASNAME(cblas_sasum)(const BLASINT N, const float *X, const BLASINT incX);
+
+double BLASNAME(cblas_dnrm2)(const BLASINT N, const double *X, const BLASINT incX);
+double BLASNAME(cblas_dasum)(const BLASINT N, const double *X, const BLASINT incX);
+
+float BLASNAME(cblas_scnrm2)(const BLASINT N, const void *X, const BLASINT incX);
+float BLASNAME(cblas_scasum)(const BLASINT N, const void *X, const BLASINT incX);
+
+double BLASNAME(cblas_dznrm2)(const BLASINT N, const void *X, const BLASINT incX);
+double BLASNAME(cblas_dzasum)(const BLASINT N, const void *X, const BLASINT incX);
+
+
+/*
+ * Functions having standard 4 prefixes (S D C Z)
+ */
+CBLAS_INDEX BLASNAME(cblas_isamax)(const BLASINT N, const float *X, const BLASINT incX);
+CBLAS_INDEX BLASNAME(cblas_idamax)(const BLASINT N, const double *X, const BLASINT incX);
+CBLAS_INDEX BLASNAME(cblas_icamax)(const BLASINT N, const void *X, const BLASINT incX);
+CBLAS_INDEX BLASNAME(cblas_izamax)(const BLASINT N, const void *X, const BLASINT incX);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 1 BLAS routines
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (s, d, c, z)
+ */
+void BLASNAME(cblas_sswap)(const BLASINT N, float *X, const BLASINT incX,
+ float *Y, const BLASINT incY);
+void BLASNAME(cblas_scopy)(const BLASINT N, const float *X, const BLASINT incX,
+ float *Y, const BLASINT incY);
+void BLASNAME(cblas_saxpy)(const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, float *Y, const BLASINT incY);
+
+void BLASNAME(cblas_dswap)(const BLASINT N, double *X, const BLASINT incX,
+ double *Y, const BLASINT incY);
+void BLASNAME(cblas_dcopy)(const BLASINT N, const double *X, const BLASINT incX,
+ double *Y, const BLASINT incY);
+void BLASNAME(cblas_daxpy)(const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, double *Y, const BLASINT incY);
+
+void BLASNAME(cblas_cswap)(const BLASINT N, void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_ccopy)(const BLASINT N, const void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_caxpy)(const BLASINT N, const void *alpha, const void *X,
+ const BLASINT incX, void *Y, const BLASINT incY);
+
+void BLASNAME(cblas_zswap)(const BLASINT N, void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_zcopy)(const BLASINT N, const void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_zaxpy)(const BLASINT N, const void *alpha, const void *X,
+ const BLASINT incX, void *Y, const BLASINT incY);
+
+
+/*
+ * Routines with S and D prefix only
+ */
+void BLASNAME(cblas_srotg)(float *a, float *b, float *c, float *s);
+void BLASNAME(cblas_srotmg)(float *d1, float *d2, float *b1, const float b2, float *P);
+void BLASNAME(cblas_srot)(const BLASINT N, float *X, const BLASINT incX,
+ float *Y, const BLASINT incY, const float c, const float s);
+void BLASNAME(cblas_srotm)(const BLASINT N, float *X, const BLASINT incX,
+ float *Y, const BLASINT incY, const float *P);
+
+void BLASNAME(cblas_drotg)(double *a, double *b, double *c, double *s);
+void BLASNAME(cblas_drotmg)(double *d1, double *d2, double *b1, const double b2, double *P);
+void BLASNAME(cblas_drot)(const BLASINT N, double *X, const BLASINT incX,
+ double *Y, const BLASINT incY, const double c, const double s);
+void BLASNAME(cblas_drotm)(const BLASINT N, double *X, const BLASINT incX,
+ double *Y, const BLASINT incY, const double *P);
+
+
+/*
+ * Routines with S D C Z CS and ZD prefixes
+ */
+void BLASNAME(cblas_sscal)(const BLASINT N, const float alpha, float *X, const BLASINT incX);
+void BLASNAME(cblas_dscal)(const BLASINT N, const double alpha, double *X, const BLASINT incX);
+void BLASNAME(cblas_cscal)(const BLASINT N, const void *alpha, void *X, const BLASINT incX);
+void BLASNAME(cblas_zscal)(const BLASINT N, const void *alpha, void *X, const BLASINT incX);
+void BLASNAME(cblas_csscal)(const BLASINT N, const float alpha, void *X, const BLASINT incX);
+void BLASNAME(cblas_zdscal)(const BLASINT N, const double alpha, void *X, const BLASINT incX);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 2 BLAS
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (S, D, C, Z)
+ */
+void BLASNAME(cblas_sgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ const float *X, const BLASINT incX, const float beta,
+ float *Y, const BLASINT incY);
+void BLASNAME(cblas_sgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const float alpha,
+ const float *A, const BLASINT lda, const float *X,
+ const BLASINT incX, const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_strmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *A, const BLASINT lda,
+ float *X, const BLASINT incX);
+void BLASNAME(cblas_stbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const float *A, const BLASINT lda,
+ float *X, const BLASINT incX);
+void BLASNAME(cblas_stpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *Ap, float *X, const BLASINT incX);
+void BLASNAME(cblas_strsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *A, const BLASINT lda, float *X,
+ const BLASINT incX);
+void BLASNAME(cblas_stbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const float *A, const BLASINT lda,
+ float *X, const BLASINT incX);
+void BLASNAME(cblas_stpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *Ap, float *X, const BLASINT incX);
+
+void BLASNAME(cblas_dgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ const double *X, const BLASINT incX, const double beta,
+ double *Y, const BLASINT incY);
+void BLASNAME(cblas_dgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const double alpha,
+ const double *A, const BLASINT lda, const double *X,
+ const BLASINT incX, const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dtrmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *A, const BLASINT lda,
+ double *X, const BLASINT incX);
+void BLASNAME(cblas_dtbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const double *A, const BLASINT lda,
+ double *X, const BLASINT incX);
+void BLASNAME(cblas_dtpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *Ap, double *X, const BLASINT incX);
+void BLASNAME(cblas_dtrsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *A, const BLASINT lda, double *X,
+ const BLASINT incX);
+void BLASNAME(cblas_dtbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const double *A, const BLASINT lda,
+ double *X, const BLASINT incX);
+void BLASNAME(cblas_dtpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *Ap, double *X, const BLASINT incX);
+
+void BLASNAME(cblas_cgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *X, const BLASINT incX, const void *beta,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_cgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const void *alpha,
+ const void *A, const BLASINT lda, const void *X,
+ const BLASINT incX, const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_ctrmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ctbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ctpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+void BLASNAME(cblas_ctrsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda, void *X,
+ const BLASINT incX);
+void BLASNAME(cblas_ctbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ctpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+
+void BLASNAME(cblas_zgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *X, const BLASINT incX, const void *beta,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_zgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const void *alpha,
+ const void *A, const BLASINT lda, const void *X,
+ const BLASINT incX, const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_ztrmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ztbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ztpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+void BLASNAME(cblas_ztrsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda, void *X,
+ const BLASINT incX);
+void BLASNAME(cblas_ztbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ztpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+
+
+/*
+ * Routines with S and D prefixes only
+ */
+void BLASNAME(cblas_ssymv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *A,
+ const BLASINT lda, const float *X, const BLASINT incX,
+ const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_ssbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const float alpha, const float *A,
+ const BLASINT lda, const float *X, const BLASINT incX,
+ const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_sspmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *Ap,
+ const float *X, const BLASINT incX,
+ const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_sger)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const float alpha, const float *X, const BLASINT incX,
+ const float *Y, const BLASINT incY, float *A, const BLASINT lda);
+void BLASNAME(cblas_ssyr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, float *A, const BLASINT lda);
+void BLASNAME(cblas_sspr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, float *Ap);
+void BLASNAME(cblas_ssyr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, const float *Y, const BLASINT incY, float *A,
+ const BLASINT lda);
+void BLASNAME(cblas_sspr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, const float *Y, const BLASINT incY, float *A);
+
+void BLASNAME(cblas_dsymv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *A,
+ const BLASINT lda, const double *X, const BLASINT incX,
+ const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dsbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const double alpha, const double *A,
+ const BLASINT lda, const double *X, const BLASINT incX,
+ const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dspmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *Ap,
+ const double *X, const BLASINT incX,
+ const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dger)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const double alpha, const double *X, const BLASINT incX,
+ const double *Y, const BLASINT incY, double *A, const BLASINT lda);
+void BLASNAME(cblas_dsyr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, double *A, const BLASINT lda);
+void BLASNAME(cblas_dspr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, double *Ap);
+void BLASNAME(cblas_dsyr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, const double *Y, const BLASINT incY, double *A,
+ const BLASINT lda);
+void BLASNAME(cblas_dspr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, const double *Y, const BLASINT incY, double *A);
+
+
+/*
+ * Routines with C and Z prefixes only
+ */
+void BLASNAME(cblas_chemv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_chbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_chpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *Ap,
+ const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_cgeru)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_cgerc)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_cher)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const void *X, const BLASINT incX,
+ void *A, const BLASINT lda);
+void BLASNAME(cblas_chpr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const void *X,
+ const BLASINT incX, void *A);
+void BLASNAME(cblas_cher2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_chpr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *Ap);
+
+void BLASNAME(cblas_zhemv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_zhbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_zhpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *Ap,
+ const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_zgeru)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_zgerc)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_zher)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const void *X, const BLASINT incX,
+ void *A, const BLASINT lda);
+void BLASNAME(cblas_zhpr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const void *X,
+ const BLASINT incX, void *A);
+void BLASNAME(cblas_zher2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_zhpr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *Ap);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 3 BLAS
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (S, D, C, Z)
+ */
+void BLASNAME(cblas_sgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const float alpha, const float *A,
+ const BLASINT lda, const float *B, const BLASINT ldb,
+ const float beta, float *C, const BLASINT ldc);
+void BLASNAME(cblas_ssymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ const float *B, const BLASINT ldb, const float beta,
+ float *C, const BLASINT ldc);
+void BLASNAME(cblas_ssyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const float alpha, const float *A, const BLASINT lda,
+ const float beta, float *C, const BLASINT ldc);
+void BLASNAME(cblas_ssyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const float alpha, const float *A, const BLASINT lda,
+ const float *B, const BLASINT ldb, const float beta,
+ float *C, const BLASINT ldc);
+void BLASNAME(cblas_strmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ float *B, const BLASINT ldb);
+void BLASNAME(cblas_strsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ float *B, const BLASINT ldb);
+
+void BLASNAME(cblas_dgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const double alpha, const double *A,
+ const BLASINT lda, const double *B, const BLASINT ldb,
+ const double beta, double *C, const BLASINT ldc);
+void BLASNAME(cblas_dsymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ const double *B, const BLASINT ldb, const double beta,
+ double *C, const BLASINT ldc);
+void BLASNAME(cblas_dsyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const double alpha, const double *A, const BLASINT lda,
+ const double beta, double *C, const BLASINT ldc);
+void BLASNAME(cblas_dsyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const double alpha, const double *A, const BLASINT lda,
+ const double *B, const BLASINT ldb, const double beta,
+ double *C, const BLASINT ldc);
+void BLASNAME(cblas_dtrmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ double *B, const BLASINT ldb);
+void BLASNAME(cblas_dtrsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ double *B, const BLASINT ldb);
+
+void BLASNAME(cblas_cgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *B, const BLASINT ldb,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_csymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_csyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_csyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_ctrmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+void BLASNAME(cblas_ctrsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+
+void BLASNAME(cblas_zgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *B, const BLASINT ldb,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_zsymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_zsyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_zsyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_ztrmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+void BLASNAME(cblas_ztrsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+
+
+/*
+ * Routines with prefixes C and Z only
+ */
+void BLASNAME(cblas_chemm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_cherk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const float alpha, const void *A, const BLASINT lda,
+ const float beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_cher2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const float beta,
+ void *C, const BLASINT ldc);
+
+void BLASNAME(cblas_zhemm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_zherk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const double alpha, const void *A, const BLASINT lda,
+ const double beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_zher2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const double beta,
+ void *C, const BLASINT ldc);
+
+void BLASNAME(cblas_xerbla)(BLASINT p, const char *rout, const char *form, ...);
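+
+/*
+ * Illustrative sketch, not part of the prototypes above: BLASNAME and
+ * BLASINT are assumed to be set up by the including header so that the
+ * same prototypes can serve both 32-bit and 64-bit (ILP64) BLAS builds,
+ * roughly:
+ *
+ *     #define BLASINT CBLAS_INT
+ *     #define BLASNAME(name) CBLAS_FUNC(name)
+ *     #include "npy_cblas_base.h"
+ *
+ * so that, e.g., BLASNAME(cblas_dgemm) may resolve to a suffixed symbol
+ * such as cblas_dgemm64_ when HAVE_BLAS_ILP64 is defined.
+ */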
npy_intp i;
npy_intp ntypes = ARRAY_SIZE(_part_map);
- if (which >= NPY_NSELECTS) {
- return NULL;
- }
for (i = 0; i < ntypes; i++) {
if (type == _part_map[i].typenum) {
return _part_map[i].argpart[which];
#include "Python.h"
-
-/*
- * From f2c.h, this should be safe unless fortran is set to use 64
- * bit integers. We don't seem to have any good way to detect that.
- */
-typedef int integer;
+#include "numpy/npy_common.h"
+#include "npy_cblas.h"
/*
From the original manpage:
info: Number of the invalid parameter.
*/
-int xerbla_(char *srname, integer *info)
+CBLAS_INT BLAS_FUNC(xerbla)(char *srname, CBLAS_INT *info)
{
static const char format[] = "On entry to %.*s" \
" parameter number %d had an illegal value";
#ifdef WITH_THREAD
save = PyGILState_Ensure();
#endif
- PyOS_snprintf(buf, sizeof(buf), format, len, srname, *info);
+ PyOS_snprintf(buf, sizeof(buf), format, len, srname, (int)*info);
PyErr_SetString(PyExc_ValueError, buf);
#ifdef WITH_THREAD
PyGILState_Release(save);
*/
cls_array_ufunc = PyArray_LookupSpecial(obj, "__array_ufunc__");
if (cls_array_ufunc == NULL) {
+ if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
return NULL;
}
/* Ignore if the same as ndarray.__array_ufunc__ */
return NULL;
}
+/*
+ * Helper to test fromstring with null-terminated strings, as the C-API
+ * supports the -1 length identifier.
+ */
+static PyObject *
+fromstring_null_term_c_api(PyObject *dummy, PyObject *byte_obj)
+{
+ char *string;
+ PyArray_Descr *descr;
+
+ string = PyBytes_AsString(byte_obj);
+ if (string == NULL) {
+ return NULL;
+ }
+ descr = PyArray_DescrNewFromType(NPY_FLOAT64);
+ if (descr == NULL) {
+ return NULL;
+ }
+ return PyArray_FromString(string, -1, descr, -1, " ");
+}
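+/*
+ * Rough usage sketch from the Python side (the helper is exposed through
+ * the numpy.core._multiarray_tests module; see the method table below):
+ *
+ *     from numpy.core import _multiarray_tests as mt
+ *     mt.fromstring_null_term_c_api(b"1 2 3")  # ~ array([1., 2., 3.])
+ *
+ * The -1 length asks PyArray_FromString to read up to the terminating NUL.
+ */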
+
+
/* check no elision for avoided increfs */
static PyObject *
incref_elide(PyObject *dummy, PyObject *args)
Py_RETURN_NONE;
}
+/* used to test that PyArray_As1D emits a not-implemented error */
+static PyObject*
+npy_pyarrayas1d_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
+{
+ PyObject *op = Py_BuildValue("i", 42);
+ PyObject *result = op;
+ int dim = 4;
+ double arg[2] = {1, 2};
+ int temp = PyArray_As1D(&result, (char **)&arg, &dim, NPY_DOUBLE);
+ if (temp < 0) {
+ Py_DECREF(op);
+ return NULL;
+ }
+ /* op != result */
+ Py_DECREF(op);
+ return result;
+}
+
+/* used to test that PyArray_As2D emits a not-implemented error */
+static PyObject*
+npy_pyarrayas2d_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
+{
+ PyObject *op = Py_BuildValue("i", 42);
+ PyObject *result = op;
+ int dim1 = 4;
+ int dim2 = 6;
+ double arg[2][2] = {{1, 2}, {3, 4}};
+ int temp = PyArray_As2D(&result, (char ***)&arg, &dim1, &dim2, NPY_DOUBLE);
+ if (temp < 0) {
+ Py_DECREF(op);
+ return NULL;
+ }
+ /* op != result */
+ Py_DECREF(op);
+ return result;
+}
+
/* used to create array with WRITEBACKIFCOPY flag */
static PyObject*
npy_create_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args)
get_c_wrapping_array(PyObject* NPY_UNUSED(self), PyObject* arg)
{
int writeable, flags;
+ PyArray_Descr *descr;
npy_intp zero = 0;
writeable = PyObject_IsTrue(arg);
flags = writeable ? NPY_ARRAY_WRITEABLE : 0;
/* Create an empty array (which points to a random place) */
- return PyArray_NewFromDescr(&PyArray_Type, PyArray_DescrFromType(NPY_INTP),
+ descr = PyArray_DescrNewFromType(NPY_INTP);
+ return PyArray_NewFromDescr(&PyArray_Type, descr,
1, &zero, NULL, &zero, flags, NULL);
}
num_dims = PyArray_NDIM(array_obj);
descr = PyArray_DESCR(array_obj);
+ Py_INCREF(descr); /* PyArray_AsCArray steals a reference to this */
switch (num_dims) {
case 1:
PyArray_Free((PyObject *) array_obj, (void *) array3);
break;
default:
+ Py_DECREF(descr);
PyErr_SetString(PyExc_ValueError, "array.ndim not in [1, 3]");
return NULL;
}
}
Py_DECREF(val);
+ Py_DECREF(val_64);
val = tmp;
+ val_64 = NULL;
tmp = PyLong_FromUnsignedLongLong(value.lo);
if (tmp == NULL) {
{"test_inplace_increment",
inplace_increment,
METH_VARARGS, NULL},
+ {"fromstring_null_term_c_api",
+ fromstring_null_term_c_api,
+ METH_O, NULL},
{"incref_elide",
incref_elide,
METH_VARARGS, NULL},
{"npy_updateifcopy_deprecation",
npy_updateifcopy_deprecation,
METH_O, NULL},
+ {"npy_pyarrayas1d_deprecation",
+ npy_pyarrayas1d_deprecation,
+ METH_NOARGS, NULL},
+ {"npy_pyarrayas2d_deprecation",
+ npy_pyarrayas2d_deprecation,
+ METH_NOARGS, NULL},
{"npy_create_writebackifcopy",
npy_create_writebackifcopy,
METH_O, NULL},
get_array_function(PyObject *obj)
{
static PyObject *ndarray_array_function = NULL;
+ PyObject *array_function;
if (ndarray_array_function == NULL) {
ndarray_array_function = get_ndarray_array_function();
return ndarray_array_function;
}
- return PyArray_LookupSpecial(obj, "__array_function__");
+ array_function = PyArray_LookupSpecial(obj, "__array_function__");
+ if (array_function == NULL && PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
+
+ return array_function;
}
PyErr_WriteUnraisable(Py_None);
}
}
-};
+}
/* array object functions */
char const * msg = "WRITEBACKIFCOPY detected in array_dealloc. "
" Required call to PyArray_ResolveWritebackIfCopy or "
"PyArray_DiscardWritebackIfCopy is missing.";
- Py_INCREF(self); /* hold on to self in next call since if
- * refcount == 0 it will recurse back into
- *array_dealloc
- */
+ /*
+ * Prevent the refcount from reaching 0 twice and thus recursing into
+ * dealloc. This increases sys.gettotalrefcount, but the path should
+ * not be taken in practice.
+ */
+ Py_INCREF(self);
WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg);
retval = PyArray_ResolveWritebackIfCopy(self);
if (retval < 0)
char const * msg = "UPDATEIFCOPY detected in array_dealloc. "
" Required call to PyArray_ResolveWritebackIfCopy or "
"PyArray_DiscardWritebackIfCopy is missing";
- Py_INCREF(self); /* hold on to self in next call since if
- * refcount == 0 it will recurse back into
- *array_dealloc
- */
+ /*
+ * Prevent the refcount from reaching 0 twice and thus recursing into
+ * dealloc. This increases sys.gettotalrefcount, but the path should
+ * not be taken in practice.
+ */
+ Py_INCREF(self);
/* 2017-Nov-10 1.14 */
WARN_IN_DEALLOC(PyExc_DeprecationWarning, msg);
retval = PyArray_ResolveWritebackIfCopy(self);
if ((fa->flags & NPY_ARRAY_OWNDATA) && fa->data) {
/* Free internal references if an Object array */
if (PyDataType_FLAGCHK(fa->descr, NPY_ITEM_REFCOUNT)) {
- Py_INCREF(self); /*hold on to self */
PyArray_XDECREF(self);
- /*
- * Don't need to DECREF -- because we are deleting
- * self already...
- */
}
npy_free_cache(fa->data, PyArray_NBYTES(self));
}
printf(" ndim : %d\n", fobj->nd);
printf(" shape :");
for (i = 0; i < fobj->nd; ++i) {
- printf(" %d", (int)fobj->dimensions[i]);
+ printf(" %" NPY_INTP_FMT, fobj->dimensions[i]);
}
printf("\n");
printf(" data : %p\n", fobj->data);
printf(" strides:");
for (i = 0; i < fobj->nd; ++i) {
- printf(" %d", (int)fobj->strides[i]);
+ printf(" %" NPY_INTP_FMT, fobj->strides[i]);
}
printf("\n");
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT void
-PyArray_SetDatetimeParseFunction(PyObject *op)
+PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op))
{
}
/*NUMPY_API
*/
NPY_NO_EXPORT int
-PyArray_CompareString(char *s1, char *s2, size_t len)
+PyArray_CompareString(const char *s1, const char *s2, size_t len)
{
const unsigned char *c1 = (unsigned char *)s1;
const unsigned char *c2 = (unsigned char *)s2;
}
}
if (res == NULL && !PyErr_Occurred()) {
- PyErr_SetString(PyExc_ValueError, "No fields found.");
+ /* these dtypes have no fields. Use a MultiIter to broadcast them
+ * to an output array, and fill it with True for EQ (False for NE) */
+ PyArrayMultiIterObject *mit = (PyArrayMultiIterObject *)
+ PyArray_MultiIterNew(2, self, other);
+ if (mit == NULL) {
+ return NULL;
+ }
+
+ res = PyArray_NewFromDescr(&PyArray_Type,
+ PyArray_DescrFromType(NPY_BOOL),
+ mit->nd, mit->dimensions,
+ NULL, NULL, 0, NULL);
+ Py_DECREF(mit);
+ if (res) {
+ PyArray_FILLWBYTE((PyArrayObject *)res,
+ cmp_op == Py_EQ ? 1 : 0);
+ }
}
return res;
}
else {
- /*
- * compare as a string. Assumes self and
- * other have same descr->type
- */
+ /* compare as a string. Assumes self and other have same descr->type */
return _strings_richcompare(self, other, cmp_op, 0);
}
}
if (PySequence_NoString_Check(op)) {
PyErr_SetString(PyExc_ValueError,
"setting an array element with a sequence.");
- Py_DECREF(type);
- Py_XDECREF(value);
- Py_XDECREF(traceback);
+ npy_PyErr_ChainExceptionsCause(type, value, traceback);
}
else {
PyErr_Restore(type, value, traceback);
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
* npy_float, npy_double, npy_longdouble,
* npy_datetime, npy_timedelta#
+ * #supports_nat = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1#
*/
/**begin repeat1
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
* npy_float, npy_double, npy_longdouble,
* npy_datetime, npy_timedelta#
+ * #floatingpoint = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0#
*/
static void
@FROMTYPE@_to_@TOTYPE@(void *input, void *output, npy_intp n,
@totype@ *op = output;
while (n--) {
- *op++ = (@totype@)*ip++;
+ @fromtype@ f = *ip++;
+ @totype@ t = (@totype@)f;
+#if @supports_nat@ && @floatingpoint@
+ /* Avoid undefined behaviour for NaN -> NaT */
+ if (npy_isnan(f)) {
+ t = (@totype@)NPY_DATETIME_NAT;
+ }
+#endif
+ *op++ = t;
}
}
/**end repeat1**/
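+/*
+ * Seen from Python, the guard above means (illustrative):
+ *
+ *     >>> np.array([np.nan]).astype("datetime64[ns]")  # -> NaT
+ *
+ * where the float-to-integer cast of NaN was previously undefined
+ * behaviour.
+ */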
@totype@ *op = output;
while (n--) {
- *op++ = (@totype@)*ip;
+ @fromtype@ f = *ip;
+ @totype@ t = (@totype@)f;
+#if @supports_nat@
+ /* Avoid undefined behaviour for NaN -> NaT */
+ if (npy_isnan(f)) {
+ t = (@totype@)NPY_DATETIME_NAT;
+ }
+#endif
+ *op++ = t;
ip += 2;
}
}
}
/**begin repeat
- * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * #fname = CFLOAT, CDOUBLE#
+ * #type = npy_cfloat, npy_cdouble#
+ */
+static int
+@fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore),
+ PyArray_Descr *NPY_UNUSED(ignored))
+{
+ double result;
+ int ret_real, ret_imag;
+ int next;
+ @type@ output;
+
+ ret_real = NumPyOS_ascii_ftolf(fp, &result);
+ // Peek at the next character (int, so EOF is representable)
+ next = getc(fp);
+ if ((next == '+') || (next == '-')) {
+ // Imaginary component specified
+ output.real = result;
+ // Revert peek and read imaginary component
+ ungetc(next, fp);
+ ret_imag = NumPyOS_ascii_ftolf(fp, &result);
+ // Peek at the next character
+ next = getc(fp);
+ if ((ret_imag == 1) && (next == 'j')) {
+ // The read succeeded and the immediately following char is j
+ output.imag = result;
+ }
+ else {
+ output.imag = 0;
+ // Push back an invalid char to trigger the "not everything was read" error
+ ungetc('a', fp);
+ }
+ }
+ else if (next == 'j') {
+ // Real component not specified
+ output.real = 0;
+ output.imag = result;
+ }
+ else {
+ // Imaginary component not specified
+ output.real = result;
+ output.imag = 0.;
+ // Next character is not + / - / j. Revert peek.
+ ungetc(next, fp);
+ }
+ *(@type@ *)ip = output;
+ return ret_real;
+}
+/**end repeat**/
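+/*
+ * For illustration, the scanner above accepts the usual complex forms:
+ * reading "1.5-2j" from the file yields output.real == 1.5 and
+ * output.imag == -2.0, while a bare "1.5" leaves output.imag == 0.
+ */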
+
+
+/**begin repeat
+ * #fname = CLONGDOUBLE,
* OBJECT, STRING, UNICODE, VOID,
* DATETIME, TIMEDELTA#
*/
}
/**begin repeat
- * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * #fname = CFLOAT, CDOUBLE#
+ * #type = npy_cfloat, npy_cdouble#
+ */
+static int
+@fname@_fromstr(char *str, void *ip, char **endptr,
+ PyArray_Descr *NPY_UNUSED(ignore))
+{
+ double result;
+ @type@ output;
+
+ result = NumPyOS_ascii_strtod(str, endptr);
+
+ if (endptr && ((*endptr[0] == '+') || (*endptr[0] == '-'))) {
+ // Imaginary component specified
+ output.real = result;
+ // Reading imaginary component; str remembers where it starts
+ str = *endptr;
+ result = NumPyOS_ascii_strtod(str, endptr);
+ if (endptr && *endptr[0] == 'j') {
+ // The read succeeded if the immediately following char is j
+ output.imag = result;
+ // Skip j
+ ++*endptr;
+ }
+ else {
+ /*
+ * Reset *endptr to the start of the imaginary part to trigger the
+ * "not everything was read" error
+ */
+ *endptr = str;
+ output.imag = 0;
+ }
+ }
+ else if (endptr && *endptr[0] == 'j') {
+ // Real component not specified
+ output.real = 0;
+ output.imag = result;
+ // Skip j
+ ++*endptr;
+ }
+ else {
+ // Imaginary component not specified
+ output.real = result;
+ output.imag = 0.;
+ }
+ *(@type@ *)ip = output;
+ return 0;
+}
+/**end repeat**/
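+/*
+ * Illustration of the endptr contract above: parsing "3+4j" stores 3 + 4i
+ * and leaves *endptr just past the trailing 'j', while parsing "3+4"
+ * resets *endptr to the '+' so the caller reports that not everything
+ * was read.
+ */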
+
+
+/**begin repeat
+ * #fname = CLONGDOUBLE,
* OBJECT, STRING, UNICODE, VOID#
*/
* #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*8#
* #iscomplex = 0*14, 1*3, 0*2#
* #incr = ip++*14, ip+=2*3, ip++*2#
+ * #isdatetime = 0*17, 1*2#
*/
static int
@fname@_argmax(@type@ *ip, npy_intp n, npy_intp *max_ind,
return 0;
}
#endif
+#if @isdatetime@
+ if (mp == NPY_DATETIME_NAT) {
+ /* NaT encountered, it's maximal */
+ return 0;
+ }
+#endif
for (i = 1; i < n; i++) {
@incr@;
}
}
#else
+#if @isdatetime@
+ if (*ip == NPY_DATETIME_NAT) {
+ /* NaT encountered, it's maximal */
+ *max_ind = i;
+ break;
+ }
+#endif
if (!@le@(*ip, mp)) { /* negated, for correct nan handling */
mp = *ip;
*max_ind = i;
* #fname = BYTE, UBYTE, SHORT, USHORT, INT, UINT,
* LONG, ULONG, LONGLONG, ULONGLONG,
* HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE#
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * DATETIME, TIMEDELTA#
* #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
* npy_half, npy_float, npy_double, npy_longdouble,
- * npy_float, npy_double, npy_longdouble#
- * #isfloat = 0*10, 1*7#
- * #isnan = nop*10, npy_half_isnan, npy_isnan*6#
- * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*6#
- * #iscomplex = 0*14, 1*3#
- * #incr = ip++*14, ip+=2*3#
+ * npy_float, npy_double, npy_longdouble,
+ * npy_datetime, npy_timedelta#
+ * #isfloat = 0*10, 1*7, 0*2#
+ * #isnan = nop*10, npy_half_isnan, npy_isnan*6, nop*2#
+ * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*8#
+ * #iscomplex = 0*14, 1*3, 0*2#
+ * #incr = ip++*14, ip+=2*3, ip++*2#
+ * #isdatetime = 0*17, 1*2#
*/
static int
@fname@_argmin(@type@ *ip, npy_intp n, npy_intp *min_ind,
return 0;
}
#endif
+#if @isdatetime@
+ if (mp == NPY_DATETIME_NAT) {
+ /* NaT encountered, it's minimal */
+ return 0;
+ }
+#endif
for (i = 1; i < n; i++) {
@incr@;
}
}
#else
+#if @isdatetime@
+ if (*ip == NPY_DATETIME_NAT) {
+ /* NaT encountered, it's minimal */
+ *min_ind = i;
+ break;
+ }
+#endif
if (!@le@(mp, *ip)) { /* negated, for correct nan handling */
mp = *ip;
*min_ind = i;
#undef _LESS_THAN_OR_EQUAL
-/**begin repeat
- *
- * #fname = DATETIME, TIMEDELTA#
- * #type = npy_datetime, npy_timedelta#
- */
-static int
-@fname@_argmin(@type@ *ip, npy_intp n, npy_intp *min_ind,
- PyArrayObject *NPY_UNUSED(aip))
-{
- /* NPY_DATETIME_NAT is smaller than every other value, we skip
- * it for consistency with min().
- */
- npy_intp i;
- @type@ mp = NPY_DATETIME_NAT;
-
- i = 0;
- while (i < n && mp == NPY_DATETIME_NAT) {
- mp = ip[i];
- i++;
- }
- if (i == n) {
- /* All NaTs: return 0 */
- *min_ind = 0;
- return 0;
- }
- *min_ind = i - 1;
- for (; i < n; i++) {
- if (mp > ip[i] && ip[i] != NPY_DATETIME_NAT) {
- mp = ip[i];
- *min_ind = i;
- }
- }
- return 0;
-}
-
-/**end repeat**/
-
static int
OBJECT_argmax(PyObject **ip, npy_intp n, npy_intp *max_ind,
PyArrayObject *NPY_UNUSED(aip))
npy_intp n, void *NPY_UNUSED(ignore))
{
#if defined(HAVE_CBLAS)
- int is1b = blas_stride(is1, sizeof(@type@));
- int is2b = blas_stride(is2, sizeof(@type@));
+ CBLAS_INT is1b = blas_stride(is1, sizeof(@type@));
+ CBLAS_INT is2b = blas_stride(is2, sizeof(@type@));
if (is1b && is2b)
{
double sum = 0.; /* double for stability */
while (n > 0) {
- int chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
+ CBLAS_INT chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
- sum += cblas_@prefix@dot(chunk,
+ sum += CBLAS_FUNC(cblas_@prefix@dot)(chunk,
(@type@ *) ip1, is1b,
(@type@ *) ip2, is2b);
/* use char strides here */
char *op, npy_intp n, void *NPY_UNUSED(ignore))
{
#if defined(HAVE_CBLAS)
- int is1b = blas_stride(is1, sizeof(@ctype@));
- int is2b = blas_stride(is2, sizeof(@ctype@));
+ CBLAS_INT is1b = blas_stride(is1, sizeof(@ctype@));
+ CBLAS_INT is2b = blas_stride(is2, sizeof(@ctype@));
if (is1b && is2b) {
double sum[2] = {0., 0.}; /* double for stability */
while (n > 0) {
- int chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
+ CBLAS_INT chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
@type@ tmp[2];
- cblas_@prefix@dotu_sub((int)n, ip1, is1b, ip2, is2b, tmp);
+ CBLAS_FUNC(cblas_@prefix@dotu_sub)((CBLAS_INT)n, ip1, is1b, ip2, is2b, tmp);
sum[0] += (double)tmp[0];
sum[1] += (double)tmp[1];
/* use char strides here */
if (dtype == NULL) {
goto fail;
}
- Py_INCREF(dtype);
goto promote_types;
}
/* Check if it's a NumPy scalar */
int itemsize;
PyObject *temp;
+ /* dtype is not used in this (string discovery) branch */
+ Py_DECREF(dtype);
+ dtype = NULL;
+
if (string_type == NPY_STRING) {
if ((temp = PyObject_Str(obj)) == NULL) {
goto fail;
}
Py_DECREF(ip);
}
+ else if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
+
/* The array struct interface */
ip = PyArray_LookupSpecial_OnInstance(obj, "__array_struct__");
}
Py_DECREF(ip);
}
+ else if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
/* The old buffer interface */
#if !defined(NPY_PY3K)
goto fail;
}
}
+ else if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
/*
* If we reached the maximum recursion depth without hitting one
*/
if (stride > 0 && npy_is_aligned((void *)stride, itemsize)) {
stride /= itemsize;
+#ifndef HAVE_BLAS_ILP64
if (stride <= INT_MAX) {
+#else
+ if (stride <= NPY_MAX_INT64) {
+#endif
return stride;
}
}
* Define a chunksize for CBLAS. CBLAS counts in integers.
*/
#if NPY_MAX_INTP > INT_MAX
-# define NPY_CBLAS_CHUNK (INT_MAX / 2 + 1)
+# ifndef HAVE_BLAS_ILP64
+# define NPY_CBLAS_CHUNK (INT_MAX / 2 + 1)
+# else
+# define NPY_CBLAS_CHUNK (NPY_MAX_INT64 / 2 + 1)
+# endif
#else
# define NPY_CBLAS_CHUNK NPY_MAX_INTP
#endif
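+
+/*
+ * Sketch of how the chunk size is used (mirrors the dot loops above): a
+ * reduction over n elements is split so that each CBLAS call receives a
+ * count representable in the BLAS integer type:
+ *
+ *     while (n > 0) {
+ *         CBLAS_INT chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
+ *         // ... one cblas call on `chunk` elements ...
+ *         n -= chunk;
+ *     }
+ */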
char invalid;
npy_intp j, m;
+ /*
+ * Check for zero-sized axes unless there is nothing to do: an empty
+ * shape (any dimension of size 0) cannot be indexed at all.
+ */
+ if (count != 0) {
+ for (i = 0; i < ravel_ndim; ++i) {
+ if (ravel_dims[i] == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot unravel if shape has zero entries (is empty).");
+ return NPY_FAIL;
+ }
+ }
+ }
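+ /*
+ * E.g. np.unravel_index(0, (3, 0)) raises this ValueError, since no
+ * flat index is valid for an empty shape (illustrative).
+ */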
+
NPY_BEGIN_ALLOW_THREADS;
invalid = 0;
while (count--) {
}
*sortkind = NPY_QUICKSORT;
-
str = PyBytes_AsString(obj);
if (!str) {
int ret;
tmp = PyUnicode_AsASCIIString(object);
if (tmp == NULL) {
- PyErr_SetString(PyExc_ValueError, "Invalid unicode string passed in "
- "for the array ordering. "
- "Please pass in 'C', 'F', 'A' "
- "or 'K' instead");
+ PyErr_SetString(PyExc_ValueError,
+ "Invalid unicode string passed in for the array ordering. "
+ "Please pass in 'C', 'F', 'A' or 'K' instead");
return NPY_FAIL;
}
ret = PyArray_OrderConverter(tmp, val);
return ret;
}
else if (!PyBytes_Check(object) || PyBytes_GET_SIZE(object) < 1) {
- /* 2015-12-14, 1.11 */
- int ret = DEPRECATE("Non-string object detected for "
- "the array ordering. Please pass "
- "in 'C', 'F', 'A', or 'K' instead");
-
- if (ret < 0) {
- return -1;
- }
-
- if (PyObject_IsTrue(object)) {
- *val = NPY_FORTRANORDER;
- }
- else {
- *val = NPY_CORDER;
- }
- if (PyErr_Occurred()) {
- return NPY_FAIL;
- }
- return NPY_SUCCEED;
+ PyErr_SetString(PyExc_ValueError,
+ "Non-string object detected for the array ordering. "
+ "Please pass in 'C', 'F', 'A', or 'K' instead");
+ return NPY_FAIL;
}
else {
str = PyBytes_AS_STRING(object);
if (strlen(str) != 1) {
- /* 2015-12-14, 1.11 */
- int ret = DEPRECATE("Non length-one string passed "
- "in for the array ordering. "
- "Please pass in 'C', 'F', 'A', "
- "or 'K' instead");
-
- if (ret < 0) {
- return -1;
- }
+ PyErr_SetString(PyExc_ValueError,
+ "Non-string object detected for the array ordering. "
+ "Please pass in 'C', 'F', 'A', or 'K' instead");
+ return NPY_FAIL;
}
if (str[0] == 'C' || str[0] == 'c') {
if (object && (PyTuple_Check(object) || PyList_Check(object))) {
if (PySequence_Size(object) != n) {
PyErr_Format(PyExc_ValueError,
- "list of clipmodes has wrong length (%d instead of %d)",
- (int)PySequence_Size(object), n);
+ "list of clipmodes has wrong length (%zd instead of %d)",
+ PySequence_Size(object), n);
return NPY_FAIL;
}
from_order = dtype_kind_to_ordering(from->kind);
to_order = dtype_kind_to_ordering(to->kind);
- return from_order != -1 && from_order <= to_order;
+ if (to->kind == 'm') {
+ /* both types being timedelta is already handled before. */
+ int integer_order = dtype_kind_to_ordering('i');
+ return (from_order != -1) && (from_order <= integer_order);
+ }
+
+ return (from_order != -1) && (from_order <= to_order);
}
else {
return 0;
* regards to the handling of text representations.
*/
+/*
+ * Scanning functions for next-element parsing and separator skipping.
+ * These functions return:
+ * - 0 to indicate more data to read
+ * - -1 when reading stopped at the end of the string/file
+ * - -2 when reading stopped before the end was reached.
+ *
+ * Additionally, the dtype-specific parsing functions may set the Python
+ * error state (they have to acquire the GIL first).
+ */
typedef int (*next_element)(void **, void *, PyArray_Descr *, void *);
typedef int (*skip_separator)(void **, const char *, void *);
+
+static npy_bool
+string_is_fully_read(char const* start, char const* end) {
+ if (end == NULL) {
+ return *start == '\0'; /* null terminated */
+ }
+ else {
+ return start >= end; /* fixed length */
+ }
+}
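+/*
+ * Two modes, for illustration: with end == NULL the buffer is taken to be
+ * NUL-terminated (fromstring with length -1), otherwise an explicit end
+ * pointer bounds a fixed-length buffer and start >= end once everything
+ * has been consumed.
+ */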
+
+
static int
fromstr_next_element(char **s, void *dptr, PyArray_Descr *dtype,
const char *end)
char *e = *s;
int r = dtype->f->fromstr(*s, dptr, &e, dtype);
/*
- * fromstr always returns 0 for basic dtypes
- * s points to the end of the parsed string
- * if an error occurs s is not changed
+ * fromstr always returns 0 for basic dtypes; e points to the end of the
+ * parsed string. If s is not changed, an error occurred or the end was
+ * reached.
*/
- if (*s == e) {
- /* Nothing read */
- return -1;
+ if (*s == e || r < 0) {
+ /* Nothing read, could be end of string or an error (or both) */
+ if (string_is_fully_read(*s, end)) {
+ return -1;
+ }
+ return -2;
}
*s = e;
if (end != NULL && *s > end) {
+ /* Stop the iteration if we read far enough */
return -1;
}
- return r;
+ return 0;
}
static int
if (r == 1) {
return 0;
}
- else {
+ else if (r == EOF) {
return -1;
}
+ else {
+ /* unable to read more, but EOF was not reached, indicating an error */
+ return -2;
+ }
}
/*
{
char *string = *s;
int result = 0;
+
while (1) {
char c = *string;
- if (c == '\0' || (end != NULL && string >= end)) {
+ if (string_is_fully_read(string, end)) {
result = -1;
break;
}
*/
if (slen != PyArray_DIMS(a)[dim] && slen != 1) {
PyErr_Format(PyExc_ValueError,
- "cannot copy sequence with size %d to array axis "
- "with dimension %d", (int)slen, (int)PyArray_DIMS(a)[dim]);
+ "cannot copy sequence with size %zd to array axis "
+ "with dimension %" NPY_INTP_FMT, slen, PyArray_DIMS(a)[dim]);
goto fail;
}
return 0;
}
}
+ else if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
+
/* obj has the __array_interface__ interface */
e = PyArray_LookupSpecial_OnInstance(obj, "__array_interface__");
return 0;
}
}
+ else if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
seq = PySequence_Fast(obj, "Could not convert object to sequence");
if (seq == NULL) {
return 0;
}
+static PyObject *
+raise_memory_error(int nd, npy_intp *dims, PyArray_Descr *descr)
+{
+ static PyObject *exc_type = NULL;
+
+ npy_cache_import(
+ "numpy.core._exceptions", "_ArrayMemoryError",
+ &exc_type);
+ if (exc_type == NULL) {
+ goto fail;
+ }
+
+ PyObject *shape = PyArray_IntTupleFromIntp(nd, dims);
+ if (shape == NULL) {
+ goto fail;
+ }
+
+ /* produce an error object */
+ PyObject *exc_value = PyTuple_Pack(2, shape, (PyObject *)descr);
+ Py_DECREF(shape);
+ if (exc_value == NULL){
+ goto fail;
+ }
+ PyErr_SetObject(exc_type, exc_value);
+ Py_DECREF(exc_value);
+ return NULL;
+
+fail:
+ /* we couldn't raise the formatted exception for some reason */
+ PyErr_WriteUnraisable(NULL);
+ return PyErr_NoMemory();
+}
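+/*
+ * From Python, a failed allocation then surfaces roughly as (illustrative):
+ *
+ *     MemoryError: Unable to allocate ... for an array with shape (...)
+ *     and data type float64
+ *
+ * _ArrayMemoryError subclasses MemoryError, so existing handlers keep
+ * working.
+ */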
+
/*
* Generic new array creation routine.
* Internal variant with calloc argument for PyArray_Zeros.
data = npy_alloc_cache(nbytes);
}
if (data == NULL) {
- static PyObject *exc_type = NULL;
-
- npy_cache_import(
- "numpy.core._exceptions", "_ArrayMemoryError",
- &exc_type);
- if (exc_type == NULL) {
- return NULL;
- }
-
- PyObject *shape = PyArray_IntTupleFromIntp(fa->nd,fa->dimensions);
- if (shape == NULL) {
- return NULL;
- }
-
- /* produce an error object */
- PyObject *exc_value = PyTuple_Pack(2, shape, descr);
- Py_DECREF(shape);
- if (exc_value == NULL){
- return NULL;
- }
- PyErr_SetObject(exc_type, exc_value);
- Py_DECREF(exc_value);
- return NULL;
-
+ return raise_memory_error(fa->nd, fa->dimensions, descr);
}
fa->flags |= NPY_ARRAY_OWNDATA;
if (arr == NULL) {
if ((flags & NPY_ARRAY_WRITEBACKIFCOPY) ||
(flags & NPY_ARRAY_UPDATEIFCOPY)) {
+ Py_DECREF(dtype);
Py_XDECREF(newtype);
PyErr_SetString(PyExc_TypeError,
"WRITEBACKIFCOPY used for non-array input.");
attr = PyArray_LookupSpecial_OnInstance(input, "__array_struct__");
if (attr == NULL) {
- return Py_NotImplemented;
+ if (PyErr_Occurred()) {
+ return NULL;
+ } else {
+ return Py_NotImplemented;
+ }
}
if (!NpyCapsule_Check(attr)) {
goto fail;
iface = PyArray_LookupSpecial_OnInstance(origin,
"__array_interface__");
if (iface == NULL) {
+ if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
return Py_NotImplemented;
}
if (!PyDict_Check(iface)) {
array_meth = PyArray_LookupSpecial_OnInstance(op, "__array__");
if (array_meth == NULL) {
+ if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
return Py_NotImplemented;
}
if (context == NULL) {
/* They all zero-out the memory as previously done */
/* steals reference to descr -- and enforces native byteorder on it.*/
+
/*NUMPY_API
- Like FromDimsAndData but uses the Descr structure instead of typecode
- as input.
+ Deprecated, use PyArray_NewFromDescr instead.
*/
NPY_NO_EXPORT PyObject *
-PyArray_FromDimsAndDataAndDescr(int nd, int *d,
+PyArray_FromDimsAndDataAndDescr(int NPY_UNUSED(nd), int *NPY_UNUSED(d),
PyArray_Descr *descr,
- char *data)
+ char *NPY_UNUSED(data))
{
- PyObject *ret;
- int i;
- npy_intp newd[NPY_MAXDIMS];
- char msg[] = "PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr.";
-
- if (DEPRECATE(msg) < 0) {
- /* 2009-04-30, 1.5 */
- return NULL;
- }
- if (!PyArray_ISNBO(descr->byteorder))
- descr->byteorder = '=';
- for (i = 0; i < nd; i++) {
- newd[i] = (npy_intp) d[i];
- }
- ret = PyArray_NewFromDescr(&PyArray_Type, descr,
- nd, newd,
- NULL, data,
- (data ? NPY_ARRAY_CARRAY : 0), NULL);
- return ret;
+ PyErr_SetString(PyExc_NotImplementedError,
+ "PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr.");
+ Py_DECREF(descr);
+ return NULL;
}
/*NUMPY_API
- Construct an empty array from dimensions and typenum
+ Deprecated, use PyArray_SimpleNew instead.
*/
NPY_NO_EXPORT PyObject *
-PyArray_FromDims(int nd, int *d, int type)
+PyArray_FromDims(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type))
{
- PyArrayObject *ret;
- char msg[] = "PyArray_FromDims: use PyArray_SimpleNew.";
-
- if (DEPRECATE(msg) < 0) {
- /* 2009-04-30, 1.5 */
- return NULL;
- }
- ret = (PyArrayObject *)PyArray_FromDimsAndDataAndDescr(nd, d,
- PyArray_DescrFromType(type),
- NULL);
- /*
- * Old FromDims set memory to zero --- some algorithms
- * relied on that. Better keep it the same. If
- * Object type, then it's already been set to zero, though.
- */
- if (ret && (PyArray_DESCR(ret)->type_num != NPY_OBJECT)) {
- memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret));
- }
- return (PyObject *)ret;
+ PyErr_SetString(PyExc_NotImplementedError,
+ "PyArray_FromDims: use PyArray_SimpleNew.");
+ return NULL;
}
/* end old calls */
src_size = PyArray_SIZE(src);
if (dst_size != src_size) {
PyErr_Format(PyExc_ValueError,
- "cannot copy from array of size %d into an array "
- "of size %d", (int)src_size, (int)dst_size);
+ "cannot copy from array of size %" NPY_INTP_FMT " into an array "
+ "of size %" NPY_INTP_FMT, src_size, dst_size);
return -1;
}
return NULL;
}
+/* This array creation function steals the reference to dtype. */
static PyArrayObject *
array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, npy_intp num, size_t *nread)
{
PyArrayObject *r;
npy_off_t start, numbytes;
+ int elsize;
if (num < 0) {
int fail = 0;
}
num = numbytes / dtype->elsize;
}
+
/*
- * When dtype->subarray is true, PyArray_NewFromDescr will decref dtype
- * even on success, so make sure it stays around until exit.
+ * Array creation may move sub-array dimensions from the dtype to array
+ * dimensions, so we need to use the original element size when reading.
*/
- Py_INCREF(dtype);
+ elsize = dtype->elsize;
+
r = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &num,
NULL, NULL, 0, NULL);
if (r == NULL) {
- Py_DECREF(dtype);
return NULL;
}
+
NPY_BEGIN_ALLOW_THREADS;
- *nread = fread(PyArray_DATA(r), dtype->elsize, num, fp);
+ *nread = fread(PyArray_DATA(r), elsize, num, fp);
NPY_END_ALLOW_THREADS;
- Py_DECREF(dtype);
return r;
}
/*
* Create an array by reading from the given stream, using the passed
* next_element and skip_separator functions.
+ * As typical for array creation functions, it steals the reference to dtype.
*/
#define FROM_BUFFER_SIZE 4096
static PyArrayObject *
npy_intp i;
char *dptr, *clean_sep, *tmp;
int err = 0;
+ int stop_reading_flag; /* -1 indicates end reached; -2 a parsing error */
npy_intp thisbuf = 0;
npy_intp size;
npy_intp bytes, totalbytes;
size = (num >= 0) ? num : FROM_BUFFER_SIZE;
/*
- * When dtype->subarray is true, PyArray_NewFromDescr will decref dtype
- * even on success, so make sure it stays around until exit.
+ * Array creation may move sub-array dimensions from the dtype to array
+ * dimensions, so we need to use the original dtype when reading.
*/
Py_INCREF(dtype);
+
r = (PyArrayObject *)
PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &size,
NULL, NULL, 0, NULL);
Py_DECREF(dtype);
return NULL;
}
+
clean_sep = swab_separator(sep);
if (clean_sep == NULL) {
err = 1;
NPY_BEGIN_ALLOW_THREADS;
totalbytes = bytes = size * dtype->elsize;
dptr = PyArray_DATA(r);
- for (i= 0; num < 0 || i < num; i++) {
- if (next(&stream, dptr, dtype, stream_data) < 0) {
- /* EOF */
+ for (i = 0; num < 0 || i < num; i++) {
+ stop_reading_flag = next(&stream, dptr, dtype, stream_data);
+ if (stop_reading_flag < 0) {
break;
}
*nread += 1;
dptr = tmp + (totalbytes - bytes);
thisbuf = 0;
}
- if (skip_sep(&stream, clean_sep, stream_data) < 0) {
+ stop_reading_flag = skip_sep(&stream, clean_sep, stream_data);
+ if (stop_reading_flag < 0) {
+ if (num == i + 1) {
+ /* if we read as much as requested, the trailing separator is optional */
+ stop_reading_flag = -1;
+ }
break;
}
}
}
}
NPY_END_ALLOW_THREADS;
+
free(clean_sep);
+ if (stop_reading_flag == -2) {
+ if (PyErr_Occurred()) {
+ /* If an error is already set (unlikely), do not create new one */
+ Py_DECREF(r);
+ Py_DECREF(dtype);
+ return NULL;
+ }
+ /* 2019-09-12, NumPy 1.18 */
+ if (DEPRECATE(
+ "string or file could not be read to its end due to unmatched "
+ "data; this will raise a ValueError in the future.") < 0) {
+ goto fail;
+ }
+ }
+
fail:
Py_DECREF(dtype);
if (err == 1) {
* Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an
* array corresponding to the data encoded in that file.
*
- * If the dtype is NULL, the default array type is used (double).
- * If non-null, the reference is stolen and if dtype->subarray is true dtype
- * will be decrefed even on success.
+ * The reference to `dtype` is stolen (the function may not hold on to the
+ * passed-in dtype).
*
* The number of elements to read is given as ``num``; if it is < 0,
* then as many as possible are read.
(skip_separator) fromfile_skip_separator, NULL);
}
if (ret == NULL) {
- Py_DECREF(dtype);
return NULL;
}
if (((npy_intp) nread) < num) {
s = (npy_intp)ts - offset;
n = (npy_intp)count;
itemsize = type->elsize;
- if (n < 0 ) {
+ if (n < 0) {
+ if (itemsize == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot determine count if itemsize is 0");
+ Py_DECREF(type);
+ return NULL;
+ }
if (s % itemsize != 0) {
PyErr_SetString(PyExc_ValueError,
"buffer size must be a multiple"\
return NULL;
}
}
+ /*
+ * NewFromDescr may replace dtype to absorb subarray shape
+ * into the array, so get size beforehand.
+ */
+ npy_intp size_to_copy = num*dtype->elsize;
ret = (PyArrayObject *)
PyArray_NewFromDescr(&PyArray_Type, dtype,
1, &num, NULL, NULL,
if (ret == NULL) {
return NULL;
}
- memcpy(PyArray_DATA(ret), data, num*dtype->elsize);
+ memcpy(PyArray_DATA(ret), data, size_to_copy);
}
else {
/* read from character-based string */
size_t nread = 0;
char *end;
- if (dtype->f->scanfunc == NULL) {
+ if (dtype->f->fromstr == NULL) {
PyErr_SetString(PyExc_ValueError,
"don't know how to read " \
"character strings with that " \
}
for (i = 0; (i < count || count == -1) &&
(value = PyIter_Next(iter)); i++) {
- if (i >= elcount) {
+ if (i >= elcount && elsize != 0) {
npy_intp nbytes;
/*
Grow PyArray_DATA(ret):
#include "_datetime.h"
#include "datetime_strings.h"
+/*
+ * Computes the Python `ret, d = divmod(d, unit)`.
+ *
+ * Note that GCC is smart enough at -O2 to eliminate the `if (*d < 0)` branch
+ * for subsequent calls to this function - it is able to deduce that `*d >= 0`.
+ */
+static inline
+npy_int64 extract_unit_64(npy_int64 *d, npy_int64 unit) {
+ assert(unit > 0);
+ npy_int64 div = *d / unit;
+ npy_int64 mod = *d % unit;
+ if (mod < 0) {
+ mod += unit;
+ div -= 1;
+ }
+ assert(mod >= 0);
+ *d = mod;
+ return div;
+}
+
+static inline
+npy_int32 extract_unit_32(npy_int32 *d, npy_int32 unit) {
+ assert(unit > 0);
+ npy_int32 div = *d / unit;
+ npy_int32 mod = *d % unit;
+ if (mod < 0) {
+ mod += unit;
+ div -= 1;
+ }
+ assert(mod >= 0);
+ *d = mod;
+ return div;
+}
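+/*
+ * Worked example: like Python's divmod, the remainder is always
+ * non-negative. For d = -1 and unit = 60:
+ *
+ *     npy_int64 d = -1;
+ *     npy_int64 div = extract_unit_64(&d, 60);  // div == -1, d == 59
+ *
+ * whereas truncating C division would give 0 and -1.
+ */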
+
/*
* Imports the PyDateTime functions so we can create these objects.
* This is called during module initialization
npy_int64 year;
/* Break down the 400 year cycle to get the year and day within the year */
- if (days >= 0) {
- year = 400 * (days / days_per_400years);
- days = days % days_per_400years;
- }
- else {
- year = 400 * ((days - (days_per_400years - 1)) / days_per_400years);
- days = days % days_per_400years;
- if (days < 0) {
- days += days_per_400years;
- }
- }
+ year = 400 * extract_unit_64(&days, days_per_400years);
/* Work out the year/day within the 400 year cycle */
if (days >= 366) {
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT npy_datetime
-PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d)
+PyArray_DatetimeStructToDatetime(
+ NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d))
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_DatetimeStructToDatetime function has "
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT npy_datetime
-PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT fr, npy_timedeltastruct *d)
+PyArray_TimedeltaStructToTimedelta(
+ NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d))
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_TimedeltaStructToTimedelta function has "
return -1;
}
-/*
- * Computes the python `ret, d = divmod(d, unit)`.
- *
- * Note that GCC is smart enough at -O2 to eliminate the `if(*d < 0)` branch
- * for subsequent calls to this command - it is able to deduce that `*d >= 0`.
- */
-static inline
-npy_int64 extract_unit(npy_datetime *d, npy_datetime unit) {
- assert(unit > 0);
- npy_int64 div = *d / unit;
- npy_int64 mod = *d % unit;
- if (mod < 0) {
- mod += unit;
- div -= 1;
- }
- assert(mod >= 0);
- *d = mod;
- return div;
-}
-
/*
* Converts a datetime based on the given metadata into a datetimestruct
*/
npy_datetime dt,
npy_datetimestruct *out)
{
- npy_int64 perday;
+ npy_int64 days;
/* Initialize the output to all zeros */
memset(out, 0, sizeof(npy_datetimestruct));
break;
case NPY_FR_M:
- out->year = 1970 + extract_unit(&dt, 12);
+ out->year = 1970 + extract_unit_64(&dt, 12);
out->month = dt + 1;
break;
break;
case NPY_FR_h:
- perday = 24LL;
-
- set_datetimestruct_days(extract_unit(&dt, perday), out);
+ days = extract_unit_64(&dt, 24LL);
+ set_datetimestruct_days(days, out);
out->hour = (int)dt;
break;
case NPY_FR_m:
- perday = 24LL * 60;
-
- set_datetimestruct_days(extract_unit(&dt, perday), out);
- out->hour = (int)extract_unit(&dt, 60);
- out->min = (int)dt;
+ days = extract_unit_64(&dt, 60LL*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 60LL);
+ out->min = (int)dt;
break;
case NPY_FR_s:
- perday = 24LL * 60 * 60;
-
- set_datetimestruct_days(extract_unit(&dt, perday), out);
- out->hour = (int)extract_unit(&dt, 60*60);
- out->min = (int)extract_unit(&dt, 60);
+ days = extract_unit_64(&dt, 60LL*60*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 60LL*60);
+ out->min = (int)extract_unit_64(&dt, 60LL);
out->sec = (int)dt;
break;
case NPY_FR_ms:
- perday = 24LL * 60 * 60 * 1000;
-
- set_datetimestruct_days(extract_unit(&dt, perday), out);
- out->hour = (int)extract_unit(&dt, 1000LL*60*60);
- out->min = (int)extract_unit(&dt, 1000LL*60);
- out->sec = (int)extract_unit(&dt, 1000LL);
+ days = extract_unit_64(&dt, 1000LL*60*60*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 1000LL*60*60);
+ out->min = (int)extract_unit_64(&dt, 1000LL*60);
+ out->sec = (int)extract_unit_64(&dt, 1000LL);
out->us = (int)(dt * 1000);
break;
case NPY_FR_us:
- perday = 24LL * 60LL * 60LL * 1000LL * 1000LL;
- set_datetimestruct_days(extract_unit(&dt, perday), out);
- out->hour = (int)extract_unit(&dt, 1000LL*1000*60*60);
- out->min = (int)extract_unit(&dt, 1000LL*1000*60);
- out->sec = (int)extract_unit(&dt, 1000LL*1000);
+ days = extract_unit_64(&dt, 1000LL*1000*60*60*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 1000LL*1000*60*60);
+ out->min = (int)extract_unit_64(&dt, 1000LL*1000*60);
+ out->sec = (int)extract_unit_64(&dt, 1000LL*1000);
out->us = (int)dt;
break;
case NPY_FR_ns:
- perday = 24LL * 60LL * 60LL * 1000LL * 1000LL * 1000LL;
-
- set_datetimestruct_days(extract_unit(&dt, perday), out);
- out->hour = (int)extract_unit(&dt, 1000LL*1000*1000*60*60);
- out->min = (int)extract_unit(&dt, 1000LL*1000*1000*60);
- out->sec = (int)extract_unit(&dt, 1000LL*1000*1000);
- out->us = (int)extract_unit(&dt, 1000LL);
+ days = extract_unit_64(&dt, 1000LL*1000*1000*60*60*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 1000LL*1000*1000*60*60);
+ out->min = (int)extract_unit_64(&dt, 1000LL*1000*1000*60);
+ out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000);
+ out->us = (int)extract_unit_64(&dt, 1000LL);
out->ps = (int)(dt * 1000);
break;
case NPY_FR_ps:
- perday = 24LL * 60 * 60 * 1000 * 1000 * 1000 * 1000;
-
- set_datetimestruct_days(extract_unit(&dt, perday), out);
- out->hour = (int)extract_unit(&dt, 1000LL*1000*1000*1000*60*60);
- out->min = (int)extract_unit(&dt, 1000LL*1000*1000*1000*60);
- out->sec = (int)extract_unit(&dt, 1000LL*1000*1000*1000);
- out->us = (int)extract_unit(&dt, 1000LL*1000);
+ days = extract_unit_64(&dt, 1000LL*1000*1000*1000*60*60*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*60*60);
+ out->min = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*60);
+ out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000);
+ out->us = (int)extract_unit_64(&dt, 1000LL*1000);
out->ps = (int)(dt);
break;
case NPY_FR_fs:
/* entire range is only +- 2.6 hours */
- out->hour = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000*60*60);
+ out->hour = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000*60*60);
if (out->hour < 0) {
out->year = 1969;
out->month = 12;
out->hour += 24;
assert(out->hour >= 0);
}
- out->min = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000*60);
- out->sec = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000);
- out->us = (int)extract_unit(&dt, 1000LL*1000*1000);
- out->ps = (int)extract_unit(&dt, 1000LL);
+ out->min = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000*60);
+ out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000);
+ out->us = (int)extract_unit_64(&dt, 1000LL*1000*1000);
+ out->ps = (int)extract_unit_64(&dt, 1000LL);
out->as = (int)(dt * 1000);
break;
case NPY_FR_as:
/* entire range is only +- 9.2 seconds */
- out->sec = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000*1000);
+ out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000*1000);
if (out->sec < 0) {
out->year = 1969;
out->month = 12;
out->sec += 60;
assert(out->sec >= 0);
}
- out->us = (int)extract_unit(&dt, 1000LL*1000*1000*1000);
- out->ps = (int)extract_unit(&dt, 1000LL*1000);
+ out->us = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000);
+ out->ps = (int)extract_unit_64(&dt, 1000LL*1000);
out->as = (int)dt;
break;
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT void
-PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr,
- npy_datetimestruct *result)
+PyArray_DatetimeToDatetimeStruct(
+ npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr),
+ npy_datetimestruct *result)
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_DatetimeToDatetimeStruct function has "
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT void
-PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT fr,
- npy_timedeltastruct *result)
+PyArray_TimedeltaToTimedeltaStruct(
+ npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr),
+ npy_timedeltastruct *result)
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_TimedeltaToTimedeltaStruct function has "
bad_input:
if (metastr != NULL) {
PyErr_Format(PyExc_TypeError,
- "Invalid datetime metadata string \"%s\" at position %d",
- metastr, (int)(substr-metastr));
+ "Invalid datetime metadata string \"%s\" at position %zd",
+ metastr, substr-metastr);
}
else {
PyErr_Format(PyExc_TypeError,
bad_input:
if (substr != metastr) {
PyErr_Format(PyExc_TypeError,
- "Invalid datetime metadata string \"%s\" at position %d",
- metastr, (int)(substr-metastr));
+ "Invalid datetime metadata string \"%s\" at position %zd",
+ metastr, substr - metastr);
}
else {
PyErr_Format(PyExc_TypeError,
return -1;
}
equal_one = PyObject_RichCompareBool(event, one, Py_EQ);
+ Py_DECREF(one);
if (equal_one == -1) {
return -1;
}
int minutes;
dts->sec += seconds;
- if (dts->sec < 0) {
- minutes = dts->sec / 60;
- dts->sec = dts->sec % 60;
- if (dts->sec < 0) {
- --minutes;
- dts->sec += 60;
- }
- add_minutes_to_datetimestruct(dts, minutes);
- }
- else if (dts->sec >= 60) {
- minutes = dts->sec / 60;
- dts->sec = dts->sec % 60;
- add_minutes_to_datetimestruct(dts, minutes);
- }
+ minutes = extract_unit_32(&dts->sec, 60);
+ add_minutes_to_datetimestruct(dts, minutes);
}
/*
{
int isleap;
- /* MINUTES */
dts->min += minutes;
- while (dts->min < 0) {
- dts->min += 60;
- dts->hour--;
- }
- while (dts->min >= 60) {
- dts->min -= 60;
- dts->hour++;
- }
- /* HOURS */
- while (dts->hour < 0) {
- dts->hour += 24;
- dts->day--;
- }
- while (dts->hour >= 24) {
- dts->hour -= 24;
- dts->day++;
- }
+ /* propagate out-of-range minutes into hour and day changes */
+ dts->hour += extract_unit_32(&dts->min, 60);
+ dts->day += extract_unit_32(&dts->hour, 24);
- /* DAYS */
+ /* propagate out-of-range days into month and year changes */
if (dts->day < 1) {
dts->month--;
if (dts->month < 1) {
if (DEPRECATE(
"parsing timezone aware datetimes is deprecated; "
"this will raise an error in the future") < 0) {
+ Py_DECREF(tmp);
return -1;
}
* which contains the value we want.
*/
tmp = PyObject_CallMethod(offset, "total_seconds", "");
+ Py_DECREF(offset);
if (tmp == NULL) {
return -1;
}
invalid_date:
PyErr_Format(PyExc_ValueError,
- "Invalid date (%d,%d,%d) when converting to NumPy datetime",
- (int)out->year, (int)out->month, (int)out->day);
+ "Invalid date (%" NPY_INT64_FMT ",%" NPY_INT32_FMT ",%" NPY_INT32_FMT ") when converting to NumPy datetime",
+ out->year, out->month, out->day);
return -1;
invalid_time:
PyErr_Format(PyExc_ValueError,
- "Invalid time (%d,%d,%d,%d) when converting "
+ "Invalid time (%" NPY_INT32_FMT ",%" NPY_INT32_FMT ",%" NPY_INT32_FMT ",%" NPY_INT32_FMT ") when converting "
"to NumPy datetime",
- (int)out->hour, (int)out->min, (int)out->sec, (int)out->us);
+ out->hour, out->min, out->sec, out->us);
return -1;
}
NPY_NO_EXPORT PyObject *
convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta)
{
- PyObject *ret = NULL;
npy_timedelta value;
int days = 0, seconds = 0, useconds = 0;
/* Convert to days/seconds/useconds */
switch (meta->base) {
case NPY_FR_W:
- value *= 7;
+ days = value * 7;
break;
case NPY_FR_D:
+ days = value;
break;
case NPY_FR_h:
- seconds = (int)((value % 24) * (60*60));
- value = value / 24;
+ days = extract_unit_64(&value, 24ULL);
+ seconds = value*60*60;
break;
case NPY_FR_m:
- seconds = (int)(value % (24*60)) * 60;
- value = value / (24*60);
+ days = extract_unit_64(&value, 60ULL*24);
+ seconds = value*60;
break;
case NPY_FR_s:
- seconds = (int)(value % (24*60*60));
- value = value / (24*60*60);
+ days = extract_unit_64(&value, 60ULL*60*24);
+ seconds = value;
break;
case NPY_FR_ms:
- useconds = (int)(value % 1000) * 1000;
- value = value / 1000;
- seconds = (int)(value % (24*60*60));
- value = value / (24*60*60);
+ days = extract_unit_64(&value, 1000ULL*60*60*24);
+ seconds = extract_unit_64(&value, 1000ULL);
+ useconds = value*1000;
break;
case NPY_FR_us:
- useconds = (int)(value % (1000*1000));
- value = value / (1000*1000);
- seconds = (int)(value % (24*60*60));
- value = value / (24*60*60);
+ days = extract_unit_64(&value, 1000ULL*1000*60*60*24);
+ seconds = extract_unit_64(&value, 1000ULL*1000);
+ useconds = value;
break;
default:
+ // unreachable, handled by the `if` above
+ assert(NPY_FALSE);
break;
}
/*
- * 'value' represents days, and seconds/useconds are filled.
- *
* If it would overflow the datetime.timedelta days, return a raw int
*/
- if (value < -999999999 || value > 999999999) {
+ if (days < -999999999 || days > 999999999) {
return PyLong_FromLongLong(td);
}
else {
- days = (int)value;
- ret = PyDelta_FromDSU(days, seconds, useconds);
- if (ret == NULL) {
- return NULL;
- }
+ return PyDelta_FromDSU(days, seconds, useconds);
}
-
- return ret;
}
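+/*
+ * Worked example for the branches above: td = -1 with millisecond units
+ * gives days == -1, seconds == 86399, useconds == 999000, so
+ * PyDelta_FromDSU(-1, 86399, 999000) equals timedelta(milliseconds=-1).
+ */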
/*
*/
NPY_NO_EXPORT int
convert_pyobjects_to_datetimes(int count,
- PyObject **objs, int *type_nums,
+ PyObject **objs, const int *type_nums,
NPY_CASTING casting,
npy_int64 *out_values,
PyArray_DatetimeMetaData *inout_meta)
datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
PyArray_Descr *dtype)
{
- PyArray_DatetimeMetaData meta;
- /*
- * Both datetime and timedelta are stored as int64, so they can
- * share value variables.
- */
- npy_int64 values[3];
- PyObject *objs[3];
- int type_nums[3];
-
- npy_intp i, length;
- PyArrayObject *ret;
- npy_int64 *ret_data;
/*
* First normalize the input parameters so there is no Py_None,
/* Check if the units of the given dtype are generic, in which
* case we use the code path that detects the units
*/
+ int type_nums[3];
+ PyArray_DatetimeMetaData meta;
if (dtype != NULL) {
PyArray_DatetimeMetaData *meta_tmp;
}
/* Set up to convert the objects to a common datetime unit metadata */
+ PyObject *objs[3];
objs[0] = start;
objs[1] = stop;
objs[2] = step;
type_nums[2] = NPY_TIMEDELTA;
}
- /* Convert all the arguments */
+ /* Convert all the arguments
+ *
+ * Both datetime and timedelta are stored as int64, so they can
+ * share value variables.
+ */
+ npy_int64 values[3];
if (convert_pyobjects_to_datetimes(3, objs, type_nums,
NPY_SAME_KIND_CASTING, values, &meta) < 0) {
return NULL;
}
+ /* If no start was provided, default to 0 */
+ if (start == NULL) {
+ /* enforced above */
+ assert(type_nums[0] == NPY_TIMEDELTA);
+ values[0] = 0;
+ }
/* If no step was provided, default to 1 */
if (step == NULL) {
}
/* Calculate the array length */
+ npy_intp length;
if (values[2] > 0 && values[1] > values[0]) {
length = (values[1] - values[0] + (values[2] - 1)) / values[2];
}
}
/* Create the result array */
- ret = (PyArrayObject *)PyArray_NewFromDescr(
- &PyArray_Type, dtype, 1, &length, NULL,
- NULL, 0, NULL);
+ PyArrayObject *ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, dtype, 1, &length, NULL,
+ NULL, 0, NULL);
+
if (ret == NULL) {
return NULL;
}
if (length > 0) {
/* Extract the data pointer */
- ret_data = (npy_int64 *)PyArray_DATA(ret);
+ npy_int64 *ret_data = (npy_int64 *)PyArray_DATA(ret);
/* Create the timedeltas or datetimes */
- for (i = 0; i < length; ++i) {
+ for (npy_intp i = 0; i < length; ++i) {
*ret_data = values[0];
values[0] += values[2];
ret_data++;
*/
static int
is_holiday(npy_datetime date,
- npy_datetime *holidays_begin, npy_datetime *holidays_end)
+ npy_datetime *holidays_begin, const npy_datetime *holidays_end)
{
npy_datetime *trial;
*/
static npy_datetime *
find_earliest_holiday_on_or_after(npy_datetime date,
- npy_datetime *holidays_begin, npy_datetime *holidays_end)
+ npy_datetime *holidays_begin, const npy_datetime *holidays_end)
{
npy_datetime *trial;
*/
static npy_datetime *
find_earliest_holiday_after(npy_datetime date,
- npy_datetime *holidays_begin, npy_datetime *holidays_end)
+ npy_datetime *holidays_begin, const npy_datetime *holidays_end)
{
npy_datetime *trial;
apply_business_day_roll(npy_datetime date, npy_datetime *out,
int *out_day_of_week,
NPY_BUSDAY_ROLL roll,
- npy_bool *weekmask,
+ const npy_bool *weekmask,
npy_datetime *holidays_begin, npy_datetime *holidays_end)
{
int day_of_week;
static int
apply_business_day_count(npy_datetime date_begin, npy_datetime date_end,
npy_int64 *out,
- npy_bool *weekmask, int busdays_in_weekmask,
+ const npy_bool *weekmask, int busdays_in_weekmask,
npy_datetime *holidays_begin, npy_datetime *holidays_end)
{
npy_int64 count, whole_weeks;
*/
NPY_NO_EXPORT PyArrayObject *
is_business_day(PyArrayObject *dates, PyArrayObject *out,
- npy_bool *weekmask, int busdays_in_weekmask,
+ const npy_bool *weekmask, int busdays_in_weekmask,
npy_datetime *holidays_begin, npy_datetime *holidays_end)
{
PyArray_DatetimeMetaData temp_meta;
parse_error:
PyErr_Format(PyExc_ValueError,
- "Error parsing datetime string \"%s\" at position %d",
- str, (int)(substr-str));
+ "Error parsing datetime string \"%s\" at position %zd",
+ str, substr - str);
return -1;
error:
if (Py_EnterRecursiveCall(
" while trying to convert the given data type from its "
"`.dtype` attribute.") != 0) {
+ Py_DECREF(dtypedescr);
return 1;
}
arg == '|' || arg == '=')
static int
-_check_for_commastring(char *type, Py_ssize_t len)
+_check_for_commastring(const char *type, Py_ssize_t len)
{
Py_ssize_t i;
int sqbracket;
}
Py_DECREF(off);
if (offset < 0) {
- PyErr_Format(PyExc_ValueError, "offset %d cannot be negative",
- (int)offset);
+ PyErr_Format(PyExc_ValueError, "offset %ld cannot be negative",
+ offset);
Py_DECREF(tup);
Py_DECREF(ind);
goto fail;
/* If align=True, enforce field alignment */
if (align && offset % newdescr->alignment != 0) {
PyErr_Format(PyExc_ValueError,
- "offset %d for NumPy dtype with fields is "
+ "offset %ld for NumPy dtype with fields is "
"not divisible by the field alignment %d "
"with align=True",
- (int)offset, (int)newdescr->alignment);
+ offset, newdescr->alignment);
ret = NPY_FAIL;
}
else if (offset + newdescr->elsize > totalsize) {
PyErr_Format(PyExc_ValueError,
"NumPy dtype descriptor requires %d bytes, "
"cannot override to smaller itemsize of %d",
- (int)new->elsize, (int)itemsize);
+ new->elsize, itemsize);
Py_DECREF(new);
goto fail;
}
PyErr_Format(PyExc_ValueError,
"NumPy dtype descriptor requires alignment of %d bytes, "
"which is not divisible into the specified itemsize %d",
- (int)new->alignment, (int)itemsize);
+ new->alignment, itemsize);
Py_DECREF(new);
goto fail;
}
PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
{
int check_num = NPY_NOTYPE + 10;
- PyObject *item;
int elsize = 0;
char endian = '=';
PyErr_Clear();
/* Now check to see if the object is registered in typeDict */
if (typeDict != NULL) {
- item = PyDict_GetItem(typeDict, obj);
+ PyObject *item = NULL;
#if defined(NPY_PY3K)
- if (!item && PyBytes_Check(obj)) {
+ if (PyBytes_Check(obj)) {
PyObject *tmp;
tmp = PyUnicode_FromEncodedObject(obj, "ascii", "strict");
- if (tmp != NULL) {
- item = PyDict_GetItem(typeDict, tmp);
- Py_DECREF(tmp);
+ if (tmp == NULL) {
+ goto fail;
}
+ item = PyDict_GetItem(typeDict, tmp);
+ Py_DECREF(tmp);
+ }
+ else {
+ item = PyDict_GetItem(typeDict, obj);
}
+#else
+ item = PyDict_GetItem(typeDict, obj);
#endif
if (item) {
/* Check for a deprecated Numeric-style typecode */
}
static int
-descr_nonzero(PyObject *self)
+descr_nonzero(PyObject *NPY_UNUSED(self))
{
/* `bool(np.dtype(...)) == True` for all dtypes. Needed to override default
* nonzero implementation, which checks if `len(object) > 0`. */
static PyObject *
-array_priority_get(PyArrayObject *self)
+array_priority_get(PyArrayObject *NPY_UNUSED(self))
{
return PyFloat_FromDouble(NPY_PRIORITY);
}
PyArray_ArgSortFunc *argsort;
PyObject *ret;
- if (which < 0 || which >= NPY_NSELECTS) {
+ /*
+ * As a C-exported function, enum NPY_SELECTKIND loses its enum property.
+ * Check the values to make sure they are in range.
+ */
+ if ((int)which < 0 || (int)which >= NPY_NSELECTS) {
PyErr_SetString(PyExc_ValueError,
"not a valid partition kind");
return NULL;
PyArrayObject *ret = NULL;
PyObject *ret_tuple;
npy_intp ret_dims[2];
- PyArray_NonzeroFunc *nonzero = PyArray_DESCR(self)->f->nonzero;
+
+ PyArray_NonzeroFunc *nonzero;
+ PyArray_Descr *dtype;
+
npy_intp nonzero_count;
npy_intp added_count = 0;
+ int needs_api;
int is_bool;
NpyIter *iter;
NpyIter_GetMultiIndexFunc *get_multi_index;
char **dataptr;
+ dtype = PyArray_DESCR(self);
+ nonzero = dtype->f->nonzero;
+ needs_api = PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI);
+
/* Special case - nonzero(zero_d) is nonzero(atleast_1d(zero_d)) */
if (ndim == 0) {
char const* msg;
static npy_intp const zero_dim_shape[1] = {1};
static npy_intp const zero_dim_strides[1] = {0};
+ Py_INCREF(PyArray_DESCR(self)); /* array creation steals reference */
PyArrayObject *self_1d = (PyArrayObject *)PyArray_NewFromDescrAndBase(
Py_TYPE(self), PyArray_DESCR(self),
1, zero_dim_shape, zero_dim_strides, PyArray_BYTES(self),
if (self_1d == NULL) {
return NULL;
}
- return PyArray_Nonzero(self_1d);
+ ret_tuple = PyArray_Nonzero(self_1d);
+ Py_DECREF(self_1d);
+ return ret_tuple;
}
/*
goto finish;
}
- NPY_BEGIN_THREADS_THRESHOLDED(count);
+ if (!needs_api) {
+ NPY_BEGIN_THREADS_THRESHOLDED(count);
+ }
/* avoid function call for bool */
if (is_bool) {
}
*multi_index++ = j;
}
+ if (needs_api && PyErr_Occurred()) {
+ break;
+ }
data += stride;
}
}
Py_DECREF(ret);
return NULL;
}
+
+ needs_api = NpyIter_IterationNeedsAPI(iter);
NPY_BEGIN_THREADS_NDITER(iter);
get_multi_index(iter, multi_index);
multi_index += ndim;
}
+ if (needs_api && PyErr_Occurred()) {
+ break;
+ }
} while(iternext(iter));
}
NpyIter_Deallocate(iter);
finish:
+ if (PyErr_Occurred()) {
+ Py_DECREF(ret);
+ return NULL;
+ }
+
/* if executed `nonzero()` check for miscount due to side-effect */
if (!is_bool && added_count != nonzero_count) {
PyErr_SetString(PyExc_RuntimeError,
* array of values, which must be of length PyArray_NDIM(self).
*/
NPY_NO_EXPORT PyObject *
-PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index)
+PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index)
{
int idim, ndim = PyArray_NDIM(self);
char *data = PyArray_DATA(self);
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index,
+PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index,
PyObject *obj)
{
int idim, ndim = PyArray_NDIM(self);
* array of values, which must be of length PyArray_NDIM(self).
*/
NPY_NO_EXPORT PyObject *
-PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index);
+PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index);
/*
* Sets a single item in the array, based on a single multi-index
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index,
+PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index,
PyObject *obj);
#endif
/* get the dataptr from its current coordinates for simple iterator */
static char*
-get_ptr_simple(PyArrayIterObject* iter, npy_intp *coordinates)
+get_ptr_simple(PyArrayIterObject* iter, const npy_intp *coordinates)
{
npy_intp i;
char *ret;
* This is common initialization code between PyArrayIterObject and
* PyArrayNeighborhoodIterObject
*
- * Increase ao refcount
+ * Steals a reference to the array object, which is released at deallocation.
+ * If the iterator is allocated statically and its dealloc is never called,
+ * the reference can be thought of as borrowed.
*/
-static PyObject *
-array_iter_base_init(PyArrayIterObject *it, PyArrayObject *ao)
+NPY_NO_EXPORT void
+PyArray_RawIterBaseInit(PyArrayIterObject *it, PyArrayObject *ao)
{
int nd, i;
else {
it->contiguous = 0;
}
- Py_INCREF(ao);
it->ao = ao;
it->size = PyArray_SIZE(ao);
it->nd_m1 = nd - 1;
it->translate = &get_ptr_simple;
PyArray_ITER_RESET(it);
- return (PyObject *)it;
+ return;
}
static void
NPY_NO_EXPORT PyObject *
PyArray_IterNew(PyObject *obj)
{
+ /*
+     * Note that internally PyArray_RawIterBaseInit may be called directly on a
+ * statically allocated PyArrayIterObject.
+ */
PyArrayIterObject *it;
PyArrayObject *ao;
return NULL;
}
- array_iter_base_init(it, ao);
+ Py_INCREF(ao); /* PyArray_RawIterBaseInit steals a reference */
+ PyArray_RawIterBaseInit(it, ao);
return (PyObject *)it;
}
static void
arrayiter_dealloc(PyArrayIterObject *it)
{
+ /*
+ * Note that it is possible to statically allocate a PyArrayIterObject,
+ * which does not call this function.
+ */
array_iter_base_dealloc(it);
PyArray_free(it);
}
}
n = PySequence_Fast_GET_SIZE(fast_seq);
if (n > NPY_MAXARGS) {
+ Py_DECREF(fast_seq);
return multiiter_wrong_number_of_args();
}
ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq));
/* set the dataptr from its current coordinates */
static char*
-get_ptr_constant(PyArrayIterObject* _iter, npy_intp *coordinates)
+get_ptr_constant(PyArrayIterObject* _iter, const npy_intp *coordinates)
{
int i;
npy_intp bd, _coordinates[NPY_MAXDIMS];
/* set the dataptr from its current coordinates */
static char*
-get_ptr_mirror(PyArrayIterObject* _iter, npy_intp *coordinates)
+get_ptr_mirror(PyArrayIterObject* _iter, const npy_intp *coordinates)
{
int i;
npy_intp bd, _coordinates[NPY_MAXDIMS], lb;
_coordinates[c] = lb + __npy_euclidean_division(bd, p->limits_sizes[c]);
static char*
-get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates)
+get_ptr_circular(PyArrayIterObject* _iter, const npy_intp *coordinates)
{
int i;
npy_intp bd, _coordinates[NPY_MAXDIMS], lb;
* A Neighborhood Iterator object.
*/
NPY_NO_EXPORT PyObject*
-PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp *bounds,
+PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp *bounds,
int mode, PyArrayObject* fill)
{
int i;
}
PyObject_Init((PyObject *)ret, &PyArrayNeighborhoodIter_Type);
- array_iter_base_init((PyArrayIterObject*)ret, x->ao);
+ Py_INCREF(x->ao); /* PyArray_RawIterBaseInit steals a reference */
+ PyArray_RawIterBaseInit((PyArrayIterObject*)ret, x->ao);
Py_INCREF(x);
ret->_internal_iter = x;
NPY_NO_EXPORT int
iter_ass_subscript(PyArrayIterObject *, PyObject *, PyObject *);
+NPY_NO_EXPORT void
+PyArray_RawIterBaseInit(PyArrayIterObject *it, PyArrayObject *ao);
+
#endif
/* Unpack a single scalar index, taking a new reference to match unpack_tuple */
static NPY_INLINE npy_intp
-unpack_scalar(PyObject *index, PyObject **result, npy_intp result_n)
+unpack_scalar(PyObject *index, PyObject **result, npy_intp NPY_UNUSED(result_n))
{
Py_INCREF(index);
result[0] = index;
if (size != PyArray_DIMS(v)[0]) {
PyErr_Format(PyExc_ValueError,
"NumPy boolean array indexing assignment "
- "cannot assign %d input values to "
- "the %d output values where the mask is true",
- (int)PyArray_DIMS(v)[0], (int)size);
+ "cannot assign %" NPY_INTP_FMT " input values to "
+ "the %" NPY_INTP_FMT " output values where the mask is true",
+ PyArray_DIMS(v)[0], size);
return -1;
}
v_stride = PyArray_STRIDES(v)[0];
indval = *((npy_intp*)data);
if (check_and_adjust_index(&indval,
outer_dim, outer_axis, _save) < 0) {
+ Py_DECREF(intp_type);
return -1;
}
data += stride;
PyArrayObject *original_extra_op = extra_op;
PyArrayObject *index_arrays[NPY_MAXDIMS];
- PyArray_Descr *dtypes[NPY_MAXDIMS];
+ PyArray_Descr *intp_descr;
+ PyArray_Descr *dtypes[NPY_MAXDIMS]; /* borrowed references */
npy_uint32 op_flags[NPY_MAXDIMS];
npy_uint32 outer_flags;
int nops;
int uses_subspace;
+ intp_descr = PyArray_DescrFromType(NPY_INTP);
+ if (intp_descr == NULL) {
+ return NULL;
+ }
+
/* create new MapIter object */
mit = (PyArrayMapIterObject *)PyArray_malloc(sizeof(PyArrayMapIterObject));
if (mit == NULL) {
+ Py_DECREF(intp_descr);
return NULL;
}
/* set all attributes of mapiter to zero */
mit->nd_fancy = fancy_ndim;
if (mapiter_fill_info(mit, indices, index_num, arr) < 0) {
Py_DECREF(mit);
+ Py_DECREF(intp_descr);
return NULL;
}
for (i=0; i < index_num; i++) {
if (indices[i].type & HAS_FANCY) {
index_arrays[mit->numiter] = (PyArrayObject *)indices[i].object;
- dtypes[mit->numiter] = PyArray_DescrFromType(NPY_INTP);
+ dtypes[mit->numiter] = intp_descr;
op_flags[mit->numiter] = (NPY_ITER_NBO |
NPY_ITER_ALIGNED |
PyArray_DescrFromType(NPY_INTP), 0);
if (index_arrays[0] == NULL) {
Py_DECREF(mit);
+ Py_DECREF(intp_descr);
return NULL;
}
- dtypes[0] = PyArray_DescrFromType(NPY_INTP);
+ dtypes[0] = intp_descr;
op_flags[0] = NPY_ITER_NBO | NPY_ITER_ALIGNED | NPY_ITER_READONLY;
mit->fancy_dims[0] = 1;
nops += 1;
index_arrays[mit->numiter] = extra_op;
- Py_INCREF(extra_op_dtype);
dtypes[mit->numiter] = extra_op_dtype;
op_flags[mit->numiter] = (extra_op_flags |
NPY_ITER_ALLOCATE |
}
/* NpyIter cleanup and information: */
- for (i=0; i < nops; i++) {
- Py_DECREF(dtypes[i]);
- }
if (dummy_array) {
Py_DECREF(index_arrays[0]);
}
/* Can now return early if no subspace is being used */
if (!uses_subspace) {
Py_XDECREF(extra_op);
+ Py_DECREF(intp_descr);
return (PyObject *)mit;
}
}
Py_XDECREF(extra_op);
+ Py_DECREF(intp_descr);
return (PyObject *)mit;
fail:
finish:
Py_XDECREF(extra_op);
+ Py_DECREF(intp_descr);
Py_DECREF(mit);
return NULL;
}
npy_cache_import("numpy.core._internal", "_getfield_is_safe",
&checkfunc);
if (checkfunc == NULL) {
+ Py_DECREF(typed);
return NULL;
}
safe = PyObject_CallFunction(checkfunc, "OOi", PyArray_DESCR(self),
typed, offset);
if (safe == NULL) {
+ Py_DECREF(typed);
return NULL;
}
Py_DECREF(safe);
/* check that values are valid */
if (typed_elsize > self_elsize) {
PyErr_SetString(PyExc_ValueError, "new type is larger than original type");
+ Py_DECREF(typed);
return NULL;
}
if (offset < 0) {
PyErr_SetString(PyExc_ValueError, "offset is negative");
+ Py_DECREF(typed);
return NULL;
}
if (offset > self_elsize - typed_elsize) {
PyErr_SetString(PyExc_ValueError, "new type plus offset is larger than original type");
+ Py_DECREF(typed);
return NULL;
}
int retval = 0;
if (PyArray_FailUnlessWriteable(self, "assignment destination") < 0) {
+ Py_DECREF(dtype);
return -1;
}
return NULL;
}
if (PyBytes_Check(file) || PyUnicode_Check(file)) {
- file = npy_PyFile_OpenFile(file, "wb");
+ Py_SETREF(file, npy_PyFile_OpenFile(file, "wb"));
if (file == NULL) {
return NULL;
}
own = 1;
}
else {
- Py_INCREF(file);
own = 0;
}
NPY_NO_EXPORT PyObject *
-array_ufunc(PyArrayObject *self, PyObject *args, PyObject *kwds)
+array_ufunc(PyArrayObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
{
PyObject *ufunc, *method_name, *normal_args, *ufunc_method;
PyObject *result = NULL;
}
static PyObject *
-array_function(PyArrayObject *self, PyObject *c_args, PyObject *c_kwds)
+array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kwds)
{
PyObject *func, *types, *args, *kwargs, *result;
static char *kwlist[] = {"func", "types", "args", "kwargs", NULL};
return NULL;
}
- ret = PyArray_Resize(self, &newshape, refcheck, NPY_CORDER);
+ ret = PyArray_Resize(self, &newshape, refcheck, NPY_ANYORDER);
npy_free_cache_dim_obj(newshape);
if (ret == NULL) {
return NULL;
}
static PyObject *
-array_reduce_ex_regular(PyArrayObject *self, int protocol)
+array_reduce_ex_regular(PyArrayObject *self, int NPY_UNUSED(protocol))
{
PyObject *subclass_array_reduce = NULL;
PyObject *ret;
PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) ||
(PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) &&
((PyObject*)self)->ob_type != &PyArray_Type) ||
- PyDataType_ISUNSIZED(descr)) {
+ descr->elsize == 0) {
/* The PickleBuffer class from version 5 of the pickle protocol
* can only be used for arrays backed by a contiguous data buffer.
* For all other cases we fallback to the generic array_reduce
#endif
npy_intp num = PyArray_NBYTES(self);
if (num == 0) {
+ Py_DECREF(rawdata);
Py_RETURN_NONE;
}
fa->data = PyDataMem_NEW(num);
static PyObject *
array_conjugate(PyArrayObject *self, PyObject *args)
{
-
PyArrayObject *out = NULL;
if (!PyArg_ParseTuple(args, "|O&:conjugate",
PyArray_OutputConverter,
NPY_NO_EXPORT const char *
npy_casting_to_string(NPY_CASTING casting);
-/* Pathlib support */
+/*
+ * Pathlib support, takes a borrowed reference and returns a new one.
+ * The new object may be the same as the old.
+ */
static inline PyObject *
NpyPath_PathlikeToFspath(PyObject *file)
{
}
if (!PyObject_IsInstance(file, os_PathLike)) {
+ Py_INCREF(file);
return file;
}
return PyObject_CallFunctionObjArgs(os_fspath, file, NULL);
ret = PyArray_LookupSpecial_OnInstance(obj, "__array_priority__");
if (ret == NULL) {
+ if (PyErr_Occurred()) {
+ PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ }
return default_;
}
* Convert to a 1D C-array
*/
NPY_NO_EXPORT int
-PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode)
+PyArray_As1D(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr),
+ int *NPY_UNUSED(d1), int NPY_UNUSED(typecode))
{
- npy_intp newd1;
- PyArray_Descr *descr;
- static const char msg[] = "PyArray_As1D: use PyArray_AsCArray.";
-
/* 2008-07-14, 1.5 */
- if (DEPRECATE(msg) < 0) {
- return -1;
- }
- descr = PyArray_DescrFromType(typecode);
- if (PyArray_AsCArray(op, (void *)ptr, &newd1, 1, descr) == -1) {
- return -1;
- }
- *d1 = (int) newd1;
- return 0;
+ PyErr_SetString(PyExc_NotImplementedError,
+ "PyArray_As1D: use PyArray_AsCArray.");
+ return -1;
}
/*NUMPY_API
* Convert to a 2D C-array
*/
NPY_NO_EXPORT int
-PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode)
+PyArray_As2D(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr),
+ int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode))
{
- npy_intp newdims[2];
- PyArray_Descr *descr;
- static const char msg[] = "PyArray_As1D: use PyArray_AsCArray.";
-
/* 2008-07-14, 1.5 */
- if (DEPRECATE(msg) < 0) {
- return -1;
- }
- descr = PyArray_DescrFromType(typecode);
- if (PyArray_AsCArray(op, (void *)ptr, newdims, 2, descr) == -1) {
- return -1;
- }
- *d1 = (int ) newdims[0];
- *d2 = (int ) newdims[1];
- return 0;
+ PyErr_SetString(PyExc_NotImplementedError,
+ "PyArray_As2D: use PyArray_AsCArray.");
+ return -1;
}
/* End Deprecated */
n1 = PyArray_DIMS(ap1)[0];
n2 = PyArray_DIMS(ap2)[0];
+ if (n1 == 0) {
+ PyErr_SetString(PyExc_ValueError, "first array argument cannot be empty");
+ return NULL;
+ }
+ if (n2 == 0) {
+ PyErr_SetString(PyExc_ValueError, "second array argument cannot be empty");
+ return NULL;
+ }
if (n1 < n2) {
ret = ap1;
ap1 = ap2;
if (file == NULL) {
return NULL;
}
-
+
if (offset != 0 && strcmp(sep, "") != 0) {
PyErr_SetString(PyExc_TypeError, "'offset' argument only permitted for binary files");
+ Py_XDECREF(type);
+ Py_DECREF(file);
return NULL;
}
if (PyString_Check(file) || PyUnicode_Check(file)) {
- file = npy_PyFile_OpenFile(file, "rb");
+ Py_SETREF(file, npy_PyFile_OpenFile(file, "rb"));
if (file == NULL) {
+ Py_XDECREF(type);
return NULL;
}
own = 1;
}
else {
- Py_INCREF(file);
own = 0;
}
fp = npy_PyFile_Dup2(file, "rb", &orig_pos);
if (fp == NULL) {
Py_DECREF(file);
+ Py_XDECREF(type);
return NULL;
}
if (npy_fseek(fp, offset, SEEK_CUR) != 0) {
}
meta = get_datetime_metadata_from_dtype(dtype);
- Py_DECREF(dtype);
+ Py_DECREF(dtype);
if (meta == NULL) {
return NULL;
}
}
static PyObject *
-_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
+_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds))
{
PyArrayObject* char_array = NULL;
PyArray_Descr *type;
else {
PyErr_SetString(PyExc_TypeError,
"string operation on non-string array");
+ Py_DECREF(type);
goto err;
}
if (method == NULL) {
+ Py_DECREF(type);
goto err;
}
}
if (errmsg == NULL) {
PyErr_Format(PyExc_ValueError,
- "Out-of-bounds range [%d, %d) passed to "
- "ResetToIterIndexRange", (int)istart, (int)iend);
+ "Out-of-bounds range [%" NPY_INTP_FMT ", %" NPY_INTP_FMT ") passed to "
+ "ResetToIterIndexRange", istart, iend);
}
else {
*errmsg = "Out-of-bounds range passed to ResetToIterIndexRange";
else if (iend < istart) {
if (errmsg == NULL) {
PyErr_Format(PyExc_ValueError,
- "Invalid range [%d, %d) passed to ResetToIterIndexRange",
- (int)istart, (int)iend);
+ "Invalid range [%" NPY_INTP_FMT ", %" NPY_INTP_FMT ") passed to ResetToIterIndexRange",
+ istart, iend);
}
else {
*errmsg = "Invalid range passed to ResetToIterIndexRange";
printf("REUSE_REDUCE_LOOPS ");
printf("\n");
- printf("| NDim: %d\n", (int)ndim);
- printf("| NOp: %d\n", (int)nop);
+ printf("| NDim: %d\n", ndim);
+ printf("| NOp: %d\n", nop);
if (NIT_MASKOP(iter) >= 0) {
printf("| MaskOp: %d\n", (int)NIT_MASKOP(iter));
}
npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags);
static int
npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes,
- npy_intp *itershape);
+ const npy_intp *itershape);
static int
npyiter_calculate_ndim(int nop, PyArrayObject **op_in,
int oa_ndim);
static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
- npy_uint32 *op_flags, int **op_axes,
+ const npy_uint32 *op_flags, int **op_axes,
npy_intp *itershape);
static void
npyiter_replace_axisdata(NpyIter *iter, int iop,
npyiter_find_best_axis_ordering(NpyIter *iter);
static PyArray_Descr *
npyiter_get_common_dtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
+ const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
int only_inputs);
static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
int op_ndim, npy_intp *shape,
- PyArray_Descr *op_dtype, int *op_axes);
+ PyArray_Descr *op_dtype, const int *op_axes);
static int
npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
- npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
+ const npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
int **op_axes);
static void
npyiter_get_priority_subtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags,
+ const npyiter_opitflags *op_itflags,
double *subtype_priority, PyTypeObject **subtype);
static int
npyiter_allocate_transfer_functions(NpyIter *iter);
if (nop > NPY_MAXARGS) {
PyErr_Format(PyExc_ValueError,
"Cannot construct an iterator with more than %d operands "
- "(%d were requested)", (int)NPY_MAXARGS, (int)nop);
+ "(%d were requested)", NPY_MAXARGS, nop);
return NULL;
}
static int
npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes,
- npy_intp *itershape)
+ const npy_intp *itershape)
{
char axes_dupcheck[NPY_MAXDIMS];
int iop, idim;
PyErr_Format(PyExc_ValueError,
"Cannot construct an iterator with more than %d dimensions "
"(%d were requested for op_axes)",
- (int)NPY_MAXDIMS, oa_ndim);
+ NPY_MAXDIMS, oa_ndim);
return 0;
}
if (op_axes == NULL) {
if (axes != NULL) {
memset(axes_dupcheck, 0, NPY_MAXDIMS);
for (idim = 0; idim < oa_ndim; ++idim) {
- npy_intp i = axes[idim];
+ int i = axes[idim];
if (i >= 0) {
if (i >= NPY_MAXDIMS) {
PyErr_Format(PyExc_ValueError,
"The 'op_axes' provided to the iterator "
"constructor for operand %d "
"contained invalid "
- "values %d", (int)iop, (int)i);
+ "values %d", iop, i);
return 0;
}
else if (axes_dupcheck[i] == 1) {
"The 'op_axes' provided to the iterator "
"constructor for operand %d "
"contained duplicate "
- "value %d", (int)iop, (int)i);
+ "value %d", iop, i);
return 0;
}
else {
PyObject *errmsg;
errmsg = PyUString_FromFormat(
"Iterator operand %d dtype could not be cast from ",
- (int)iop);
+ iop);
PyUString_ConcatAndDel(&errmsg,
PyObject_Repr((PyObject *)PyArray_DESCR(op[iop])));
PyUString_ConcatAndDel(&errmsg,
PyUString_ConcatAndDel(&errmsg,
PyUString_FromFormat(", the operand %d dtype, "
"according to the rule %s",
- (int)iop,
+ iop,
npyiter_casting_to_string(casting)));
PyErr_SetObject(PyExc_TypeError, errmsg);
Py_DECREF(errmsg);
static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
- npy_uint32 *op_flags, int **op_axes,
+ const npy_uint32 *op_flags, int **op_axes,
npy_intp *itershape)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
"Iterator input op_axes[%d][%d] (==%d) "
"is not a valid axis of op[%d], which "
"has %d dimensions ",
- (int)iop, (int)(ndim-idim-1), (int)i,
- (int)iop, (int)ondim);
+ iop, (ndim-idim-1), i,
+ iop, ondim);
return 0;
}
}
*/
static PyArray_Descr *
npyiter_get_common_dtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
+ const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
int only_inputs)
{
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
int op_ndim, npy_intp *shape,
- PyArray_Descr *op_dtype, int *op_axes)
+ PyArray_Descr *op_dtype, const int *op_axes)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
- npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
+ const npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
int **op_axes)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
*/
static void
npyiter_get_priority_subtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags,
+ const npyiter_opitflags *op_itflags,
double *subtype_priority,
PyTypeObject **subtype)
{
}
static PyObject *
-npyiter_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
+npyiter_new(PyTypeObject *subtype, PyObject *NPY_UNUSED(args),
+ PyObject *NPY_UNUSED(kwds))
{
NewNpyArrayIterObject *self;
if (*oa_ndim > NPY_MAXDIMS) {
PyErr_SetString(PyExc_ValueError,
"Too many dimensions in op_axes");
+ Py_DECREF(a);
return 0;
}
}
}
Py_DECREF(v);
}
- Py_DECREF(a);
}
+ Py_DECREF(a);
}
if (*oa_ndim == -1) {
if (i < 0 || i >= nop) {
PyErr_Format(PyExc_IndexError,
- "Iterator operand index %d is out of bounds", (int)i_orig);
+ "Iterator operand index %zd is out of bounds", i_orig);
return NULL;
}
*/
if (!self->readflags[i]) {
PyErr_Format(PyExc_RuntimeError,
- "Iterator operand %d is write-only", (int)i);
+ "Iterator operand %zd is write-only", i);
return NULL;
}
#endif
if (i < 0 || i >= nop) {
PyErr_Format(PyExc_IndexError,
- "Iterator operand index %d is out of bounds", (int)i_orig);
+ "Iterator operand index %zd is out of bounds", i_orig);
return -1;
}
if (!self->writeflags[i]) {
PyErr_Format(PyExc_RuntimeError,
- "Iterator operand %d is not writeable", (int)i_orig);
+ "Iterator operand %zd is not writeable", i_orig);
return -1;
}
}
static PyObject *
-npyiter_exit(NewNpyArrayIterObject *self, PyObject *args)
+npyiter_exit(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
/* even if called via exception handling, writeback any data */
return npyiter_close(self);
}
static PyObject *
-array_inplace_matrix_multiply(PyArrayObject *m1, PyObject *m2)
+array_inplace_matrix_multiply(
+ PyArrayObject *NPY_UNUSED(m1), PyObject *NPY_UNUSED(m2))
{
PyErr_SetString(PyExc_TypeError,
"In-place matrix multiplication is not (yet) supported. "
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
#include "numpy/arrayscalars.h"
+#include "iterators.h"
#include "npy_config.h"
npy_intp i, n;
PyObject **data;
PyObject *temp;
- PyArrayIterObject *it;
+ /*
+     * Statically allocating the iterator lets this function leave the
+     * reference count of the array untouched, which matters when it runs
+     * during dealloc. (Static allocation itself is not strictly required.)
+ */
+ PyArrayIterObject it;
if (!PyDataType_REFCHK(PyArray_DESCR(mp))) {
return 0;
}
if (PyArray_DESCR(mp)->type_num != NPY_OBJECT) {
- it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp);
- if (it == NULL) {
- return -1;
+ PyArray_RawIterBaseInit(&it, mp);
+ while(it.index < it.size) {
+ PyArray_Item_XDECREF(it.dataptr, PyArray_DESCR(mp));
+ PyArray_ITER_NEXT(&it);
}
- while(it->index < it->size) {
- PyArray_Item_XDECREF(it->dataptr, PyArray_DESCR(mp));
- PyArray_ITER_NEXT(it);
- }
- Py_DECREF(it);
return 0;
}
}
}
else { /* handles misaligned data too */
- it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp);
- if (it == NULL) {
- return -1;
- }
- while(it->index < it->size) {
- NPY_COPY_PYOBJECT_PTR(&temp, it->dataptr);
+ PyArray_RawIterBaseInit(&it, mp);
+ while(it.index < it.size) {
+ NPY_COPY_PYOBJECT_PTR(&temp, it.dataptr);
Py_XDECREF(temp);
- PyArray_ITER_NEXT(it);
+ PyArray_ITER_NEXT(&it);
}
- Py_DECREF(it);
}
return 0;
}
_npy_can_cast_safely_table[_FROM_NUM][NPY_STRING] = 1;
_npy_can_cast_safely_table[_FROM_NUM][NPY_UNICODE] = 1;
- /* Allow casts from any integer to the TIMEDELTA type */
-#if @from_isint@ || @from_isuint@
+#if @from_isint@ && NPY_SIZEOF_TIMEDELTA >= _FROM_BSIZE
+ /* Allow casts from smaller or equal signed integers to the TIMEDELTA type */
+ _npy_can_cast_safely_table[_FROM_NUM][NPY_TIMEDELTA] = 1;
+#elif @from_isuint@ && NPY_SIZEOF_TIMEDELTA > _FROM_BSIZE
+ /* Allow casts from smaller unsigned integers to the TIMEDELTA type */
_npy_can_cast_safely_table[_FROM_NUM][NPY_TIMEDELTA] = 1;
#endif
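+/*
+ * Illustration, assuming the usual 8-byte signed npy_timedelta: int32 and
+ * int64 still cast safely to timedelta64, and so does uint32, but
+ * uint64 -> timedelta64 is no longer "safe", since the upper half of the
+ * uint64 range cannot be represented in a signed 64-bit timedelta.
+ */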
PyArrayIter_Type.tp_iter = PyObject_SelfIter;
PyArrayMapIter_Type.tp_iter = PyObject_SelfIter;
+
+ /*
+ * Give types different names when they are the same size (gh-9799).
+ * `np.intX` always refers to the first int of that size in the sequence
+ * `['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']`.
+ */
+#if (NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT)
+ PyByteArrType_Type.tp_name = "numpy.byte";
+ PyUByteArrType_Type.tp_name = "numpy.ubyte";
+#endif
+#if (NPY_SIZEOF_SHORT == NPY_SIZEOF_INT)
+ PyShortArrType_Type.tp_name = "numpy.short";
+ PyUShortArrType_Type.tp_name = "numpy.ushort";
+#endif
+#if (NPY_SIZEOF_INT == NPY_SIZEOF_LONG)
+ PyIntArrType_Type.tp_name = "numpy.intc";
+ PyUIntArrType_Type.tp_name = "numpy.uintc";
+#endif
+#if (NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG)
+ PyLongLongArrType_Type.tp_name = "numpy.longlong";
+ PyULongLongArrType_Type.tp_name = "numpy.ulonglong";
+#endif
+
+    /* Do the same for longdouble */
+#if (NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE)
+ PyLongDoubleArrType_Type.tp_name = "numpy.longdouble";
+ PyCLongDoubleArrType_Type.tp_name = "numpy.clongdouble";
+#endif
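+
+    /*
+     * Example, assuming an LP64 platform where long and long long are both
+     * 8 bytes: long comes first in the sequence above, so it keeps the
+     * "numpy.int64" name while the long long types are renamed to
+     * "numpy.longlong" / "numpy.ulonglong".
+     */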
}
typedef struct {
_fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr);
static int
-_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims,
+_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims,
npy_intp *newstrides, int is_f_order);
static void
*/
NPY_NO_EXPORT PyObject *
PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
- NPY_ORDER order)
+ NPY_ORDER NPY_UNUSED(order))
{
npy_intp oldnbytes, newnbytes;
npy_intp oldsize, newsize;
* stride of the next-fastest index.
*/
static int
-_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims,
+_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims,
npy_intp *newstrides, int is_f_order)
{
int oldnd;
* [(2, 12), (0, 4), (1, -2)].
*/
NPY_NO_EXPORT void
-PyArray_CreateSortedStridePerm(int ndim, npy_intp *strides,
+PyArray_CreateSortedStridePerm(int ndim, npy_intp const *strides,
npy_stride_sort_item *out_strideperm)
{
int i;
* from a reduction result once its computation is complete.
*/
NPY_NO_EXPORT void
-PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags)
+PyArray_RemoveAxesInPlace(PyArrayObject *arr, const npy_bool *flags)
{
PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
npy_intp *shape = fa->dimensions, *strides = fa->strides;
char *op, npy_intp n, void *NPY_UNUSED(ignore))
{
#if defined(HAVE_CBLAS)
- int is1b = blas_stride(is1, sizeof(npy_cfloat));
- int is2b = blas_stride(is2, sizeof(npy_cfloat));
+ CBLAS_INT is1b = blas_stride(is1, sizeof(npy_cfloat));
+ CBLAS_INT is2b = blas_stride(is2, sizeof(npy_cfloat));
if (is1b && is2b) {
double sum[2] = {0., 0.}; /* double for stability */
while (n > 0) {
- int chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
+ CBLAS_INT chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
float tmp[2];
- cblas_cdotc_sub((int)n, ip1, is1b, ip2, is2b, tmp);
+ CBLAS_FUNC(cblas_cdotc_sub)((CBLAS_INT)n, ip1, is1b, ip2, is2b, tmp);
sum[0] += (double)tmp[0];
sum[1] += (double)tmp[1];
/* use char strides here */
char *op, npy_intp n, void *NPY_UNUSED(ignore))
{
#if defined(HAVE_CBLAS)
- int is1b = blas_stride(is1, sizeof(npy_cdouble));
- int is2b = blas_stride(is2, sizeof(npy_cdouble));
+ CBLAS_INT is1b = blas_stride(is1, sizeof(npy_cdouble));
+ CBLAS_INT is2b = blas_stride(is2, sizeof(npy_cdouble));
if (is1b && is2b) {
double sum[2] = {0., 0.}; /* double for stability */
while (n > 0) {
- int chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
+ CBLAS_INT chunk = n < NPY_CBLAS_CHUNK ? n : NPY_CBLAS_CHUNK;
double tmp[2];
- cblas_zdotc_sub((int)n, ip1, is1b, ip2, is2b, tmp);
+ CBLAS_FUNC(cblas_zdotc_sub)((CBLAS_INT)n, ip1, is1b, ip2, is2b, tmp);
sum[0] += (double)tmp[0];
sum[1] += (double)tmp[1];
/* use char strides here */
* flag in an efficient way. The flag is IEEE specific. See
* https://github.com/freebsd/freebsd/blob/4c6378299/lib/msun/src/catrig.c#L42
*/
+#if !defined(HAVE_CACOSF) || !defined(HAVE_CACOSL) || !defined(HAVE_CASINHF) || !defined(HAVE_CASINHL)
#define raise_inexact() do { \
volatile npy_float NPY_UNUSED(junk) = 1 + tiny; \
} while (0)
static const volatile npy_float tiny = 3.9443045e-31f;
-
+#endif
/**begin repeat
* #type = npy_float, npy_double, npy_longdouble#
* Constants
*=========================================================*/
static const @ctype@ c_1@c@ = {1.0@C@, 0.0};
-static const @ctype@ c_half@c@ = {0.5@C@, 0.0};
-static const @ctype@ c_i@c@ = {0.0, 1.0@C@};
-static const @ctype@ c_ihalf@c@ = {0.0, 0.5@C@};
/*==========================================================
* Helper functions
* These are necessary because we do not count on using a
* C99 compiler.
*=========================================================*/
-static NPY_INLINE
-@ctype@
-cadd@c@(@ctype@ a, @ctype@ b)
-{
- return npy_cpack@c@(npy_creal@c@(a) + npy_creal@c@(b),
- npy_cimag@c@(a) + npy_cimag@c@(b));
-}
-
-static NPY_INLINE
-@ctype@
-csub@c@(@ctype@ a, @ctype@ b)
-{
- return npy_cpack@c@(npy_creal@c@(a) - npy_creal@c@(b),
- npy_cimag@c@(a) - npy_cimag@c@(b));
-}
-
static NPY_INLINE
@ctype@
cmul@c@(@ctype@ a, @ctype@ b)
}
}
-static NPY_INLINE
-@ctype@
-cneg@c@(@ctype@ a)
-{
- return npy_cpack@c@(-npy_creal@c@(a), -npy_cimag@c@(a));
-}
-
-static NPY_INLINE
-@ctype@
-cmuli@c@(@ctype@ a)
-{
- return npy_cpack@c@(-npy_cimag@c@(a), npy_creal@c@(a));
-}
-
/*==========================================================
* Custom implementation of missing complex C99 functions
*=========================================================*/
return npy_@func@u@c@(a < 0 ? -a : a, b < 0 ? -b : b);
}
/**end repeat**/
+
+/*
+ * Unlike LCM and GCD, we need byte and short variants for the shift
+ * operators, since the result depends on the width of the type.
+ */
+/**begin repeat
+ *
+ * #type = byte, short, int, long, longlong#
+ * #c = hh,h,,l,ll#
+ */
+/**begin repeat1
+ *
+ * #u = u,#
+ * #is_signed = 0,1#
+ */
+NPY_INPLACE npy_@u@@type@
+npy_lshift@u@@c@(npy_@u@@type@ a, npy_@u@@type@ b)
+{
+ if (NPY_LIKELY((size_t)b < sizeof(a) * CHAR_BIT)) {
+ return a << b;
+ }
+ else {
+ return 0;
+ }
+}
+NPY_INPLACE npy_@u@@type@
+npy_rshift@u@@c@(npy_@u@@type@ a, npy_@u@@type@ b)
+{
+ if (NPY_LIKELY((size_t)b < sizeof(a) * CHAR_BIT)) {
+ return a >> b;
+ }
+#if @is_signed@
+ else if (a < 0) {
+ return (npy_@u@@type@)-1; /* preserve the sign bit */
+ }
+#endif
+ else {
+ return 0;
+ }
+}
+/**end repeat1**/
+/**end repeat**/
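+
+/*
+ * A minimal sketch of the guaranteed semantics, assuming an 8-bit npy_byte:
+ *
+ *     npy_lshifthh(1, 9)     -> 0   (not the hardware-masked 1 << 1)
+ *     npy_rshifthh(-8, 9)    -> -1  (sign bit preserved)
+ *     npy_rshiftuhh(200, 9)  -> 0
+ */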
NPY_INLINE static int
DATETIME_LT(npy_datetime a, npy_datetime b)
{
+ if (a == NPY_DATETIME_NAT) {
+ return 0;
+ }
+
+ if (b == NPY_DATETIME_NAT) {
+ return 1;
+ }
+
return a < b;
}
NPY_INLINE static int
TIMEDELTA_LT(npy_timedelta a, npy_timedelta b)
{
+ if (a == NPY_DATETIME_NAT) {
+ return 0;
+ }
+
+ if (b == NPY_DATETIME_NAT) {
+ return 1;
+ }
+
return a < b;
}
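+
+/*
+ * With these predicates NPY_DATETIME_NAT compares greater than every other
+ * value, so e.g. sorting {3, NAT, 1} ascending yields {1, 3, NAT}: NaT
+ * values collect at the end of a sorted array.
+ */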
pyrational_str(PyObject* self) {
rational x = ((PyRational*)self)->r;
if (d(x)!=1) {
- return PyString_FromFormat(
+ return PyUString_FromFormat(
"%ld/%ld",(long)x.n,(long)d(x));
}
else {
- return PyString_FromFormat(
+ return PyUString_FromFormat(
"%ld",(long)x.n);
}
}
#endif
}
+static NPY_INLINE
+int cpu_supports_fma(void)
+{
+#ifdef __x86_64__
+ unsigned int feature = 0x01;
+ unsigned int a, b, c, d;
+ __asm__ volatile (
+ "cpuid" "\n\t"
+ : "=a" (a), "=b" (b), "=c" (c), "=d" (d)
+ : "a" (feature));
+ /*
+ * FMA is the 12th bit of ECX
+ */
+ return (c >> 12) & 1;
+#else
+ return 0;
+#endif
+}
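+
+/*
+ * How this is used below: a "fma" feature query succeeds only when the
+ * FMA3 cpuid bit, the AVX2 cpuid bit, and OS support for saving AVX state
+ * are all present.
+ */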
+
/*
* Primitive cpu feature detect function
* Currently only supports checking for avx on gcc compatible compilers.
return 0;
#endif
}
+ else if (strcmp(feature, "fma") == 0) {
+ return cpu_supports_fma() && __builtin_cpu_supports("avx2") && os_avx_support();
+ }
else if (strcmp(feature, "avx2") == 0) {
return __builtin_cpu_supports("avx2") && os_avx_support();
}
static PyObject *
npy_ObjectFloor(PyObject *obj) {
- PyObject *math_floor_func = NULL;
+ static PyObject *math_floor_func = NULL;
npy_cache_import("math", "floor", &math_floor_func);
if (math_floor_func == NULL) {
static PyObject *
npy_ObjectCeil(PyObject *obj) {
- PyObject *math_ceil_func = NULL;
+ static PyObject *math_ceil_func = NULL;
npy_cache_import("math", "ceil", &math_ceil_func);
if (math_ceil_func == NULL) {
static PyObject *
npy_ObjectTrunc(PyObject *obj) {
- PyObject *math_trunc_func = NULL;
+ static PyObject *math_trunc_func = NULL;
npy_cache_import("math", "trunc", &math_trunc_func);
if (math_trunc_func == NULL) {
return NULL;
}
/* _gcd has some unusual behaviour regarding sign */
- return PyNumber_Absolute(gcd);
+ Py_SETREF(gcd, PyNumber_Absolute(gcd));
+ return gcd;
}
}
* no remainder
*/
tmp = PyNumber_FloorDivide(i1, gcd);
+ Py_DECREF(gcd);
if(tmp == NULL) {
return NULL;
}
- tmp = PyNumber_Multiply(tmp, i2);
+ Py_SETREF(tmp, PyNumber_Multiply(tmp, i2));
if(tmp == NULL) {
return NULL;
}
/* even though we fix gcd to be positive, we need to do it again here */
- return PyNumber_Absolute(tmp);
+ Py_SETREF(tmp, PyNumber_Absolute(tmp));
+ return tmp;
}
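+
+/*
+ * Note on the Py_SETREF pattern above: a plain `tmp = PyNumber_Absolute(tmp);`
+ * would leak the reference previously held in tmp. Py_SETREF stores the new
+ * value and only then releases the old one, so no reference is lost even
+ * when the call fails and returns NULL.
+ */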
** GENERIC FLOAT LOOPS **
*****************************************************************************/
+/* direct loops using a suitable callback */
-typedef float halfUnaryFunc(npy_half x);
-typedef float floatUnaryFunc(float x);
-typedef double doubleUnaryFunc(double x);
-typedef npy_longdouble longdoubleUnaryFunc(npy_longdouble x);
-typedef npy_half halfBinaryFunc(npy_half x, npy_half y);
-typedef float floatBinaryFunc(float x, float y);
-typedef double doubleBinaryFunc(double x, double y);
-typedef npy_longdouble longdoubleBinaryFunc(npy_longdouble x, npy_longdouble y);
-
+/**begin repeat
+ * #c = e, f, d, g#
+ * #type = npy_half, npy_float, npy_double, npy_longdouble#
+ **/
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@_@c@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- halfUnaryFunc *f = (halfUnaryFunc *)func;
+ typedef @type@ func_type(@type@);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
- const npy_half in1 = *(npy_half *)ip1;
- *(npy_half *)op1 = f(in1);
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = f(in1);
}
}
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@@c@_@c@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- floatUnaryFunc *f = (floatUnaryFunc *)func;
- UNARY_LOOP {
- const float in1 = npy_half_to_float(*(npy_half *)ip1);
- *(npy_half *)op1 = npy_float_to_half(f(in1));
+ typedef @type@ func_type(@type@, @type@);
+ func_type *f = (func_type *)func;
+ BINARY_LOOP {
+ @type@ in1 = *(@type@ *)ip1;
+ @type@ in2 = *(@type@ *)ip2;
+ *(@type@ *)op1 = f(in1, in2);
}
}
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleUnaryFunc *f = (doubleUnaryFunc *)func;
- UNARY_LOOP {
- const double in1 = npy_half_to_double(*(npy_half *)ip1);
- *(npy_half *)op1 = npy_double_to_half(f(in1));
- }
-}
+/**end repeat**/
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- floatUnaryFunc *f = (floatUnaryFunc *)func;
- UNARY_LOOP {
- const float in1 = *(float *)ip1;
- *(float *)op1 = f(in1);
- }
-}
+/* indirect loops with casting */
+/**begin repeat
+ * #c1 = e, e, f#
+ * #type1 = npy_half, npy_half, npy_float#
+ * #c2 = f, d, d#
+ * #type2 = npy_float, npy_double, npy_double#
+ *
+ * #conv12 = npy_half_to_float, npy_half_to_double, (double)#
+ * #conv21 = npy_float_to_half, npy_double_to_half, (float)#
+ **/
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c1@_@c1@_As_@c2@_@c2@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- doubleUnaryFunc *f = (doubleUnaryFunc *)func;
+ typedef @type2@ func_type(@type2@);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
- const float in1 = *(float *)ip1;
- *(float *)op1 = (float)f((double)in1);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- halfBinaryFunc *f = (halfBinaryFunc *)func;
- BINARY_LOOP {
- npy_half in1 = *(npy_half *)ip1;
- npy_half in2 = *(npy_half *)ip2;
- *(npy_half *)op1 = f(in1, in2);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- floatBinaryFunc *f = (floatBinaryFunc *)func;
- BINARY_LOOP {
- float in1 = npy_half_to_float(*(npy_half *)ip1);
- float in2 = npy_half_to_float(*(npy_half *)ip2);
- *(npy_half *)op1 = npy_float_to_half(f(in1, in2));
+ const @type2@ in1 = @conv12@(*(@type1@ *)ip1);
+ *(@type1@ *)op1 = @conv21@(f(in1));
}
}
-
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c1@@c1@_@c1@_As_@c2@@c2@_@c2@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- doubleBinaryFunc *f = (doubleBinaryFunc *)func;
+ typedef @type2@ func_type(@type2@, @type2@);
+ func_type *f = (func_type *)func;
BINARY_LOOP {
- double in1 = npy_half_to_double(*(npy_half *)ip1);
- double in2 = npy_half_to_double(*(npy_half *)ip2);
- *(npy_half *)op1 = npy_double_to_half(f(in1, in2));
+ const @type2@ in1 = @conv12@(*(@type1@ *)ip1);
+ const @type2@ in2 = @conv12@(*(@type1@ *)ip2);
+ *(@type1@ *)op1 = @conv21@(f(in1, in2));
}
}
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- floatBinaryFunc *f = (floatBinaryFunc *)func;
- BINARY_LOOP {
- float in1 = *(float *)ip1;
- float in2 = *(float *)ip2;
- *(float *)op1 = f(in1, in2);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleBinaryFunc *f = (doubleBinaryFunc *)func;
- BINARY_LOOP {
- float in1 = *(float *)ip1;
- float in2 = *(float *)ip2;
- *(float *)op1 = (double)f((double)in1, (double)in2);
- }
-}
+/**end repeat**/
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleUnaryFunc *f = (doubleUnaryFunc *)func;
- UNARY_LOOP {
- double in1 = *(double *)ip1;
- *(double *)op1 = f(in1);
- }
-}
+/******************************************************************************
+ ** GENERIC COMPLEX LOOPS **
+ *****************************************************************************/
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleBinaryFunc *f = (doubleBinaryFunc *)func;
- BINARY_LOOP {
- double in1 = *(double *)ip1;
- double in2 = *(double *)ip2;
- *(double *)op1 = f(in1, in2);
- }
-}
+/* direct loops using a suitable callback */
+/**begin repeat
+ * #c = F, D, G#
+ * #type = npy_cfloat, npy_cdouble, npy_clongdouble#
+ **/
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@_@c@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func;
+ typedef void func_type(@type@ *, @type@ *);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
- npy_longdouble in1 = *(npy_longdouble *)ip1;
- *(npy_longdouble *)op1 = f(in1);
+ @type@ in1 = *(@type@ *)ip1;
+ @type@ *out = (@type@ *)op1;
+ f(&in1, out);
}
}
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@@c@_@c@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func;
+ typedef void func_type(@type@ *, @type@ *, @type@ *);
+ func_type *f = (func_type *)func;
BINARY_LOOP {
- npy_longdouble in1 = *(npy_longdouble *)ip1;
- npy_longdouble in2 = *(npy_longdouble *)ip2;
- *(npy_longdouble *)op1 = f(in1, in2);
+ @type@ in1 = *(@type@ *)ip1;
+ @type@ in2 = *(@type@ *)ip2;
+ @type@ *out = (@type@ *)op1;
+ f(&in1, &in2, out);
}
}
+/**end repeat**/
-
-/******************************************************************************
- ** GENERIC COMPLEX LOOPS **
- *****************************************************************************/
-
-
-typedef void cdoubleUnaryFunc(npy_cdouble *x, npy_cdouble *r);
-typedef void cfloatUnaryFunc(npy_cfloat *x, npy_cfloat *r);
-typedef void clongdoubleUnaryFunc(npy_clongdouble *x, npy_clongdouble *r);
-typedef void cdoubleBinaryFunc(npy_cdouble *x, npy_cdouble *y, npy_cdouble *r);
-typedef void cfloatBinaryFunc(npy_cfloat *x, npy_cfloat *y, npy_cfloat *r);
-typedef void clongdoubleBinaryFunc(npy_clongdouble *x, npy_clongdouble *y,
- npy_clongdouble *r);
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cfloatUnaryFunc *f = (cfloatUnaryFunc *)func;
- UNARY_LOOP {
- npy_cfloat in1 = *(npy_cfloat *)ip1;
- npy_cfloat *out = (npy_cfloat *)op1;
- f(&in1, out);
- }
-}
-
+/* indirect loops with casting */
/*UFUNC_API*/
NPY_NO_EXPORT void
PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func;
+ typedef void func_type(npy_cdouble *, npy_cdouble *);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
npy_cdouble tmp, out;
tmp.real = (double)((float *)ip1)[0];
}
}
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cfloatBinaryFunc *f = (cfloatBinaryFunc *)func;
- BINARY_LOOP {
- npy_cfloat in1 = *(npy_cfloat *)ip1;
- npy_cfloat in2 = *(npy_cfloat *)ip2;
- npy_cfloat *out = (npy_cfloat *)op1;
- f(&in1, &in2, out);
- }
-}
-
/*UFUNC_API*/
NPY_NO_EXPORT void
PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func;
+ typedef void func_type(npy_cdouble *, npy_cdouble *, npy_cdouble *);
+ func_type *f = (func_type *)func;
BINARY_LOOP {
npy_cdouble tmp1, tmp2, out;
tmp1.real = (double)((float *)ip1)[0];
}
}
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func;
- UNARY_LOOP {
- npy_cdouble in1 = *(npy_cdouble *)ip1;
- npy_cdouble *out = (npy_cdouble *)op1;
- f(&in1, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func;
- BINARY_LOOP {
- npy_cdouble in1 = *(npy_cdouble *)ip1;
- npy_cdouble in2 = *(npy_cdouble *)ip2;
- npy_cdouble *out = (npy_cdouble *)op1;
- f(&in1, &in2, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func;
- UNARY_LOOP {
- npy_clongdouble in1 = *(npy_clongdouble *)ip1;
- npy_clongdouble *out = (npy_clongdouble *)op1;
- f(&in1, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func;
- BINARY_LOOP {
- npy_clongdouble in1 = *(npy_clongdouble *)ip1;
- npy_clongdouble in2 = *(npy_clongdouble *)ip2;
- npy_clongdouble *out = (npy_clongdouble *)op1;
- f(&in1, &in2, out);
- }
-}
-
/******************************************************************************
 **                          GENERIC OBJECT LOOPS                            **
i, type->tp_name, meth);
npy_PyErr_ChainExceptionsCause(exc, val, tb);
Py_DECREF(tup);
+ Py_XDECREF(func);
return;
}
ret = PyObject_Call(func, tup, NULL);
* #ftype = npy_float, npy_float, npy_float, npy_float, npy_double, npy_double,
* npy_double, npy_double, npy_double, npy_double#
* #SIGNED = 1, 0, 1, 0, 1, 0, 1, 0, 1, 0#
+ * #c = hh,uhh,h,uh,,u,l,ul,ll,ull#
*/
#define @TYPE@_floor_divide @TYPE@_divide
/**begin repeat2
* Arithmetic
- * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor,
- * left_shift, right_shift#
- * #OP = +, -,*, &, |, ^, <<, >>#
+ * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor#
+ * #OP = +, -, *, &, |, ^#
*/
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
@TYPE@_@kind@@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- if(IS_BINARY_REDUCE) {
+ if (IS_BINARY_REDUCE) {
BINARY_REDUCE_LOOP(@type@) {
io1 @OP@= *(@type@ *)ip2;
}
/**end repeat2**/
+/*
+ * Arithmetic bit shift operations.
+ *
+ * Intel hardware masks bit shift values, so large shifts wrap around
+ * and can produce surprising results. The special handling ensures that
+ * behavior is independent of compiler or hardware.
+ * TODO: We could implement consistent behavior for negative shifts,
+ * which is undefined in C.
+ */
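+/*
+ * For example, x86 masks the shift count of a 32-bit shift to 5 bits, so
+ * hardware may evaluate `x << 34` as `x << 2`; the npy_lshift/npy_rshift
+ * helpers used below instead return 0 (or -1 for a negative signed right
+ * shift) for out-of-range counts.
+ */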
+
+#define INT_left_shift_needs_clear_floatstatus
+#define UINT_left_shift_needs_clear_floatstatus
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_left_shift@isa@(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ BINARY_LOOP_FAST(@type@, @type@, *out = npy_lshift@c@(in1, in2));
+
+#ifdef @TYPE@_left_shift_needs_clear_floatstatus
+ // For some reason, our macOS CI sets an "invalid" flag here, but only
+ // for some types.
+ npy_clear_floatstatus_barrier((char*)dimensions);
+#endif
+}
+
+#undef INT_left_shift_needs_clear_floatstatus
+#undef UINT_left_shift_needs_clear_floatstatus
+
+NPY_NO_EXPORT
+#ifndef NPY_DO_NOT_OPTIMIZE_@TYPE@_right_shift
+NPY_GCC_OPT_3
+#endif
+void
+@TYPE@_right_shift@isa@(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ BINARY_LOOP_FAST(@type@, @type@, *out = npy_rshift@c@(in1, in2));
+}
+
+
/**begin repeat2
* #kind = equal, not_equal, greater, greater_equal, less, less_equal,
* logical_and, logical_or#
}
}
+NPY_NO_EXPORT void
+@TYPE@_isinf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ UNARY_LOOP_FAST(npy_bool, npy_bool, (void)in; *out = NPY_FALSE);
+}
+
NPY_NO_EXPORT void
@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
{
**/
NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ const @type@ in2 = *(@type@ *)ip2;
+ if (in1 == NPY_DATETIME_NAT) {
+ *((@type@ *)op1) = in1;
+ }
+ else if (in2 == NPY_DATETIME_NAT) {
+ *((@type@ *)op1) = in2;
+ }
+ else {
+ *((@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2;
+ }
+ }
+}
+/**end repeat1**/
+
+/**begin repeat1
+ * #kind = fmax, fmin#
+ * #OP = >=, <=#
+ **/
+NPY_NO_EXPORT void
+@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
*((@type@ *)op1) = in1;
}
else {
- *((@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2;
+ *((@type@ *)op1) = in1 @OP@ in2 ? in1 : in2;
}
}
}
/**end repeat**/
/**begin repeat
- * #func = exp, log#
- * #scalarf = npy_expf, npy_logf#
+ * #func = rint, ceil, floor, trunc#
+ * #scalarf = npy_rint, npy_ceil, npy_floor, npy_trunc#
+ */
+
+/**begin repeat1
+ * #TYPE = FLOAT, DOUBLE#
+ * #type = npy_float, npy_double#
+ * #typesub = f, #
+ */
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = @scalarf@@typesub@(in1);
+ }
+}
+
+
+/**end repeat1**/
+/**end repeat**/
+
+/**begin repeat
+ * #func = sin, cos, exp, log#
+ * #scalarf = npy_sinf, npy_cosf, npy_expf, npy_logf#
*/
NPY_NO_EXPORT NPY_GCC_OPT_3 void
/**end repeat**/
/**begin repeat
- * #isa = avx512f, avx2#
- * #ISA = AVX512F, AVX2#
+ * #isa = avx512f, fma#
+ * #ISA = AVX512F, FMA#
* #CHK = HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS#
*/
+/**begin repeat1
+ * #TYPE = FLOAT, DOUBLE#
+ * #type = npy_float, npy_double#
+ * #typesub = f, #
+ */
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_sqrt_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_sqrt_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = npy_sqrt@typesub@(in1);
+ }
+ }
+}
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_absolute_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_absolute_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ const @type@ tmp = in1 > 0 ? in1 : -in1;
+ /* add 0 to clear -0.0 */
+ *((@type@ *)op1) = tmp + 0;
+ }
+ }
+ npy_clear_floatstatus_barrier((char*)dimensions);
+}
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_square_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_square_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = in1*in1;
+ }
+ }
+}
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_reciprocal_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_reciprocal_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = 1.0f/in1;
+ }
+ }
+}
+
+/**begin repeat2
+ * #func = rint, ceil, floor, trunc#
+ * #scalarf = npy_rint, npy_ceil, npy_floor, npy_trunc#
+ */
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_@func@_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = @scalarf@@typesub@(in1);
+ }
+ }
+}
+
+/**end repeat2**/
+/**end repeat1**/
+
/**begin repeat1
* #func = exp, log#
* #scalarf = npy_expf, npy_logf#
}
}
+/**end repeat1**/
+
+/**begin repeat1
+ * #func = cos, sin#
+ * #enum = npy_compute_cos, npy_compute_sin#
+ * #scalarf = npy_cosf, npy_sinf#
+ */
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_sincos_FLOAT(args, dimensions, steps, @enum@)) {
+ UNARY_LOOP {
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+ @ISA@_sincos_FLOAT((npy_float *)op1, (npy_float *)ip1, 1, steps[0], @enum@);
+#else
+ const npy_float in1 = *(npy_float *)ip1;
+ *(npy_float *)op1 = @scalarf@(in1);
+#endif
+ }
+ }
+}
+
/**end repeat1**/
/**end repeat**/
+
/**begin repeat
* Float types
* #type = npy_float, npy_double, npy_longdouble, npy_float#
#define _NPY_UMATH_LOOPS_H_
#define BOOL_invert BOOL_logical_not
-#define BOOL_negative BOOL_logical_not
#define BOOL_add BOOL_logical_or
#define BOOL_bitwise_and BOOL_logical_and
#define BOOL_bitwise_or BOOL_logical_or
#define BOOL_logical_xor BOOL_not_equal
#define BOOL_bitwise_xor BOOL_logical_xor
#define BOOL_multiply BOOL_logical_and
-#define BOOL_subtract BOOL_logical_xor
#define BOOL_maximum BOOL_logical_or
#define BOOL_minimum BOOL_logical_and
#define BOOL_fmax BOOL_maximum
*/
NPY_NO_EXPORT void
@TYPE@_sqrt(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+/**begin repeat1
+ * #isa = avx512f, fma#
+ */
+
+/**begin repeat2
+ * #func = sqrt, absolute, square, reciprocal#
+ */
+NPY_NO_EXPORT void
+@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+/**end repeat2**/
+/**end repeat1**/
/**end repeat**/
/**begin repeat
- * #func = exp, log#
+ * #func = sin, cos, exp, log#
*/
NPY_NO_EXPORT void
FLOAT_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
/**begin repeat1
- * #isa = avx512f, avx2#
+ * #isa = avx512f, fma#
*/
NPY_NO_EXPORT void
/**end repeat1**/
/**end repeat**/
+/**begin repeat
+ * #func = rint, ceil, floor, trunc#
+ */
+
+/**begin repeat1
+ * #TYPE = FLOAT, DOUBLE#
+ */
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+
+/**begin repeat2
+ * #isa = avx512f, fma#
+ */
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+/**end repeat2**/
+/**end repeat1**/
+/**end repeat**/
+
/**begin repeat
* Float types
* #TYPE = HALF, FLOAT, DOUBLE, LONGDOUBLE#
NPY_NO_EXPORT void
@TYPE@_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+@TYPE@_isinf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+#define @TYPE@_isnan @TYPE@_isnat
+
NPY_NO_EXPORT void
@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
/**end repeat1**/
/**begin repeat1
- * #kind = maximum, minimum#
- * #OP = >, <#
+ * #kind = maximum, minimum, fmin, fmax#
**/
NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
#define TIMEDELTA_mq_m_floor_divide TIMEDELTA_mq_m_divide
#define TIMEDELTA_md_m_floor_divide TIMEDELTA_md_m_divide
/* #define TIMEDELTA_mm_d_floor_divide TIMEDELTA_mm_d_divide */
-#define TIMEDELTA_fmin TIMEDELTA_minimum
-#define TIMEDELTA_fmax TIMEDELTA_maximum
-#define DATETIME_fmin DATETIME_minimum
-#define DATETIME_fmax DATETIME_maximum
/*
*****************************************************************************
* -1 to be conservative, in case blas internally uses a for loop with an
* inclusive upper bound
*/
+#ifndef HAVE_BLAS_ILP64
#define BLAS_MAXSIZE (NPY_MAX_INT - 1)
+#else
+#define BLAS_MAXSIZE (NPY_MAX_INT64 - 1)
+#endif
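+/*
+ * Assuming CBLAS_INT matches the BLAS build (32 bits for a standard LP64
+ * BLAS, 64 bits for an ILP64 one), the chunking against BLAS_MAXSIZE only
+ * has an effect in the 32-bit case; with ILP64 the limit is effectively
+ * never reached.
+ */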
/*
* Determine if a 2d matrix can be used by BLAS
* op: data in c order, m shape
*/
enum CBLAS_ORDER order;
- int M, N, lda;
+ CBLAS_INT M, N, lda;
assert(m <= BLAS_MAXSIZE && n <= BLAS_MAXSIZE);
assert (is_blasable2d(is2_n, sizeof(@typ@), n, 1, sizeof(@typ@)));
- M = (int)m;
- N = (int)n;
+ M = (CBLAS_INT)m;
+ N = (CBLAS_INT)n;
if (is_blasable2d(is1_m, is1_n, m, n, sizeof(@typ@))) {
order = CblasColMajor;
- lda = (int)(is1_m / sizeof(@typ@));
+ lda = (CBLAS_INT)(is1_m / sizeof(@typ@));
}
else {
/* If not ColMajor, caller should have ensured we are RowMajor */
/* will not assert in release mode */
order = CblasRowMajor;
assert(is_blasable2d(is1_n, is1_m, n, m, sizeof(@typ@)));
- lda = (int)(is1_n / sizeof(@typ@));
+ lda = (CBLAS_INT)(is1_n / sizeof(@typ@));
}
- cblas_@prefix@gemv(order, CblasTrans, N, M, @step1@, ip1, lda, ip2,
+ CBLAS_FUNC(cblas_@prefix@gemv)(order, CblasTrans, N, M, @step1@, ip1, lda, ip2,
is2_n / sizeof(@typ@), @step0@, op, op_m / sizeof(@typ@));
}
*/
enum CBLAS_ORDER order = CblasRowMajor;
enum CBLAS_TRANSPOSE trans1, trans2;
- int M, N, P, lda, ldb, ldc;
+ CBLAS_INT M, N, P, lda, ldb, ldc;
assert(m <= BLAS_MAXSIZE && n <= BLAS_MAXSIZE && p <= BLAS_MAXSIZE);
- M = (int)m;
- N = (int)n;
- P = (int)p;
+ M = (CBLAS_INT)m;
+ N = (CBLAS_INT)n;
+ P = (CBLAS_INT)p;
assert(is_blasable2d(os_m, os_p, m, p, sizeof(@typ@)));
- ldc = (int)(os_m / sizeof(@typ@));
+ ldc = (CBLAS_INT)(os_m / sizeof(@typ@));
if (is_blasable2d(is1_m, is1_n, m, n, sizeof(@typ@))) {
trans1 = CblasNoTrans;
- lda = (int)(is1_m / sizeof(@typ@));
+ lda = (CBLAS_INT)(is1_m / sizeof(@typ@));
}
else {
/* If not ColMajor, caller should have ensured we are RowMajor */
/* will not assert in release mode */
assert(is_blasable2d(is1_n, is1_m, n, m, sizeof(@typ@)));
trans1 = CblasTrans;
- lda = (int)(is1_n / sizeof(@typ@));
+ lda = (CBLAS_INT)(is1_n / sizeof(@typ@));
}
if (is_blasable2d(is2_n, is2_p, n, p, sizeof(@typ@))) {
trans2 = CblasNoTrans;
- ldb = (int)(is2_n / sizeof(@typ@));
+ ldb = (CBLAS_INT)(is2_n / sizeof(@typ@));
}
else {
/* If not ColMajor, caller should have ensured we are RowMajor */
/* will not assert in release mode */
assert(is_blasable2d(is2_p, is2_n, p, n, sizeof(@typ@)));
trans2 = CblasTrans;
- ldb = (int)(is2_p / sizeof(@typ@));
+ ldb = (CBLAS_INT)(is2_p / sizeof(@typ@));
}
/*
* Use syrk if we have a case of a matrix times its transpose.
) {
npy_intp i,j;
if (trans1 == CblasNoTrans) {
- cblas_@prefix@syrk(order, CblasUpper, trans1, P, N, @step1@,
- ip1, lda, @step0@, op, ldc);
+ CBLAS_FUNC(cblas_@prefix@syrk)(
+ order, CblasUpper, trans1, P, N, @step1@,
+ ip1, lda, @step0@, op, ldc);
}
else {
- cblas_@prefix@syrk(order, CblasUpper, trans1, P, N, @step1@,
- ip1, ldb, @step0@, op, ldc);
+ CBLAS_FUNC(cblas_@prefix@syrk)(
+ order, CblasUpper, trans1, P, N, @step1@,
+ ip1, ldb, @step0@, op, ldc);
}
/* Copy the triangle */
for (i = 0; i < P; i++) {
}
else {
- cblas_@prefix@gemm(order, trans1, trans2, M, P, N, @step1@, ip1, lda,
- ip2, ldb, @step0@, op, ldc);
+ CBLAS_FUNC(cblas_@prefix@gemm)(
+ order, trans1, trans2, M, P, N, @step1@, ip1, lda,
+ ip2, ldb, @step0@, op, ldc);
}
}
}
else {
/* not a tuple */
- if (nout > 1 && DEPRECATE("passing a single argument to the "
- "'out' keyword argument of a "
- "ufunc with\n"
- "more than one output will "
- "result in an error in the "
- "future") < 0) {
- /*
- * If the deprecation is removed, also remove the loop
- * below setting tuple items to None (but keep this future
- * error message.)
- */
+ if (nout > 1) {
PyErr_SetString(PyExc_TypeError,
"'out' must be a tuple of arguments");
goto fail;
}
if (out != Py_None) {
/* not already a tuple and not None */
- PyObject *out_tuple = PyTuple_New(nout);
+ PyObject *out_tuple = PyTuple_New(1);
if (out_tuple == NULL) {
goto fail;
}
- for (i = 1; i < nout; i++) {
- Py_INCREF(Py_None);
- PyTuple_SET_ITEM(out_tuple, i, Py_None);
- }
/* out was borrowed ref; make it permanent */
Py_INCREF(out);
/* steals reference */
* If 'dtype' isn't NULL, this function steals its reference.
*/
static PyArrayObject *
-allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags,
+allocate_reduce_result(PyArrayObject *arr, const npy_bool *axis_flags,
PyArray_Descr *dtype, int subok)
{
npy_intp strides[NPY_MAXDIMS], stride;
* The return value is a view into 'out'.
*/
static PyArrayObject *
-conform_reduce_result(int ndim, npy_bool *axis_flags,
+conform_reduce_result(int ndim, const npy_bool *axis_flags,
PyArrayObject *out, int keepdims, const char *funcname,
int need_copy)
{
* Count the number of dimensions selected in 'axis_flags'
*/
static int
-count_axes(int ndim, npy_bool *axis_flags)
+count_axes(int ndim, const npy_bool *axis_flags)
{
int idim;
int naxes = 0;
NPY_NO_EXPORT PyArrayObject *
PyArray_InitializeReduceResult(
PyArrayObject *result, PyArrayObject *operand,
- npy_bool *axis_flags,
+ const npy_bool *axis_flags,
npy_intp *out_skip_first_count, const char *funcname)
{
npy_intp *strides, *shape, shape_orig[NPY_MAXDIMS];
NPY_ITER_ALIGNED;
if (wheremask != NULL) {
op[2] = wheremask;
- op_dtypes[2] = PyArray_DescrFromType(NPY_BOOL);
+ /* wheremask is guaranteed to be NPY_BOOL, so borrow its reference */
+ op_dtypes[2] = PyArray_DESCR(wheremask);
+ assert(op_dtypes[2]->type_num == NPY_BOOL);
if (op_dtypes[2] == NULL) {
goto fail;
}
/**end repeat**/
-
-/* QUESTION: Should we check for overflow / underflow in (l,r)shift? */
-
/**begin repeat
* #name = byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong#
* #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong#
+ * #suffix = hh,uhh,h,uh,,u,l,ul,ll,ull#
*/
/**begin repeat1
- * #oper = and, xor, or, lshift, rshift#
- * #op = &, ^, |, <<, >>#
+ * #oper = and, xor, or#
+ * #op = &, ^, |#
*/
#define @name@_ctype_@oper@(arg1, arg2, out) *(out) = (arg1) @op@ (arg2)
/**end repeat1**/
+#define @name@_ctype_lshift(arg1, arg2, out) *(out) = npy_lshift@suffix@(arg1, arg2)
+#define @name@_ctype_rshift(arg1, arg2, out) *(out) = npy_rshift@suffix@(arg1, arg2)
+
/**end repeat**/
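+/*
+ * Illustrative sketch (helper name and exact out-of-range semantics are
+ * assumptions, not spelled out in this patch) of why the raw "<<"/">>"
+ * operators were replaced above: shifting by a count that is negative or
+ * >= the bit width of the operand is undefined behavior in C, so a
+ * guarded helper along these lines is used instead:
+ *
+ *   static inline npy_int example_lshift(npy_int a, npy_int b)
+ *   {
+ *       if ((npy_uint)b < sizeof(a) * CHAR_BIT) {
+ *           return a << b;
+ *       }
+ *       return 0;   (out-of-range count: well-defined fallback)
+ *   }
+ */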
/**begin repeat
/**begin repeat
* #name = float, double, longdouble#
* #type = npy_float, npy_double, npy_longdouble#
+ * #c = f,,l#
*/
-static npy_@name@ (*_basic_@name@_pow)(@type@ a, @type@ b);
static void
@name@_ctype_power(@type@ a, @type@ b, @type@ *out)
{
- *out = _basic_@name@_pow(a, b);
+ *out = npy_pow@c@(a, b);
}
+
/**end repeat**/
static void
half_ctype_power(npy_half a, npy_half b, npy_half *out)
{
const npy_float af = npy_half_to_float(a);
const npy_float bf = npy_half_to_float(b);
- const npy_float outf = _basic_float_pow(af,bf);
+ const npy_float outf = npy_powf(af,bf);
*out = npy_float_to_half(outf);
}
}
/**end repeat**/
-/*
- * Get the nc_powf, nc_pow, and nc_powl functions from
- * the data area of the power ufunc in umathmodule.
- */
-
/**begin repeat
* #name = cfloat, cdouble, clongdouble#
* #type = npy_cfloat, npy_cdouble, npy_clongdouble#
+ * #c = f,,l#
*/
static void
@name@_ctype_positive(@type@ a, @type@ *out)
out->imag = a.imag;
}
-static void (*_basic_@name@_pow)(@type@ *, @type@ *, @type@ *);
-
static void
@name@_ctype_power(@type@ a, @type@ b, @type@ *out)
{
- _basic_@name@_pow(&a, &b, out);
+ *out = npy_cpow@c@(a, b);
}
/**end repeat**/
* 1) Convert the types to the common type if both are scalars (0 return)
* 2) If both are not scalars use ufunc machinery (-2 return)
* 3) If both are scalars but cannot be cast to the right type
- * return NotImplmented (-1 return)
+ * return NotImplemented (-1 return)
*
* 4) Perform the function on the C-type.
* 5) If an error condition occurred, check to see
}
/**end repeat**/
+/**begin repeat
+ *
+ * #name = byte, ubyte, short, ushort, int, uint,
+ * long, ulong, longlong, ulonglong,
+ * half, float, double, longdouble,
+ * cfloat, cdouble, clongdouble#
+ * #Name = Byte, UByte, Short, UShort, Int, UInt,
+ * Long, ULong, LongLong, ULongLong,
+ * Half, Float, Double, LongDouble,
+ * CFloat, CDouble, CLongDouble#
+ * #cmplx = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1#
+ * #to_ctype = , , , , , , , , , , npy_half_to_double, , , , , , #
+ * #func = PyFloat_FromDouble*17#
+ */
+static NPY_INLINE PyObject *
+@name@_float(PyObject *obj)
+{
+#if @cmplx@
+ if (emit_complexwarning() < 0) {
+ return NULL;
+ }
+ return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@).real));
+#else
+ return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@)));
+#endif
+}
+/**end repeat**/
+
+
+#if !defined(NPY_PY3K)
+
/**begin repeat
*
* #name = (byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong,
* half, float, double, longdouble,
- * cfloat, cdouble, clongdouble)*2#
+ * cfloat, cdouble, clongdouble)#
* #Name = (Byte, UByte, Short, UShort, Int, UInt,
* Long, ULong, LongLong, ULongLong,
* Half, Float, Double, LongDouble,
- * CFloat, CDouble, CLongDouble)*2#
- * #cmplx = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)*2#
- * #to_ctype = (, , , , , , , , , , npy_half_to_double, , , , , , )*2#
- * #which = long*17, float*17#
+ * CFloat, CDouble, CLongDouble)#
+ * #cmplx = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)#
+ * #to_ctype = (, , , , , , , , , , npy_half_to_double, , , , , , )#
* #func = (PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5,
* PyLong_FromDouble*3, npy_longdouble_to_PyLong,
- * PyLong_FromDouble*2, npy_longdouble_to_PyLong,
- * PyFloat_FromDouble*17#
+ * PyLong_FromDouble*2, npy_longdouble_to_PyLong#
*/
static NPY_INLINE PyObject *
-@name@_@which@(PyObject *obj)
+@name@_long(PyObject *obj)
{
#if @cmplx@
if (emit_complexwarning() < 0) {
}
/**end repeat**/
-#if !defined(NPY_PY3K)
-
/**begin repeat
*
* #name = (byte, ubyte, short, ushort, int, uint,
/**end repeat**/
}
-static int
-get_functions(PyObject * mm)
-{
- PyObject *obj;
- void **funcdata;
- char *signatures;
- int i, j;
- int ret = -1;
-
- /* Get the nc_pow functions */
- /* Get the pow functions */
- obj = PyObject_GetAttrString(mm, "power");
- if (obj == NULL) {
- goto fail;
- }
- funcdata = ((PyUFuncObject *)obj)->data;
- signatures = ((PyUFuncObject *)obj)->types;
-
- i = 0;
- j = 0;
- while (signatures[i] != NPY_FLOAT) {
- i += 3;
- j++;
- }
- _basic_float_pow = funcdata[j];
- _basic_double_pow = funcdata[j + 1];
- _basic_longdouble_pow = funcdata[j + 2];
- _basic_cfloat_pow = funcdata[j + 3];
- _basic_cdouble_pow = funcdata[j + 4];
- _basic_clongdouble_pow = funcdata[j + 5];
- Py_DECREF(obj);
-
- return ret = 0;
-
- fail:
- Py_DECREF(mm);
- return ret;
-}
-
NPY_NO_EXPORT int initscalarmath(PyObject * m)
{
- if (get_functions(m) < 0) {
- return -1;
- }
-
add_scalarmath();
return 0;
*/
/**begin repeat
- * #ISA = AVX2, AVX512F#
- * #isa = avx2, avx512f#
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512f#
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
* #REGISTER_SIZE = 32, 64#
*/
/* prototypes */
+/**begin repeat1
+ * #type = npy_float, npy_double#
+ * #TYPE = FLOAT, DOUBLE#
+ */
+
+/**begin repeat2
+ * #func = sqrt, absolute, square, reciprocal, rint, floor, ceil, trunc#
+ */
+
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_INLINE NPY_GCC_TARGET_@ISA@ void
+@ISA@_@func@_@TYPE@(@type@ *, @type@ *, const npy_intp n, const npy_intp stride);
+#endif
+
+static NPY_INLINE int
+run_unary_@isa@_@func@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+ if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(@type@), @REGISTER_SIZE@)) {
+ @ISA@_@func@_@TYPE@((@type@*)args[1], (@type@*)args[0], dimensions[0], steps[0]);
+ return 1;
+ }
+ else
+ return 0;
+#endif
+ return 0;
+}
+
+/**end repeat2**/
+/**end repeat1**/
+
/**begin repeat1
* #func = exp, log#
*/
-#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
static NPY_INLINE void
@ISA@_@func@_FLOAT(npy_float *, npy_float *, const npy_intp n, const npy_intp stride);
#endif
static NPY_INLINE int
run_unary_@isa@_@func@_FLOAT(char **args, npy_intp *dimensions, npy_intp *steps)
{
-#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), @REGISTER_SIZE@)) {
@ISA@_@func@_FLOAT((npy_float*)args[1], (npy_float*)args[0], dimensions[0], steps[0]);
return 1;
/**end repeat1**/
-/**end repeat**/
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_INLINE void
+@ISA@_sincos_FLOAT(npy_float *, npy_float *, const npy_intp n, const npy_intp steps, NPY_TRIG_OP);
+#endif
+static NPY_INLINE int
+run_unary_@isa@_sincos_FLOAT(char **args, npy_intp *dimensions, npy_intp *steps, NPY_TRIG_OP my_trig_op)
+{
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+ if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), @REGISTER_SIZE@)) {
+ @ISA@_sincos_FLOAT((npy_float*)args[1], (npy_float*)args[0], dimensions[0], steps[0], my_trig_op);
+ return 1;
+ }
+ else
+ return 0;
+#endif
+ return 0;
+}
+
+/**end repeat**/
/**begin repeat
LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) {
op[i] = @scalarf@(ip[i]);
}
- assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
+ assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
npy_is_aligned(&op[i], VECTOR_SIZE_BYTES));
if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) {
LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) {
op[i] = @scalar@_@type@(ip[i]);
}
- assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
+ assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
npy_is_aligned(&op[i], VECTOR_SIZE_BYTES));
if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) {
LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
/* Order of operations important for MSVC 2015 */
*op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
}
- assert(n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES));
+ assert((npy_uintp)n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES));
if (i + 3 * stride <= n) {
/* load the first elements */
@vtype@ c1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
/* bunch of helper functions used in ISA_exp/log_FLOAT*/
#if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_fmadd(__m256 a, __m256 b, __m256 c)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_full_load_mask_ps(void)
{
- return _mm256_add_ps(_mm256_mul_ps(a, b), c);
+ return _mm256_set1_ps(-1.0);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_get_full_load_mask(void)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256i
+fma_get_full_load_mask_pd(void)
{
- return _mm256_set1_ps(-1.0);
+ return _mm256_castpd_si256(_mm256_set1_pd(-1.0));
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_get_partial_load_mask(const npy_int num_lanes, const npy_int total_elem)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_partial_load_mask_ps(const npy_int num_elem, const npy_int num_lanes)
{
float maskint[16] = {-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,
1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0};
- float* addr = maskint + total_elem - num_lanes;
+ float* addr = maskint + num_lanes - num_elem;
return _mm256_loadu_ps(addr);
}
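+/*
+ * Worked example (illustrative): with num_elem = 3 and num_lanes = 8 the
+ * load starts at maskint + 5, producing {-1,-1,-1, 1,1,1,1,1}; only the
+ * first three lanes have the sign bit set and are treated as active.
+ */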
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_masked_gather(__m256 src,
- npy_float* addr,
- __m256i vindex,
- __m256 mask)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256i
+fma_get_partial_load_mask_pd(const npy_int num_elem, const npy_int num_lanes)
+{
+ npy_int maskint[16] = {-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1};
+ npy_int* addr = maskint + 2*num_lanes - 2*num_elem;
+ return _mm256_loadu_si256((__m256i*) addr);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_masked_gather_ps(__m256 src,
+ npy_float* addr,
+ __m256i vindex,
+ __m256 mask)
{
return _mm256_mask_i32gather_ps(src, addr, vindex, mask, 4);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_masked_load(__m256 mask, npy_float* addr)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256d
+fma_masked_gather_pd(__m256d src,
+ npy_double* addr,
+ __m128i vindex,
+ __m256d mask)
+{
+ return _mm256_mask_i32gather_pd(src, addr, vindex, mask, 8);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_masked_load_ps(__m256 mask, npy_float* addr)
{
return _mm256_maskload_ps(addr, _mm256_cvtps_epi32(mask));
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_set_masked_lanes(__m256 x, __m256 val, __m256 mask)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256d
+fma_masked_load_pd(__m256i mask, npy_double* addr)
+{
+ return _mm256_maskload_pd(addr, mask);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_set_masked_lanes_ps(__m256 x, __m256 val, __m256 mask)
{
return _mm256_blendv_ps(x, val, mask);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_blend(__m256 x, __m256 y, __m256 ymask)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256d
+fma_set_masked_lanes_pd(__m256d x, __m256d val, __m256d mask)
+{
+ return _mm256_blendv_pd(x, val, mask);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_blend(__m256 x, __m256 y, __m256 ymask)
{
return _mm256_blendv_ps(x, y, ymask);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_get_exponent(__m256 x)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_invert_mask_ps(__m256 ymask)
+{
+ return _mm256_andnot_ps(ymask, _mm256_set1_ps(-1.0));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256i
+fma_invert_mask_pd(__m256i ymask)
+{
+ return _mm256_andnot_si256(ymask, _mm256_set1_epi32(0xFFFFFFFF));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_should_calculate_sine(__m256i k, __m256i andop, __m256i cmp)
+{
+ return _mm256_cvtepi32_ps(
+ _mm256_cmpeq_epi32(_mm256_and_si256(k, andop), cmp));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_should_negate(__m256i k, __m256i andop, __m256i cmp)
+{
+ return fma_should_calculate_sine(k, andop, cmp);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_exponent(__m256 x)
{
/*
* Special handling of denormals:
return _mm256_blendv_ps(exp, denorm_exp, denormal_mask);
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
-avx2_get_mantissa(__m256 x)
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_mantissa(__m256 x)
{
/*
* Special handling of denormals:
}
-static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
-avx2_scalef_ps(__m256 poly, __m256 quadrant)
+fma_scalef_ps(__m256 poly, __m256 quadrant)
{
/*
* Handle denormals (which occur when quadrant <= -125):
}
}
+/**begin repeat
+ * #vsub = ps, pd#
+ * #vtype = __m256, __m256d#
+ */
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_abs_@vsub@(@vtype@ x)
+{
+ return _mm256_andnot_@vsub@(_mm256_set1_@vsub@(-0.0), x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_reciprocal_@vsub@(@vtype@ x)
+{
+ return _mm256_div_@vsub@(_mm256_set1_@vsub@(1.0f), x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_rint_@vsub@(@vtype@ x)
+{
+ return _mm256_round_@vsub@(x, _MM_FROUND_TO_NEAREST_INT);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_floor_@vsub@(@vtype@ x)
+{
+ return _mm256_round_@vsub@(x, _MM_FROUND_TO_NEG_INF);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_ceil_@vsub@(@vtype@ x)
+{
+ return _mm256_round_@vsub@(x, _MM_FROUND_TO_POS_INF);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_trunc_@vsub@(@vtype@ x)
+{
+ return _mm256_round_@vsub@(x, _MM_FROUND_TO_ZERO);
+}
+/**end repeat**/
#endif
#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
-avx512_get_full_load_mask(void)
+avx512_get_full_load_mask_ps(void)
{
return 0xFFFF;
}
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask8
+avx512_get_full_load_mask_pd(void)
+{
+ return 0xFF;
+}
+
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
-avx512_get_partial_load_mask(const npy_int num_elem, const npy_int total_elem)
+avx512_get_partial_load_mask_ps(const npy_int num_elem, const npy_int total_elem)
{
return (0x0001 << num_elem) - 0x0001;
}
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask8
+avx512_get_partial_load_mask_pd(const npy_int num_elem, const npy_int total_elem)
+{
+ return (0x01 << num_elem) - 0x01;
+}
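+/*
+ * Worked example (illustrative): num_elem = 3 gives (1 << 3) - 1 = 0b0111,
+ * i.e. only the three lowest lanes of the write mask are enabled.
+ */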
+
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
-avx512_masked_gather(__m512 src,
- npy_float* addr,
- __m512i vindex,
- __mmask16 kmask)
+avx512_masked_gather_ps(__m512 src,
+ npy_float* addr,
+ __m512i vindex,
+ __mmask16 kmask)
{
return _mm512_mask_i32gather_ps(src, kmask, vindex, addr, 4);
}
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512d
+avx512_masked_gather_pd(__m512d src,
+ npy_double* addr,
+ __m256i vindex,
+ __mmask8 kmask)
+{
+ return _mm512_mask_i32gather_pd(src, kmask, vindex, addr, 8);
+}
+
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
-avx512_masked_load(__mmask16 mask, npy_float* addr)
+avx512_masked_load_ps(__mmask16 mask, npy_float* addr)
{
return _mm512_maskz_loadu_ps(mask, (__m512 *)addr);
}
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512d
+avx512_masked_load_pd(__mmask8 mask, npy_double* addr)
+{
+ return _mm512_maskz_loadu_pd(mask, (__m512d *)addr);
+}
+
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
-avx512_set_masked_lanes(__m512 x, __m512 val, __mmask16 mask)
+avx512_set_masked_lanes_ps(__m512 x, __m512 val, __mmask16 mask)
{
return _mm512_mask_blend_ps(mask, x, val);
}
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512d
+avx512_set_masked_lanes_pd(__m512d x, __m512d val, __mmask8 mask)
+{
+ return _mm512_mask_blend_pd(mask, x, val);
+}
+
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
avx512_blend(__m512 x, __m512 y, __mmask16 ymask)
{
return _mm512_mask_mov_ps(x, ymask, y);
}
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
+avx512_invert_mask_ps(__mmask16 ymask)
+{
+ return _mm512_knot(ymask);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask8
+avx512_invert_mask_pd(__mmask8 ymask)
+{
+ return _mm512_knot(ymask);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
+avx512_should_calculate_sine(__m512i k, __m512i andop, __m512i cmp)
+{
+ return _mm512_cmpeq_epi32_mask(_mm512_and_epi32(k, andop), cmp);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
+avx512_should_negate(__m512i k, __m512i andop, __m512i cmp)
+{
+ return avx512_should_calculate_sine(k, andop, cmp);
+}
+
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
avx512_get_exponent(__m512 x)
{
{
return _mm512_scalef_ps(poly, quadrant);
}
+/**begin repeat
+ * #vsub = ps, pd#
+ * #epi_vsub = epi32, epi64#
+ * #vtype = __m512, __m512d#
+ * #and_const = 0x7fffffff, 0x7fffffffffffffffLL#
+ */
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_abs_@vsub@(@vtype@ x)
+{
+ return (@vtype@) _mm512_and_@epi_vsub@((__m512i) x,
+ _mm512_set1_@epi_vsub@ (@and_const@));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_reciprocal_@vsub@(@vtype@ x)
+{
+ return _mm512_div_@vsub@(_mm512_set1_@vsub@(1.0f), x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_rint_@vsub@(@vtype@ x)
+{
+ return _mm512_roundscale_@vsub@(x, 0x08);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_floor_@vsub@(@vtype@ x)
+{
+ return _mm512_roundscale_@vsub@(x, 0x09);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_ceil_@vsub@(@vtype@ x)
+{
+ return _mm512_roundscale_@vsub@(x, 0x0A);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_trunc_@vsub@(@vtype@ x)
+{
+ return _mm512_roundscale_@vsub@(x, 0x0B);
+}
+/**end repeat**/
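+/*
+ * Note on the roundscale immediates above (illustrative; taken from the
+ * AVX512 encoding): each combines _MM_FROUND_NO_EXC (0x08) with a rounding
+ * mode in the low bits: 0x08 to nearest, 0x09 toward -inf, 0x0A toward
+ * +inf, 0x0B toward zero.
+ */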
#endif
/**begin repeat
- * #ISA = AVX2, AVX512F#
- * #isa = avx2, avx512#
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512#
* #vtype = __m256, __m512#
* #vsize = 256, 512#
* #or = or_ps, kor#
* #vsub = , _mask#
* #mask = __m256, __mmask16#
- * #fmadd = avx2_fmadd,_mm512_fmadd_ps#
+ * #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps#
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
**/
-#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS
+#if defined @CHK@
+
+/*
+ * Vectorized Cody-Waite range reduction technique
+ * Performs the reduction step x* = x - y*C in three steps:
+ * 1) x* = x - y*c1
+ * 2) x* = x - y*c2
+ * 3) x* = x - y*c3
+ * c1, c2 are exact floating points, c3 = C - c1 - c2 simulates higher precision
+ */
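+/*
+ * Scalar sketch of the same reduction (illustrative only; the constants
+ * are assumptions): for C = pi/2, pick c1 + c2 + c3 ~= C with c1 and c2
+ * exactly representable, then
+ *
+ *   y = rintf(x * (2.0f/pi));
+ *   r = x - y*c1;
+ *   r = r - y*c2;
+ *   r = r - y*c3;   (r now lies in roughly [-pi/4, pi/4])
+ *
+ * The fmadd calls below perform exactly these subtraction steps.
+ */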
+
static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
@isa@_range_reduction(@vtype@ x, @vtype@ y, @vtype@ c1, @vtype@ c2, @vtype@ c3)
{
reduced_x = @fmadd@(y, c3, reduced_x);
return reduced_x;
}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @mask@
+@isa@_in_range_mask(@vtype@ x, npy_float fmax, npy_float fmin)
+{
+ @mask@ m1 = _mm@vsize@_cmp_ps@vsub@(
+ x, _mm@vsize@_set1_ps(fmax), _CMP_GT_OQ);
+ @mask@ m2 = _mm@vsize@_cmp_ps@vsub@(
+ x, _mm@vsize@_set1_ps(fmin), _CMP_LT_OQ);
+ return _mm@vsize@_@or@(m1,m2);
+}
+
+/*
+ * Approximate cosine algorithm for x \in [-PI/4, PI/4]
+ * Maximum ULP across all 32-bit floats = 0.875
+ */
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
+@isa@_cosine(@vtype@ x2, @vtype@ invf8, @vtype@ invf6, @vtype@ invf4,
+ @vtype@ invf2, @vtype@ invf0)
+{
+ @vtype@ cos = @fmadd@(invf8, x2, invf6);
+ cos = @fmadd@(cos, x2, invf4);
+ cos = @fmadd@(cos, x2, invf2);
+ cos = @fmadd@(cos, x2, invf0);
+ return cos;
+}
+
+/*
+ * Approximate sine algorithm for x \in [-PI/4, PI/4]
+ * Maximum ULP across all 32-bit floats = 0.647
+ */
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
+@isa@_sine(@vtype@ x, @vtype@ x2, @vtype@ invf9, @vtype@ invf7,
+ @vtype@ invf5, @vtype@ invf3,
+ @vtype@ zero)
+{
+ @vtype@ sin = @fmadd@(invf9, x2, invf7);
+ sin = @fmadd@(sin, x2, invf5);
+ sin = @fmadd@(sin, x2, invf3);
+ sin = @fmadd@(sin, x2, zero);
+ sin = @fmadd@(sin, x, x);
+ return sin;
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
+@isa@_sqrt_ps(@vtype@ x)
+{
+ return _mm@vsize@_sqrt_ps(x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@d
+@isa@_sqrt_pd(@vtype@d x)
+{
+ return _mm@vsize@_sqrt_pd(x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
+@isa@_square_ps(@vtype@ x)
+{
+ return _mm@vsize@_mul_ps(x,x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@d
+@isa@_square_pd(@vtype@d x)
+{
+ return _mm@vsize@_mul_pd(x,x);
+}
+
+#endif
+/**end repeat**/
+
+
+/**begin repeat
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512#
+ * #vsize = 256, 512#
+ * #BYTES = 32, 64#
+ * #mask = __m256, __mmask16#
+ * #vsub = , _mask#
+ * #vtype = __m256, __m512#
+ * #cvtps_epi32 = _mm256_cvtps_epi32, #
+ * #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps#
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
+ */
+
+/**begin repeat1
+ * #func = sqrt, absolute, square, reciprocal, rint, ceil, floor, trunc#
+ * #vectorf = sqrt, abs, square, reciprocal, rint, ceil, floor, trunc#
+ * #replace_0_with_1 = 0, 0, 0, 1, 0, 0, 0, 0#
+ */
+
+#if defined @CHK@
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
+@ISA@_@func@_FLOAT(npy_float* op,
+ npy_float* ip,
+ const npy_intp array_size,
+ const npy_intp steps)
+{
+ const npy_intp stride = steps/sizeof(npy_float);
+ const npy_int num_lanes = @BYTES@/sizeof(npy_float);
+ npy_intp num_remaining_elements = array_size;
+ @vtype@ ones_f = _mm@vsize@_set1_ps(1.0f);
+ @mask@ load_mask = @isa@_get_full_load_mask_ps();
+#if @replace_0_with_1@
+ @mask@ inv_load_mask = @isa@_invert_mask_ps(load_mask);
+#endif
+ npy_int indexarr[16];
+ for (npy_int ii = 0; ii < 16; ii++) {
+ indexarr[ii] = ii*stride;
+ }
+ @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]);
+
+ while (num_remaining_elements > 0) {
+ if (num_remaining_elements < num_lanes) {
+ load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements,
+ num_lanes);
+#if @replace_0_with_1@
+ inv_load_mask = @isa@_invert_mask_ps(load_mask);
+#endif
+ }
+ @vtype@ x;
+ if (stride == 1) {
+ x = @isa@_masked_load_ps(load_mask, ip);
+#if @replace_0_with_1@
+ /*
+ * Replace masked elements with 1.0f to avoid divide by zero fp
+ * exception in reciprocal
+ */
+ x = @isa@_set_masked_lanes_ps(x, ones_f, inv_load_mask);
+#endif
+ }
+ else {
+ x = @isa@_masked_gather_ps(ones_f, ip, vindex, load_mask);
+ }
+ @vtype@ out = @isa@_@vectorf@_ps(x);
+ @masked_store@(op, @cvtps_epi32@(load_mask), out);
+
+ ip += num_lanes*stride;
+ op += num_lanes;
+ num_remaining_elements -= num_lanes;
+ }
+}
+#endif
+/**end repeat1**/
+/**end repeat**/
+
+/**begin repeat
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512#
+ * #vsize = 256, 512#
+ * #BYTES = 32, 64#
+ * #mask = __m256i, __mmask8#
+ * #vsub = , _mask#
+ * #vtype = __m256d, __m512d#
+ * #vindextype = __m128i, __m256i#
+ * #vindexsize = 128, 256#
+ * #vindexload = _mm_loadu_si128, _mm256_loadu_si256#
+ * #cvtps_epi32 = _mm256_cvtpd_epi32, #
+ * #castmask = _mm256_castsi256_pd, #
+ * #masked_store = _mm256_maskstore_pd, _mm512_mask_storeu_pd#
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
+ */
+
+/**begin repeat1
+ * #func = sqrt, absolute, square, reciprocal, rint, ceil, floor, trunc#
+ * #vectorf = sqrt, abs, square, reciprocal, rint, ceil, floor, trunc#
+ * #replace_0_with_1 = 0, 0, 0, 1, 0, 0, 0, 0#
+ */
+
+#if defined @CHK@
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
+@ISA@_@func@_DOUBLE(npy_double* op,
+ npy_double* ip,
+ const npy_intp array_size,
+ const npy_intp steps)
+{
+ const npy_intp stride = steps/sizeof(npy_double);
+ const npy_int num_lanes = @BYTES@/sizeof(npy_double);
+ npy_intp num_remaining_elements = array_size;
+ @mask@ load_mask = @isa@_get_full_load_mask_pd();
+#if @replace_0_with_1@
+ @mask@ inv_load_mask = @isa@_invert_mask_pd(load_mask);
+#endif
+ @vtype@ ones_d = _mm@vsize@_set1_pd(1.0f);
+ npy_int indexarr[8];
+ for (npy_int ii = 0; ii < 8; ii++) {
+ indexarr[ii] = ii*stride;
+ }
+ @vindextype@ vindex = @vindexload@((@vindextype@*)&indexarr[0]);
+
+ while (num_remaining_elements > 0) {
+ if (num_remaining_elements < num_lanes) {
+ load_mask = @isa@_get_partial_load_mask_pd(num_remaining_elements,
+ num_lanes);
+#if @replace_0_with_1@
+ inv_load_mask = @isa@_invert_mask_pd(load_mask);
+#endif
+ }
+ @vtype@ x;
+ if (stride == 1) {
+ x = @isa@_masked_load_pd(load_mask, ip);
+#if @replace_0_with_1@
+ /*
+ * Replace masked elements with 1.0f to avoid divide by zero fp
+ * exception in reciprocal
+ */
+ x = @isa@_set_masked_lanes_pd(x, ones_d, @castmask@(inv_load_mask));
#endif
+ }
+ else {
+ x = @isa@_masked_gather_pd(ones_d, ip, vindex, @castmask@(load_mask));
+ }
+ @vtype@ out = @isa@_@vectorf@_pd(x);
+ @masked_store@(op, load_mask, out);
+
+ ip += num_lanes*stride;
+ op += num_lanes;
+ num_remaining_elements -= num_lanes;
+ }
+}
+#endif
+/**end repeat1**/
/**end repeat**/
/**begin repeat
- * #ISA = AVX2, AVX512F#
- * #isa = avx2, avx512#
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512#
* #vtype = __m256, __m512#
* #vsize = 256, 512#
* #BYTES = 32, 64#
* #or_masks =_mm256_or_ps, _mm512_kor#
* #and_masks =_mm256_and_ps, _mm512_kand#
* #xor_masks =_mm256_xor_ps, _mm512_kxor#
- * #fmadd = avx2_fmadd,_mm512_fmadd_ps#
+ * #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps#
* #mask_to_int = _mm256_movemask_ps, #
* #full_mask= 0xFF, 0xFFFF#
* #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps#
* #cvtps_epi32 = _mm256_cvtps_epi32, #
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
*/
+/*
+ * Vectorized approximate sine/cosine algorithms: The following code is a
+ * vectorized version of the algorithm presented here:
+ * https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751
+ * (1) Load data in ZMM/YMM registers and generate mask for elements that are
+ * within range [-71476.0625f, 71476.0625f] for cosine and [-117435.992f,
+ * 117435.992f] for sine.
+ * (2) For elements within range, perform range reduction using Cody-Waite's
+ * method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, PI/4].
+ * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the quadrant k =
+ * int(y).
+ * (4) For elements outside that range, Cody-Waite reduction performs poorly,
+ * leading to catastrophic cancellation. We compute cosine by calling glibc in
+ * a scalar fashion.
+ * (5) Vectorized implementation has a max ULP of 1.49 and performs at least
+ * 5-7x faster than scalar implementations when magnitude of all elements in
+ * the array < 71476.0625f (117435.992f for sine). Worst case performance is
+ * when all the elements are large, leading to about a 1-2% reduction in
+ * performance.
+ */
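+/*
+ * Worked mapping for step (3) (illustrative): with reduced argument x*
+ * and quadrant k, sin(x* + k*pi/2) equals
+ *    sin(x*)  for k % 4 == 0,     cos(x*)  for k % 4 == 1,
+ *   -sin(x*)  for k % 4 == 2,    -cos(x*)  for k % 4 == 3,
+ * and cosine reuses the same table with k incremented by one, since
+ * cos(x) = sin(x + pi/2); this is what the iquadrant logic below does.
+ */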
+
+#if defined @CHK@
+static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
+@ISA@_sincos_FLOAT(npy_float * op,
+ npy_float * ip,
+ const npy_intp array_size,
+ const npy_intp steps,
+ NPY_TRIG_OP my_trig_op)
+{
+ const npy_intp stride = steps/sizeof(npy_float);
+ const npy_int num_lanes = @BYTES@/sizeof(npy_float);
+ npy_float large_number = 71476.0625f;
+ if (my_trig_op == npy_compute_sin) {
+ large_number = 117435.992f;
+ }
+
+ /* Load up frequently used constants */
+ @vtype@i zeros = _mm@vsize@_set1_epi32(0);
+ @vtype@i ones = _mm@vsize@_set1_epi32(1);
+ @vtype@i twos = _mm@vsize@_set1_epi32(2);
+ @vtype@ two_over_pi = _mm@vsize@_set1_ps(NPY_TWO_O_PIf);
+ @vtype@ codyw_c1 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_HIGHf);
+ @vtype@ codyw_c2 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_MEDf);
+ @vtype@ codyw_c3 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_LOWf);
+ @vtype@ cos_invf0 = _mm@vsize@_set1_ps(NPY_COEFF_INVF0_COSINEf);
+ @vtype@ cos_invf2 = _mm@vsize@_set1_ps(NPY_COEFF_INVF2_COSINEf);
+ @vtype@ cos_invf4 = _mm@vsize@_set1_ps(NPY_COEFF_INVF4_COSINEf);
+ @vtype@ cos_invf6 = _mm@vsize@_set1_ps(NPY_COEFF_INVF6_COSINEf);
+ @vtype@ cos_invf8 = _mm@vsize@_set1_ps(NPY_COEFF_INVF8_COSINEf);
+ @vtype@ sin_invf3 = _mm@vsize@_set1_ps(NPY_COEFF_INVF3_SINEf);
+ @vtype@ sin_invf5 = _mm@vsize@_set1_ps(NPY_COEFF_INVF5_SINEf);
+ @vtype@ sin_invf7 = _mm@vsize@_set1_ps(NPY_COEFF_INVF7_SINEf);
+ @vtype@ sin_invf9 = _mm@vsize@_set1_ps(NPY_COEFF_INVF9_SINEf);
+ @vtype@ cvt_magic = _mm@vsize@_set1_ps(NPY_RINT_CVT_MAGICf);
+ @vtype@ zero_f = _mm@vsize@_set1_ps(0.0f);
+ @vtype@ quadrant, reduced_x, reduced_x2, cos, sin;
+ @vtype@i iquadrant;
+ @mask@ nan_mask, glibc_mask, sine_mask, negate_mask;
+ @mask@ load_mask = @isa@_get_full_load_mask_ps();
+ npy_intp num_remaining_elements = array_size;
+ npy_int indexarr[16];
+ for (npy_int ii = 0; ii < 16; ii++) {
+ indexarr[ii] = ii*stride;
+ }
+ @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]);
+
+ while (num_remaining_elements > 0) {
+
+ if (num_remaining_elements < num_lanes) {
+ load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements,
+ num_lanes);
+ }
+
+ @vtype@ x;
+ if (stride == 1) {
+ x = @isa@_masked_load_ps(load_mask, ip);
+ }
+ else {
+ x = @isa@_masked_gather_ps(zero_f, ip, vindex, load_mask);
+ }
+
+ /*
+ * For elements outside of this range, Cody-Waite's range reduction
+ * becomes inaccurate and we will call glibc to compute cosine for
+ * these numbers
+ */
+
+ glibc_mask = @isa@_in_range_mask(x, large_number, -large_number);
+ glibc_mask = @and_masks@(load_mask, glibc_mask);
+ nan_mask = _mm@vsize@_cmp_ps@vsub@(x, x, _CMP_NEQ_UQ);
+ x = @isa@_set_masked_lanes_ps(x, zero_f, @or_masks@(nan_mask, glibc_mask));
+ npy_int iglibc_mask = @mask_to_int@(glibc_mask);
+
+ if (iglibc_mask != @full_mask@) {
+ quadrant = _mm@vsize@_mul_ps(x, two_over_pi);
+
+ /* round to nearest */
+ quadrant = _mm@vsize@_add_ps(quadrant, cvt_magic);
+ quadrant = _mm@vsize@_sub_ps(quadrant, cvt_magic);
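+ /*
+ * Illustrative note (constant value assumed, not stated in this hunk):
+ * NPY_RINT_CVT_MAGICf is 1.5 * 2^23, so the add/sub pair above rounds
+ * to the nearest integer in float arithmetic without an explicit cvt.
+ */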
+
+ /* Cody-Waite's range reduction algorithm */
+ reduced_x = @isa@_range_reduction(x, quadrant,
+ codyw_c1, codyw_c2, codyw_c3);
+ reduced_x2 = _mm@vsize@_mul_ps(reduced_x, reduced_x);
+
+ /* compute cosine and sine */
+ cos = @isa@_cosine(reduced_x2, cos_invf8, cos_invf6, cos_invf4,
+ cos_invf2, cos_invf0);
+ sin = @isa@_sine(reduced_x, reduced_x2, sin_invf9, sin_invf7,
+ sin_invf5, sin_invf3, zero_f);
+
+ iquadrant = _mm@vsize@_cvtps_epi32(quadrant);
+ if (my_trig_op == npy_compute_cos) {
+ iquadrant = _mm@vsize@_add_epi32(iquadrant, ones);
+ }
+
+ /* blend sin and cos based on the quadrant */
+ sine_mask = @isa@_should_calculate_sine(iquadrant, ones, zeros);
+ cos = @isa@_blend(cos, sin, sine_mask);
+
+ /* multiply by -1 for appropriate elements */
+ negate_mask = @isa@_should_negate(iquadrant, twos, twos);
+ cos = @isa@_blend(cos, _mm@vsize@_sub_ps(zero_f, cos), negate_mask);
+ cos = @isa@_set_masked_lanes_ps(cos, _mm@vsize@_set1_ps(NPY_NANF), nan_mask);
+
+ @masked_store@(op, @cvtps_epi32@(load_mask), cos);
+ }
+
+ /* process elements using glibc for large elements */
+ if (my_trig_op == npy_compute_cos) {
+ for (int ii = 0; iglibc_mask != 0; ii++) {
+ if (iglibc_mask & 0x01) {
+ op[ii] = npy_cosf(ip[ii]);
+ }
+ iglibc_mask = iglibc_mask >> 1;
+ }
+ }
+ else {
+ for (int ii = 0; iglibc_mask != 0; ii++) {
+ if (iglibc_mask & 0x01) {
+ op[ii] = npy_sinf(ip[ii]);
+ }
+ iglibc_mask = iglibc_mask >> 1;
+ }
+ }
+ ip += num_lanes*stride;
+ op += num_lanes;
+ num_remaining_elements -= num_lanes;
+ }
+}
/*
* Vectorized implementation of exp using AVX2 and AVX512:
* same x = 0xc2781e37)
*/
-#if defined HAVE_ATTRIBUTE_TARGET_@ISA@_WITH_INTRINSICS
static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
@ISA@_exp_FLOAT(npy_float * op,
npy_float * ip,
@vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]);
@mask@ xmax_mask, xmin_mask, nan_mask, inf_mask;
- @mask@ overflow_mask = @isa@_get_partial_load_mask(0, num_lanes);
- @mask@ load_mask = @isa@_get_full_load_mask();
+ @mask@ overflow_mask = @isa@_get_partial_load_mask_ps(0, num_lanes);
+ @mask@ load_mask = @isa@_get_full_load_mask_ps();
npy_intp num_remaining_elements = array_size;
while (num_remaining_elements > 0) {
if (num_remaining_elements < num_lanes) {
- load_mask = @isa@_get_partial_load_mask(num_remaining_elements,
- num_lanes);
+ load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements,
+ num_lanes);
}
@vtype@ x;
if (stride == 1) {
- x = @isa@_masked_load(load_mask, ip);
+ x = @isa@_masked_load_ps(load_mask, ip);
}
else {
- x = @isa@_masked_gather(zeros_f, ip, vindex, load_mask);
+ x = @isa@_masked_gather_ps(zeros_f, ip, vindex, load_mask);
}
nan_mask = _mm@vsize@_cmp_ps@vsub@(x, x, _CMP_NEQ_UQ);
- x = @isa@_set_masked_lanes(x, zeros_f, nan_mask);
+ x = @isa@_set_masked_lanes_ps(x, zeros_f, nan_mask);
xmax_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmax), _CMP_GE_OQ);
xmin_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmin), _CMP_LE_OQ);
overflow_mask = @or_masks@(overflow_mask,
@xor_masks@(xmax_mask, inf_mask));
- x = @isa@_set_masked_lanes(x, zeros_f, @or_masks@(
+ x = @isa@_set_masked_lanes_ps(x, zeros_f, @or_masks@(
@or_masks@(nan_mask, xmin_mask), xmax_mask));
quadrant = _mm@vsize@_mul_ps(x, log2e);
* elem < xmin; return 0.0f
* elem = +/- nan, return nan
*/
- poly = @isa@_set_masked_lanes(poly, _mm@vsize@_set1_ps(NPY_NANF), nan_mask);
- poly = @isa@_set_masked_lanes(poly, inf, xmax_mask);
- poly = @isa@_set_masked_lanes(poly, zeros_f, xmin_mask);
+ poly = @isa@_set_masked_lanes_ps(poly, _mm@vsize@_set1_ps(NPY_NANF), nan_mask);
+ poly = @isa@_set_masked_lanes_ps(poly, inf, xmax_mask);
+ poly = @isa@_set_masked_lanes_ps(poly, zeros_f, xmin_mask);
@masked_store@(op, @cvtps_epi32@(load_mask), poly);
@vtype@ poly, num_poly, denom_poly, exponent;
@mask@ inf_mask, nan_mask, sqrt2_mask, zero_mask, negx_mask;
- @mask@ invalid_mask = @isa@_get_partial_load_mask(0, num_lanes);
+ @mask@ invalid_mask = @isa@_get_partial_load_mask_ps(0, num_lanes);
@mask@ divide_by_zero_mask = invalid_mask;
- @mask@ load_mask = @isa@_get_full_load_mask();
+ @mask@ load_mask = @isa@_get_full_load_mask_ps();
npy_intp num_remaining_elements = array_size;
while (num_remaining_elements > 0) {
if (num_remaining_elements < num_lanes) {
- load_mask = @isa@_get_partial_load_mask(num_remaining_elements,
- num_lanes);
+ load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements,
+ num_lanes);
}
@vtype@ x_in;
if (stride == 1) {
- x_in = @isa@_masked_load(load_mask, ip);
+ x_in = @isa@_masked_load_ps(load_mask, ip);
}
else {
- x_in = @isa@_masked_gather(zeros_f, ip, vindex, load_mask);
+ x_in = @isa@_masked_gather_ps(zeros_f, ip, vindex, load_mask);
}
negx_mask = _mm@vsize@_cmp_ps@vsub@(x_in, zeros_f, _CMP_LT_OQ);
@and_masks@(zero_mask, load_mask));
invalid_mask = @or_masks@(invalid_mask, negx_mask);
- @vtype@ x = @isa@_set_masked_lanes(x_in, zeros_f, negx_mask);
+ @vtype@ x = @isa@_set_masked_lanes_ps(x_in, zeros_f, negx_mask);
/* set x = normalized mantissa */
exponent = @isa@_get_exponent(x);
* x = +/- NAN; return NAN
* x = 0.0f; return -INF
*/
- poly = @isa@_set_masked_lanes(poly, nan, nan_mask);
- poly = @isa@_set_masked_lanes(poly, neg_nan, negx_mask);
- poly = @isa@_set_masked_lanes(poly, neg_inf, zero_mask);
- poly = @isa@_set_masked_lanes(poly, inf, inf_mask);
+ poly = @isa@_set_masked_lanes_ps(poly, nan, nan_mask);
+ poly = @isa@_set_masked_lanes_ps(poly, neg_nan, negx_mask);
+ poly = @isa@_set_masked_lanes_ps(poly, neg_inf, zero_mask);
+ poly = @isa@_set_masked_lanes_ps(poly, inf, inf_mask);
@masked_store@(op, @cvtps_epi32@(load_mask), poly);
}
}
else {
- /*
- * If the deprecated behavior is ever removed,
- * keep only the else branch of this if-else
- */
- if (PyArray_Check(out_kwd) || out_kwd == Py_None) {
- if (DEPRECATE("passing a single array to the "
- "'out' keyword argument of a "
- "ufunc with\n"
- "more than one output will "
- "result in an error in the "
- "future") < 0) {
- /* The future error message */
- PyErr_SetString(PyExc_TypeError,
- "'out' must be a tuple of arrays");
- goto fail;
- }
- if (_set_out_array(out_kwd, out_op+nin) < 0) {
- goto fail;
- }
- }
- else {
- PyErr_SetString(PyExc_TypeError,
- nout > 1 ? "'out' must be a tuple "
- "of arrays" :
- "'out' must be an array or a "
- "tuple of a single array");
- goto fail;
- }
+ PyErr_SetString(PyExc_TypeError,
+ nout > 1 ? "'out' must be a tuple of arrays" :
+ "'out' must be an array or a tuple with "
+ "a single array");
+ goto fail;
}
}
/*
* Returns 0 on success, and -1 on failure
*/
static int
-_parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis,
+_parse_axis_arg(PyUFuncObject *ufunc, const int core_num_dims[], PyObject *axis,
PyArrayObject **op, int broadcast_ndim, int **remap_axis) {
int nop = ufunc->nargs;
int iop, axis_int;
*/
static int
_get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
- int *op_core_num_dims, npy_uint32 *core_dim_flags,
+ const int *op_core_num_dims, npy_uint32 *core_dim_flags,
npy_intp *core_dim_sizes, int **remap_axis) {
int i;
int nin = ufunc->nin;
for (i = 0; i < ind_size; ++i) {
if (reduceat_ind[i] < 0 || reduceat_ind[i] >= red_axis_size) {
PyErr_Format(PyExc_IndexError,
- "index %d out-of-bounds in %s.%s [0, %d)",
- (int)reduceat_ind[i], ufunc_name, opname, (int)red_axis_size);
+ "index %" NPY_INTP_FMT " out-of-bounds in %s.%s [0, %" NPY_INTP_FMT ")",
+ reduceat_ind[i], ufunc_name, opname, red_axis_size);
return NULL;
}
}
wrapped = _apply_array_wrap(wraparr[i], mps[j], &context);
mps[j] = NULL; /* Prevent fail double-freeing this */
if (wrapped == NULL) {
+ for (j = 0; j < i; j++) {
+ Py_DECREF(retobj[j]);
+ }
goto fail;
}
NPY_NO_EXPORT int
PyUFunc_ReplaceLoopBySignature(PyUFuncObject *func,
PyUFuncGenericFunction newfunc,
- int *signature,
+ const int *signature,
PyUFuncGenericFunction *oldfunc)
{
int i, j;
char *types, int ntypes,
int nin, int nout, int identity,
const char *name, const char *doc,
- int unused, const char *signature,
+ const int unused, const char *signature,
PyObject *identity_value)
{
PyUFuncObject *ufunc;
PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
int usertype,
PyUFuncGenericFunction function,
- int *arg_types,
+ const int *arg_types,
void *data)
{
PyArray_Descr *descr;
* Create dtypes array for either one or two input operands.
* The output operand is set to the first input operand
*/
- dtypes[0] = PyArray_DESCR(op1_array);
operands[0] = op1_array;
if (op2_array != NULL) {
- dtypes[1] = PyArray_DESCR(op2_array);
- dtypes[2] = dtypes[0];
operands[1] = op2_array;
operands[2] = op1_array;
nop = 3;
}
else {
- dtypes[1] = dtypes[0];
- dtypes[2] = NULL;
operands[1] = op1_array;
operands[2] = NULL;
nop = 2;
Py_XDECREF(op2_array);
Py_XDECREF(iter);
Py_XDECREF(iter2);
- Py_XDECREF(array_operands[0]);
- Py_XDECREF(array_operands[1]);
- Py_XDECREF(array_operands[2]);
+ for (i = 0; i < 3; i++) {
+ Py_XDECREF(dtypes[i]);
+ Py_XDECREF(array_operands[i]);
+ }
if (needs_api && PyErr_Occurred()) {
return NULL;
Py_XDECREF(op2_array);
Py_XDECREF(iter);
Py_XDECREF(iter2);
- Py_XDECREF(array_operands[0]);
- Py_XDECREF(array_operands[1]);
- Py_XDECREF(array_operands[2]);
+ for (i = 0; i < 3; i++) {
+ Py_XDECREF(dtypes[i]);
+ Py_XDECREF(array_operands[i]);
+ }
return NULL;
}
}
out_dtypes[0] = ensure_dtype_nbo(dtype);
+ Py_DECREF(dtype);
if (out_dtypes[0] == NULL) {
return -1;
}
/* The type resolver would have upcast already */
if (out_dtypes[0]->type_num == NPY_BOOL) {
PyErr_Format(PyExc_TypeError,
- "numpy boolean subtract, the `-` operator, is deprecated, "
+ "numpy boolean subtract, the `-` operator, is not supported, "
"use the bitwise_xor, the `^` operator, or the logical_xor "
"function instead.");
return -1;
npy_intp i, j, nin = self->nin, nop = nin + self->nout;
int types[NPY_MAXARGS];
const char *ufunc_name;
- int no_castable_output, use_min_scalar;
+ int no_castable_output = 0;
+ int use_min_scalar;
/* For making a better error message on coercion error */
char err_dst_typecode = '-', err_src_typecode = '-';
out_dtypes[1] = out_dtypes[0];
Py_INCREF(out_dtypes[1]);
out_dtypes[2] = PyArray_DescrFromType(NPY_LONGLONG);
- Py_INCREF(out_dtypes[2]);
out_dtypes[3] = out_dtypes[0];
Py_INCREF(out_dtypes[3]);
}
--- /dev/null
+"""
+Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
+"""
+import numpy as np
+
+_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
+
+class TestArrayMemoryError:
+ def test_str(self):
+ e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
+ str(e) # not crashing is enough
+
+ # testing these properties is easier than testing the full string repr
+ def test__size_to_string(self):
+ """ Test e._size_to_string """
+ f = _ArrayMemoryError._size_to_string
+ Ki = 1024
+ assert f(0) == '0 bytes'
+ assert f(1) == '1 bytes'
+ assert f(1023) == '1023 bytes'
+ assert f(Ki) == '1.00 KiB'
+ assert f(Ki+1) == '1.00 KiB'
+ assert f(10*Ki) == '10.0 KiB'
+ assert f(int(999.4*Ki)) == '999. KiB'
+ assert f(int(1023.4*Ki)) == '1023. KiB'
+ assert f(int(1023.5*Ki)) == '1.00 MiB'
+ assert f(Ki*Ki) == '1.00 MiB'
+
+ # 1023.9999 MiB should round to 1 GiB
+ assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
+ assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
+ # larger than sys.maxsize; adding larger prefixes isn't going to help
+ # anyway.
+ assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'
+
+ def test__total_size(self):
+ """ Test e._total_size """
+ e = _ArrayMemoryError((1,), np.dtype(np.uint8))
+ assert e._total_size == 1
+
+ e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
+ assert e._total_size == 1024
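+ # worked arithmetic (illustrative): 2 * 4 elements, each a 16-item
+ # uint64 subarray of 128 bytes, so 8 * 128 = 1024 bytes in total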
)
def test_array_astype_warning(t):
# test ComplexWarning when casting from complex to float or int
- a = np.array(10, dtype=np.complex)
+ a = np.array(10, dtype=np.complex_)
assert_warns(np.ComplexWarning, a.astype, t)
def test_copyto_fromscalar():
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
- # check for backcompat that using FloatFormat works and emits warning
- with assert_warns(DeprecationWarning):
- fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False)
- assert_equal(np.array2string(x, formatter={'float_kind': fmt}),
- '[0. 1. 2.]')
def test_structure_format(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
)
def test_bad_args(self):
- assert_raises(ValueError, np.set_printoptions, threshold='nan')
- assert_raises(ValueError, np.set_printoptions, threshold=u'1')
- assert_raises(ValueError, np.set_printoptions, threshold=b'1')
+ assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
+ assert_raises(TypeError, np.set_printoptions, threshold='1')
+ assert_raises(TypeError, np.set_printoptions, threshold=b'1')
def test_unicode_object_array():
import sys
# Can cast safely/same_kind from integer to timedelta
assert_(np.can_cast('i8', 'm8', casting='same_kind'))
assert_(np.can_cast('i8', 'm8', casting='safe'))
+ assert_(np.can_cast('i4', 'm8', casting='same_kind'))
+ assert_(np.can_cast('i4', 'm8', casting='safe'))
+ assert_(np.can_cast('u4', 'm8', casting='same_kind'))
+ assert_(np.can_cast('u4', 'm8', casting='safe'))
+
+ # Cannot cast safely from unsigned integer of the same size, which
+ # could overflow
+ assert_(np.can_cast('u8', 'm8', casting='same_kind'))
+ assert_(not np.can_cast('u8', 'm8', casting='safe'))
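+ # (for example, 2**63 is representable as u8 but exceeds the signed
+ # 64-bit range backing m8)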
# Cannot cast safely/same_kind from float to timedelta
assert_(not np.can_cast('f4', 'm8', casting='same_kind'))
assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
+ @pytest.mark.parametrize("size", [
+ 3, 21, 217, 1000])
+ def test_datetime_nat_argsort_stability(self, size):
+ # NaT < NaT should be False internally for
+ # sort stability
+ expected = np.arange(size)
+ arr = np.tile(np.datetime64('NaT'), size)
+ assert_equal(np.argsort(arr, kind='mergesort'), expected)
+
+ @pytest.mark.parametrize("size", [
+ 3, 21, 217, 1000])
+ def test_timedelta_nat_argsort_stability(self, size):
+ # NaT < NaT should be False internally for
+ # sort stability
+ expected = np.arange(size)
+ arr = np.tile(np.timedelta64('NaT'), size)
+ assert_equal(np.argsort(arr, kind='mergesort'), expected)
+
+ @pytest.mark.parametrize("arr, expected", [
+ # the example provided in gh-12629
+ (['NaT', 1, 2, 3],
+ [1, 2, 3, 'NaT']),
+ # multiple NaTs
+ (['NaT', 9, 'NaT', -707],
+ [-707, 9, 'NaT', 'NaT']),
+ # this sort explores another code path for NaT
+ ([1, -2, 3, 'NaT'],
+ [-2, 1, 3, 'NaT']),
+ # 2-D array
+ ([[51, -220, 'NaT'],
+ [-17, 'NaT', -90]],
+ [[-220, 51, 'NaT'],
+ [-90, -17, 'NaT']]),
+ ])
+ @pytest.mark.parametrize("dtype", [
+ 'M8[ns]', 'M8[us]',
+ 'm8[ns]', 'm8[us]'])
+ def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):
+ # fix for gh-12629 and gh-15063; NaT sorting to end of array
+ arr = np.array(arr, dtype=dtype)
+ expected = np.array(expected, dtype=dtype)
+ arr.sort()
+ assert_equal(arr, expected)
+
def test_datetime_scalar_construction(self):
# Construct with different units
assert_equal(np.datetime64('1950-03-12', 'D'),
assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
+ # NaN -> NaT
+ nan = np.array([np.nan] * 8)
+ fnan = nan.astype('f')
+ lnan = nan.astype('g')
+ cnan = nan.astype('D')
+ cfnan = nan.astype('F')
+ clnan = nan.astype('G')
+
+ nat = np.array([np.datetime64('NaT')] * 8)
+ assert_equal(nan.astype('M8[ns]'), nat)
+ assert_equal(fnan.astype('M8[ns]'), nat)
+ assert_equal(lnan.astype('M8[ns]'), nat)
+ assert_equal(cnan.astype('M8[ns]'), nat)
+ assert_equal(cfnan.astype('M8[ns]'), nat)
+ assert_equal(clnan.astype('M8[ns]'), nat)
+
+ nat = np.array([np.timedelta64('NaT')] * 8)
+ assert_equal(nan.astype('timedelta64[ns]'), nat)
+ assert_equal(fnan.astype('timedelta64[ns]'), nat)
+ assert_equal(lnan.astype('timedelta64[ns]'), nat)
+ assert_equal(cnan.astype('timedelta64[ns]'), nat)
+ assert_equal(cfnan.astype('timedelta64[ns]'), nat)
+ assert_equal(clnan.astype('timedelta64[ns]'), nat)
+
def test_days_creation(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3 - 365)
# Interaction with NaT
a = np.array('1999-03-12T13', dtype='M8[2m]')
dtnat = np.array('NaT', dtype='M8[h]')
- assert_equal(np.minimum(a, dtnat), a)
- assert_equal(np.minimum(dtnat, a), a)
- assert_equal(np.maximum(a, dtnat), a)
- assert_equal(np.maximum(dtnat, a), a)
+ assert_equal(np.minimum(a, dtnat), dtnat)
+ assert_equal(np.minimum(dtnat, a), dtnat)
+ assert_equal(np.maximum(a, dtnat), dtnat)
+ assert_equal(np.maximum(dtnat, a), dtnat)
+ assert_equal(np.fmin(dtnat, a), a)
+ assert_equal(np.fmin(a, dtnat), a)
+ assert_equal(np.fmax(dtnat, a), a)
+ assert_equal(np.fmax(a, dtnat), a)
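+ # Note: minimum/maximum now propagate NaT (mirroring their NaN
+ # behavior), while fmin/fmax prefer the non-NaT operand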
# Also do timedelta
a = np.array(3, dtype='m8[h]')
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
- assert_raises(ValueError, np.arange, d)
+ assert_equal(np.arange(d), np.arange(0, d))
def test_datetime_maximum_reduce(self):
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
- def test_isfinite(self):
+ def test_isfinite_scalar(self):
assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
assert_(np.isfinite(np.timedelta64(34, "ms")))
- res = np.array([True, True, False])
- for unit in ['Y', 'M', 'W', 'D',
- 'h', 'm', 's', 'ms', 'us',
- 'ns', 'ps', 'fs', 'as']:
- arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
- arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
- arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
- arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
+ @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
+ 'us', 'ns', 'ps', 'fs', 'as'])
+ @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
+ '<timedelta64[%s]', '>timedelta64[%s]'])
+ def test_isfinite_isinf_isnan_units(self, unit, dstr):
+ '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes
+ '''
+ arr_val = [123, -321, "NaT"]
+ arr = np.array(arr_val, dtype=dstr % unit)
+ pos = np.array([True, True, False])
+ neg = np.array([False, False, True])
+ false = np.array([False, False, False])
+ assert_equal(np.isfinite(arr), pos)
+ assert_equal(np.isinf(arr), false)
+ assert_equal(np.isnan(arr), neg)
+
+ def test_assert_equal(self):
+ assert_raises(AssertionError, assert_equal,
+ np.datetime64('nat'), np.timedelta64('nat'))
def test_corecursive_input(self):
# construct a co-recursive list
import operator
import warnings
import pytest
+import shutil
+import tempfile
import numpy as np
from numpy.testing import (
- assert_raises, assert_warns, assert_
+ assert_raises, assert_warns, assert_, assert_array_equal
)
+from numpy.core._multiarray_tests import fromstring_null_term_c_api
+
try:
import pytz
_has_pytz = True
a[[0, 1]]
-class TestRankDeprecation(_DeprecationTestCase):
- """Test that np.rank is deprecated. The function should simply be
- removed. The VisibleDeprecationWarning may become unnecessary.
- """
-
- def test(self):
- a = np.arange(10)
- assert_warns(np.VisibleDeprecationWarning, np.rank, a)
-
-
class TestComparisonDeprecations(_DeprecationTestCase):
"""This tests the deprecation, for non-element-wise comparison logic.
This used to mean that when an error occurred during element-wise comparison
# (warning is issued a couple of times here)
self.assert_deprecated(op, args=(a, a[:-1]), num=None)
- # Element comparison error (numpy array can't be compared).
+ # ragged array comparison returns True/False
a = np.array([1, np.array([1,2,3])], dtype=object)
b = np.array([1, np.array([1,2,3])], dtype=object)
self.assert_deprecated(op, args=(a, b), num=None)
self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
-class TestInvalidOrderParameterInputForFlattenArrayDeprecation(_DeprecationTestCase):
- """Invalid arguments to the ORDER parameter in array.flatten() should not be
- allowed and should raise an error. However, in the interests of not breaking
- code that may inadvertently pass invalid arguments to this parameter, a
- DeprecationWarning will be issued instead for the time being to give developers
- time to refactor relevant code.
- """
-
- def test_flatten_array_non_string_arg(self):
- x = np.zeros((3, 5))
- self.message = ("Non-string object detected for "
- "the array ordering. Please pass "
- "in 'C', 'F', 'A', or 'K' instead")
- self.assert_deprecated(x.flatten, args=(np.pi,))
-
- def test_flatten_array_invalid_string_arg(self):
- # Tests that a DeprecationWarning is raised
- # when a string of length greater than one
- # starting with "C", "F", "A", or "K" (case-
- # and unicode-insensitive) is passed in for
- # the ORDER parameter. Otherwise, a TypeError
- # will be raised!
-
- x = np.zeros((3, 5))
- self.message = ("Non length-one string passed "
- "in for the array ordering. Please "
- "pass in 'C', 'F', 'A', or 'K' instead")
- self.assert_deprecated(x.flatten, args=("FACK",))
-
-
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
"""Assigning the 'data' attribute of an ndarray is unsafe as pointed
out in gh-7093. Eventually, such assignment should NOT be allowed, but
self.assert_deprecated(a.__setattr__, args=('data', b.data))
-class TestLinspaceInvalidNumParameter(_DeprecationTestCase):
- """Argument to the num parameter in linspace that cannot be
- safely interpreted as an integer is deprecated in 1.12.0.
-
- Argument to the num parameter in linspace that cannot be
- safely interpreted as an integer should not be allowed.
- In the interest of not breaking code that passes
- an argument that could still be interpreted as an integer, a
- DeprecationWarning will be issued for the time being to give
- developers time to refactor relevant code.
- """
- def test_float_arg(self):
- # 2016-02-25, PR#7328
- self.assert_deprecated(np.linspace, args=(0, 10, 2.5))
-
-
class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
"""
If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
assert_(npy_char_deprecation() == 'S1')
+class TestPyArray_AS1D(_DeprecationTestCase):
+ def test_npy_pyarrayas1d_deprecation(self):
+ from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
+ assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
+
+
+class TestPyArray_AS2D(_DeprecationTestCase):
+ def test_npy_pyarrayas2d_deprecation(self):
+ from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
+ assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
+
+
class Test_UPDATEIFCOPY(_DeprecationTestCase):
"""
v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
+class TestAlen(_DeprecationTestCase):
+ # 2019-08-02, 1.18.0
+ def test_alen(self):
+ self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3])))
+
+
class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
def test_generator_sum(self):
def test_positive_on_non_number(self):
self.assert_deprecated(operator.pos, args=(np.array('foo'),))
+
class TestFromstring(_DeprecationTestCase):
# 2017-10-19, 1.14
def test_fromstring(self):
self.assert_deprecated(np.fromstring, args=('\x00'*80,))
+
+class TestFromStringAndFileInvalidData(_DeprecationTestCase):
+ # 2019-06-08, 1.17.0
+ # These tests should be moved to regular tests when the deprecation expires.
+ message = "string or file could not be read to its end"
+
+ @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
+ def test_deprecate_unparsable_data_file(self, invalid_str):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+
+ with tempfile.TemporaryFile(mode="w") as f:
+ x.tofile(f, sep=',', format='%.2f')
+ f.write(invalid_str)
+
+ f.seek(0)
+ self.assert_deprecated(lambda: np.fromfile(f, sep=","))
+ f.seek(0)
+ self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
+ # Should not raise:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+ f.seek(0)
+ res = np.fromfile(f, sep=",", count=4)
+ assert_array_equal(res, x)
+
+ @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
+ def test_deprecate_unparsable_string(self, invalid_str):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+ x_str = "1.51,2,3.51,4{}".format(invalid_str)
+
+ self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
+ self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
+
+ # The C-level API can also read strings that are not fixed-size but
+ # null-terminated, so test that as well:
+ bytestr = x_str.encode("ascii")
+ self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
+
+ with assert_warns(DeprecationWarning):
+ # This is slightly odd: fromstring may leave trailing entries
+ # uninitialized (ideally it would raise an error when count is
+ # larger than the data actually available).
+ res = np.fromstring(x_str, sep=",", count=5)
+ assert_array_equal(res[:-1], x)
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+
+ # Should not raise:
+ res = np.fromstring(x_str, sep=",", count=4)
+ assert_array_equal(res, x)
+
+
class Test_GetSet_NumericOps(_DeprecationTestCase):
# 2018-09-20, 1.16.0
def test_get_numeric_ops(self):
from numpy.core._multiarray_tests import getset_numericops
self.assert_deprecated(getset_numericops, num=2)
-
+
# empty kwargs prevents any state actually changing which would break
# other tests.
self.assert_deprecated(np.set_numeric_ops, kwargs={})
class TestBuiltin(object):
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
- np.unicode])
+ np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
assert_raises(ValueError, np.dtype,
{'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
+ def test_fieldless_views(self):
+ a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
+ 'itemsize':8})
+ assert_raises(ValueError, a.view, np.dtype([]))
+
+ d = np.dtype((np.dtype([]), 10))
+ assert_equal(d.shape, (10,))
+ assert_equal(d.itemsize, 0)
+ assert_equal(d.base, np.dtype([]))
+
+ arr = np.fromiter((() for i in range(10)), [])
+ assert_equal(arr.dtype, np.dtype([]))
+ assert_raises(ValueError, np.frombuffer, b'', dtype=[])
+ assert_equal(np.frombuffer(b'', dtype=[], count=2),
+ np.empty(2, dtype=[]))
+
+ assert_raises(ValueError, np.dtype, ([], 'f8'))
+ assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
+
+ assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
+ np.ones(2, dtype=bool))
+
+ assert_equal(np.zeros((1, 2), dtype=[]) == a,
+ np.ones((1, 2), dtype=bool))
+
class TestSubarray(object):
def test_single_subarray(self):
new_dtype = np.dtype(dtype.descr)
assert_equal(new_dtype.itemsize, 16)
- @pytest.mark.parametrize('t', np.typeDict.values())
- def test_name_builtin(self, t):
- name = t.__name__
- if name.endswith('_'):
- name = name[:-1]
- assert_equal(np.dtype(t).name, name)
-
def test_name_dtype_subclass(self):
# Ticket #4357
class user_def_subcls(np.void):
assert_equal(x[0], y[0])
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
- np.unicode, bool])
+ np.compat.unicode, bool])
def test_builtin(self, t):
self.check_pickling(np.dtype(t))
assert_(len(y) == 50)
y = logspace(0, 6, num=100)
assert_(y[-1] == 10 ** 6)
- y = logspace(0, 6, endpoint=0)
+ y = logspace(0, 6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
assert_(len(y) == 50)
y = linspace(2, 10, num=100)
assert_(y[-1] == 10)
- y = linspace(2, 10, endpoint=0)
+ y = linspace(2, 10, endpoint=False)
assert_(y[-1] < 10)
assert_raises(ValueError, linspace, 0, 10, num=-1)
def test_corner(self):
y = list(linspace(0, 1, 1))
assert_(y == [0.0], y)
- with suppress_warnings() as sup:
- sup.filter(DeprecationWarning, ".*safely interpreted as an integer")
- y = list(linspace(0, 1, 2.5))
- assert_(y == [0.0, 1.0])
+ assert_raises(TypeError, linspace, 0, 1, num=2.5)
def test_type(self):
t1 = linspace(0, 1, 0).dtype
arange(j+1, dtype=int))
def test_retstep(self):
- y = linspace(0, 1, 2, retstep=True)
- assert_(isinstance(y, tuple) and len(y) == 2)
- for num in (0, 1):
- for ept in (False, True):
+ for num in [0, 1, 2]:
+ for ept in [False, True]:
y = linspace(0, 1, num, endpoint=ept, retstep=True)
- assert_(isinstance(y, tuple) and len(y) == 2 and
- len(y[0]) == num and isnan(y[1]),
- 'num={0}, endpoint={1}'.format(num, ept))
+ assert isinstance(y, tuple) and len(y) == 2
+ if num == 2:
+ y0_expect = [0.0, 1.0] if ept else [0.0, 0.5]
+ assert_array_equal(y[0], y0_expect)
+ assert_equal(y[1], y0_expect[1])
+ elif num == 1 and not ept:
+ assert_array_equal(y[0], [0.0])
+ assert_equal(y[1], 1.0)
+ else:
+ assert_array_equal(y[0], [0.0][:num])
+ assert isnan(y[1])
def test_object(self):
start = array(1, dtype='O')
--- /dev/null
+import pytest
+import warnings
+import numpy as np
+
+
+class Wrapper:
+ def __init__(self, array):
+ self.array = array
+
+ def __len__(self):
+ return len(self.array)
+
+ def __getitem__(self, item):
+ return type(self)(self.array[item])
+
+ def __getattr__(self, name):
+ if name.startswith("__array_"):
+ warnings.warn("object got converted", UserWarning, stacklevel=1)
+
+ return getattr(self.array, name)
+
+ def __repr__(self):
+ return "<Wrapper({self.array})>".format(self=self)
+
+@pytest.mark.filterwarnings("error")
+def test_getattr_warning():
+ array = Wrapper(np.arange(10))
+ with pytest.raises(UserWarning, match="object got converted"):
+ np.asarray(array)
import numpy as np
from numpy.testing import (
- assert_, assert_equal, assert_raises, assert_array_equal, temppath,
+ assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
+ temppath,
)
from numpy.core.tests._locales import CommaDecimalPointLocale
err_msg="reading '%s'" % s)
+def test_fromstring_complex():
+ for ctype in ["complex", "cdouble", "cfloat"]:
+ # Check spacing around the separator
+ assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype),
+ np.array([1., 2., 3., 4.]))
+ # Real component not specified
+ assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype),
+ np.array([1.j, -2.j, 3.j, 40.j]))
+ # Both components specified
+ assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype),
+ np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+ # Spaces at wrong places and otherwise malformed values
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+j", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","),
+ np.array([1j]))
+
+
def test_fromstring_bogus():
- assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
- np.array([1., 2., 3.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
+ np.array([1., 2., 3.]))
def test_fromstring_empty():
- assert_equal(np.fromstring("xxxxx", sep="x"),
- np.array([]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("xxxxx", sep="x"),
+ np.array([]))
def test_fromstring_missing():
- assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
- np.array([1]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
+ np.array([1]))
class TestFileBased(object):
with temppath() as path:
with open(path, 'wt') as f:
f.write("1. 2. 3. flop 4.\n")
- res = np.fromfile(path, dtype=float, sep=" ")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=float, sep=" ")
assert_equal(res, np.array([1., 2., 3.]))
+ def test_fromfile_complex(self):
+ for ctype in ["complex", "cdouble", "cfloat"]:
+ # Check spacing around the separator; only the real component is given
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1, 2 , 3 ,4\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1., 2., 3., 4.]))
+
+ # Real component not specified
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1j, -2j, 3j, 4e1j\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j]))
+
+ # Both components specified
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+1j,2-2j, -3+3j, -4e1+4j\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+2 j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+ 2j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1 +2j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Missing imaginary coefficient
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+j\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Value truncated after the sign
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Imaginary component written before the real component
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1j+1\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.j]))
+
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_fromfile(self):
assert_equal(a[0], f)
def test_fromstring_best_effort_float(self):
- assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
- np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
+ np.array([1.]))
def test_fromstring_best_effort(self):
- assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
- np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
+ np.array([1.]))
def test_fromstring_foreign(self):
s = "1.234"
assert_array_equal(a, b)
def test_fromstring_foreign_value(self):
- b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
- assert_array_equal(b[0], 1)
+ with assert_warns(DeprecationWarning):
+ b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
+ assert_array_equal(b[0], 1)
+
@pytest.mark.parametrize("int_val", [
# cases discussed in gh-10723
import weakref
import pytest
from contextlib import contextmanager
+from test.support import no_tracing
from numpy.compat import pickle
# Ensure that any base being writeable is sufficient to change flag;
# this is especially interesting for arrays from an array interface.
arr = np.arange(10)
-
+
class subclass(np.ndarray):
pass
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_sequence_long(self):
- assert_equal(np.array([long(4), long(4)]).dtype, np.long)
+ assert_equal(np.array([long(4), long(4)]).dtype, long)
assert_equal(np.array([long(4), 2**80]).dtype, object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
assert_equal(np.array([2**80, long(4)]).dtype, object)
# test unicode sorts.
s = 'aaaaaaaa'
- a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
+ a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_)
b = a[::-1].copy()
for kind in self.sort_kinds:
msg = "unicode sort, kind=%s" % kind
# test unicode argsorts.
s = 'aaaaaaaa'
- a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
+ a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
- a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
+ a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode_)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
- dtype=np.unicode)
+ dtype=np.unicode_)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
+ def test_flatten_invalid_order(self):
+ # invalid after gh-14596
+ for order in ['Z', 'c', False, True, 0, 8]:
+ x = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
+ assert_raises(ValueError, x.flatten, {"order": order})
+
@pytest.mark.parametrize('func', (np.dot, np.matmul))
def test_arr_mult(self, func):
a = np.array([[1, 0], [0, 1]])
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', DeprecationWarning)
- assert_equal(np.modf(dummy, out=a), (0,))
- assert_(w[0].category is DeprecationWarning)
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs
+ np.modf(dummy, out=a)
+
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
def test_datetime64_byteorder(self):
original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]')
-
+
original_byte_reversed = original.copy(order='K')
original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S')
original_byte_reversed.byteswap(inplace=True)
new = pickle.loads(pickle.dumps(original_byte_reversed))
-
+
assert_equal(original.dtype, new.dtype)
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
- np.datetime64('1932-10-10T03:50:30')], 4),
+ np.datetime64('1932-10-10T03:50:30')], 0),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
- np.datetime64('2013-05-08T18:15:23')], 0),
+ np.datetime64('2013-05-08T18:15:23')], 2),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
- np.timedelta64(3, 's')], 3),
+ np.timedelta64(3, 's')], 2),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
+ @pytest.mark.leaks_references(reason="replaces None with NULL.")
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
- np.datetime64('1932-10-10T03:50:30')], 5),
+ np.datetime64('1932-10-10T03:50:30')], 0),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
- np.datetime64('2013-05-08T18:15:23')], 4),
+ np.datetime64('2013-05-08T18:15:23')], 2),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
- np.timedelta64(3, 's')], 1),
+ np.timedelta64(3, 's')], 2),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
+ @pytest.mark.leaks_references(reason="replaces None with NULL.")
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
- # NaTs are ignored
+ # NaT propagates instead of being ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
- a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
- a[0] = 'NaT'
- assert_equal(np.amin(a), a[1])
- assert_equal(np.amax(a), a[9])
- a.fill('NaT')
- assert_equal(np.amin(a), a[0])
- assert_equal(np.amax(a), a[0])
+ a[3] = 'NaT'
+ assert_equal(np.amin(a), a[3])
+ assert_equal(np.amax(a), a[3])
class TestNewaxis(object):
offset_bytes = self.dtype.itemsize
z = np.fromfile(f, dtype=self.dtype, offset=offset_bytes)
assert_array_equal(z, self.x.flat[offset_items+count_items+1:])
-
+
with open(self.filename, 'wb') as f:
self.x.tofile(f, sep=",")
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
- self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
+ with assert_warns(DeprecationWarning):
+ self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
self.test_tofile_sep()
self.test_tofile_format()
+ def test_fromfile_subarray_binary(self):
+ # Test subarray dtypes which are absorbed into the shape
+ x = np.arange(24, dtype="i4").reshape(2, 3, 4)
+ x.tofile(self.filename)
+ res = np.fromfile(self.filename, dtype="(3,4)i4")
+ assert_array_equal(x, res)
+
+ x_str = x.tobytes()
+ with assert_warns(DeprecationWarning):
+ # binary fromstring is deprecated
+ res = np.fromstring(x_str, dtype="(3,4)i4")
+ assert_array_equal(x, res)
+
class TestFromBuffer(object):
@pytest.mark.parametrize('byteorder', ['<', '>'])
class TestResize(object):
+
+ @no_tracing
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
+ @no_tracing
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
+ @no_tracing
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
+ @no_tracing
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
+ @no_tracing
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
r3 = np.matmul(args[0].copy(), args[1].copy())
assert_equal(r1, r3)
-
+
def test_matmul_object(self):
import fractions
f = np.vectorize(fractions.Fraction)
def random_ints():
return np.random.randint(1, 1000, size=(10, 3, 3))
- M1 = f(random_ints(), random_ints())
+ M1 = f(random_ints(), random_ints())
M2 = f(random_ints(), random_ints())
M3 = self.matmul(M1, M2)
class TestAlen(object):
def test_basic(self):
- m = np.array([1, 2, 3])
- assert_equal(np.alen(m), 3)
+ with pytest.warns(DeprecationWarning):
+ m = np.array([1, 2, 3])
+ assert_equal(np.alen(m), 3)
- m = np.array([[1, 2, 3], [4, 5, 7]])
- assert_equal(np.alen(m), 2)
+ m = np.array([[1, 2, 3], [4, 5, 7]])
+ assert_equal(np.alen(m), 2)
- m = [1, 2, 3]
- assert_equal(np.alen(m), 3)
+ m = [1, 2, 3]
+ assert_equal(np.alen(m), 3)
- m = [[1, 2, 3], [4, 5, 7]]
- assert_equal(np.alen(m), 2)
+ m = [[1, 2, 3], [4, 5, 7]]
+ assert_equal(np.alen(m), 2)
def test_singleton(self):
- assert_equal(np.alen(5), 1)
+ with pytest.warns(DeprecationWarning):
+ assert_equal(np.alen(5), 1)
class TestChoose(object):
RuntimeError, "ndim",
np.array, m)
+ # The above seems to create some deep cycles, clean them up for
+ # easier reference count debugging:
+ del c_u8_33d, m
+ for i in range(33):
+ if gc.collect() == 0:
+ break
+
def test_error_pointer_type(self):
# gh-6741
m = memoryview(ctypes.pointer(ctypes.c_uint8()))
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
+ @no_tracing
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
class TestUnicodeArrayNonzero(object):
def test_empty_ustring_array_is_falsey(self):
- assert_(not np.array([''], dtype=np.unicode))
+ assert_(not np.array([''], dtype=np.unicode_))
def test_whitespace_ustring_array_is_falsey(self):
- a = np.array(['eggs'], dtype=np.unicode)
+ a = np.array(['eggs'], dtype=np.unicode_)
a[0] = ' \0\0'
assert_(not a)
def test_all_null_ustring_array_is_falsey(self):
- a = np.array(['eggs'], dtype=np.unicode)
+ a = np.array(['eggs'], dtype=np.unicode_)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_ustring_array_is_truthy(self):
- a = np.array(['eggs'], dtype=np.unicode)
+ a = np.array(['eggs'], dtype=np.unicode_)
a[0] = ' \0 \0'
assert_(a)
arr_wb[...] = 100
assert_equal(arr, -100)
+ @pytest.mark.leaks_references(
+ reason="increments self in dealloc; ignore since deprecated path.")
def test_dealloc_warning(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_equal(i[0], b'abc')
assert_equal(i[0].dtype, np.dtype('S6'))
- a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode)
+ a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_)
assert_equal(a.dtype, np.dtype('U4'))
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='U2')
a = np.array([[False], [TrueThenFalse()]])
assert_raises(RuntimeError, np.nonzero, a)
+    def test_nonzero_exception_safe(self):
+        # gh-13930
+        # Check that a ValueError is raised instead of a SystemError: if
+        # the __bool__ method is called after the error state is already
+        # set, Python (CPython) would raise a SystemError.
+
+        class ThrowsAfter:
+            def __init__(self, iters):
+                self.iters_left = iters
+
+            def __bool__(self):
+                if self.iters_left == 0:
+                    raise ValueError("called `iters` times")
+
+                self.iters_left -= 1
+                return True
+
+ # assert that an exception in first pass is handled correctly
+ a = np.array([ThrowsAfter(5)]*10)
+ assert_raises(ValueError, np.nonzero, a)
+
+ # raise exception in second pass for 1-dimensional loop
+ a = np.array([ThrowsAfter(15)]*10)
+ assert_raises(ValueError, np.nonzero, a)
+
+ # raise exception in second pass for n-dimensional loop
+ a = np.array([[ThrowsAfter(15)]]*10)
+ assert_raises(ValueError, np.nonzero, a)
+
class TestIndex(object):
def test_boolean(self):
exp = '1' + (width - 1) * '0'
assert_equal(np.binary_repr(num, width=width), exp)
+ def test_large_neg_int64(self):
+ # See gh-14289.
+ assert_equal(np.binary_repr(np.int64(-2**62), width=64),
+ '11' + '0'*62)
+
class TestBaseRepr(object):
def test_base3(self):
z = np.correlate(y, x, mode='full')
assert_array_almost_equal(z, r_z)
+ def test_zero_size(self):
+ with pytest.raises(ValueError):
+ np.correlate(np.array([]), np.ones(1000), mode='full')
+ with pytest.raises(ValueError):
+ np.correlate(np.ones(1000), np.array([]), mode='full')
class TestConvolve(object):
def test_object(self):
class TestArgwhere(object):
+
+ @pytest.mark.parametrize('nd', [0, 1, 2])
+ def test_nd(self, nd):
+ # get an nd array with multiple elements in every dimension
+ x = np.empty((2,)*nd, bool)
+
+ # none
+ x[...] = False
+ assert_equal(np.argwhere(x).shape, (0, nd))
+
+ # only one
+ x[...] = False
+ x.flat[0] = True
+ assert_equal(np.argwhere(x).shape, (1, nd))
+
+ # all but one
+ x[...] = True
+ x.flat[0] = False
+ assert_equal(np.argwhere(x).shape, (x.size - 1, nd))
+
+ # all
+ x[...] = True
+ assert_equal(np.argwhere(x).shape, (x.size, nd))
+
def test_2D(self):
x = np.arange(6).reshape((2, 3))
assert_array_equal(np.argwhere(x > 1),
assert_array_equal(x, np.array([[0], [1], [2], [3]]))
assert_array_equal(y, np.array([[0, 1, 2]]))
- @pytest.mark.parametrize("dtype", [np.int, np.float32, np.float64])
+ @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("dims", [(), (0,), (4, 3)])
def test_return_type(self, dtype, dims):
inds = np.indices(dims, dtype=dtype)
assert_('int64' in np.int_.__doc__)
elif np.int64 is np.longlong:
assert_('int64' in np.longlong.__doc__)
+
+
+class TestScalarTypeNames:
+ # gh-9799
+
+ numeric_types = [
+ np.byte, np.short, np.intc, np.int_, np.longlong,
+ np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong,
+ np.half, np.single, np.double, np.longdouble,
+ np.csingle, np.cdouble, np.clongdouble,
+ ]
+
+ def test_names_are_unique(self):
+ # none of the above may be aliases for each other
+ assert len(set(self.numeric_types)) == len(self.numeric_types)
+
+ # names must be unique
+ names = [t.__name__ for t in self.numeric_types]
+ assert len(set(names)) == len(names)
+
+ @pytest.mark.parametrize('t', numeric_types)
+ def test_names_reflect_attributes(self, t):
+ """ Test that names correspond to where the type is under ``np.`` """
+ assert getattr(np, t.__name__) is t
+
+ @pytest.mark.parametrize('t', numeric_types)
+    def test_names_are_understood_by_dtype(self, t):
+ """ Test the dtype constructor maps names back to the type """
+ assert np.dtype(t.__name__).type is t
_assert_valid_refcount, HAS_REFCOUNT,
)
from numpy.compat import asbytes, asunicode, long, pickle
+from test.support import no_tracing
try:
RecursionError
assert_(pickle.loads(
pickle.dumps(test_record, protocol=proto)) == test_record)
+ @no_tracing
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
min //= -1
with np.errstate(divide="ignore"):
- for t in (np.int8, np.int16, np.int32, np.int64, int, np.long):
+ for t in (np.int8, np.int16, np.int32, np.int64, int, np.compat.long):
test_type(t)
def test_buffer_hashlib(self):
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
- np.fromstring(b'aa, aa, 1.0', sep=',')
+ with assert_warns(DeprecationWarning):
+ np.fromstring(b'aa, aa, 1.0', sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
- a = np.array(['abc'], dtype=np.unicode)[0]
+ a = np.array(['abc'], dtype=np.unicode_)[0]
del a
def test_refcount_error_in_clip(self):
np.array([T()])
- def test_2d__array__shape(self):
- class T(object):
- def __array__(self):
- return np.ndarray(shape=(0,0))
-
- # Make sure __array__ is used instead of Sequence methods.
- def __iter__(self):
- return iter([])
-
- def __getitem__(self, idx):
- raise AssertionError("__getitem__ was called")
-
- def __len__(self):
- return 0
-
-
- t = T()
- #gh-13659, would raise in broadcasting [x=t for x in result]
- np.array([t])
-
@pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
@pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8),
reason='overflows on windows, fixed in bpo-16865')
def test_char_repeat(self):
np_s = np.string_('abc')
np_u = np.unicode_('abc')
- np_i = np.int(5)
res_s = b'abc' * 5
res_u = u'abc' * 5
- assert_(np_s * np_i == res_s)
- assert_(np_u * np_i == res_u)
+ assert_(np_s * 5 == res_s)
+ assert_(np_u * 5 == res_u)
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
- assert_warns
+ assert_warns, assert_raises_regex,
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
rem = operator.mod(finf, fone)
assert_(np.isnan(rem), 'dt: %s' % dt)
+ def test_inplace_floordiv_handling(self):
+ # issue gh-12927
+        # this only applies to in-place floordiv (//=): mixing int64 and
+        # uint64 promotes the result to float64, which cannot be cast back
+        # to the integer output
+ a = np.array([1, 2], np.int64)
+ b = np.array([1, 2], np.uint64)
+ pattern = 'could not be coerced to provided output parameter'
+ with assert_raises_regex(TypeError, pattern):
+ a //= b
+
class TestComplexDivision(object):
def test_zero_division(self):
def test_numpy_abs(self):
self._test_abs_func(np.abs)
+
+
+class TestBitShifts(object):
+
+ @pytest.mark.parametrize('type_code', np.typecodes['AllInteger'])
+ @pytest.mark.parametrize('op',
+ [operator.rshift, operator.lshift], ids=['>>', '<<'])
+ def test_shift_all_bits(self, type_code, op):
+ """ Shifts where the shift amount is the width of the type or wider """
+ # gh-2449
+ dt = np.dtype(type_code)
+ nbits = dt.itemsize * 8
+ for val in [5, -5]:
+ for shift in [nbits, nbits + 4]:
+ val_scl = dt.type(val)
+ shift_scl = dt.type(shift)
+ res_scl = op(val_scl, shift_scl)
+ if val_scl < 0 and op is operator.rshift:
+ # sign bit is preserved
+ assert_equal(res_scl, -1)
+ else:
+ assert_equal(res_scl, 0)
+
+ # Result on scalars should be the same as on arrays
+ val_arr = np.array([val]*32, dtype=dt)
+ shift_arr = np.array([shift]*32, dtype=dt)
+ res_arr = op(val_arr, shift_arr)
+ assert_equal(res_arr, res_scl)
assert_array_equal(out, mm_row_col_vec.squeeze())
def test_matrix_multiply(self):
- self.compare_matrix_multiply_results(np.long)
+ self.compare_matrix_multiply_results(np.int64)
self.compare_matrix_multiply_results(np.double)
def test_matrix_multiply_umath_empty(self):
arr0d = np.array(HasComparisons())
assert_equal(arr0d == arr0d, True)
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
- assert_equal(np.equal(arr0d, arr0d, dtype=object), '==')
arr1d = np.array([HasComparisons()])
assert_equal(arr1d == arr1d, np.array([True]))
assert_(r1 is o1)
assert_(r2 is o2)
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', DeprecationWarning)
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs.
r1, r2 = np.frexp(d, out=o1, subok=subok)
- assert_(r1 is o1)
- assert_(w[0].category is DeprecationWarning)
assert_raises(ValueError, np.add, a, 2, o, o, subok=subok)
assert_raises(ValueError, np.add, a, 2, o, out=o, subok=subok)
else:
assert_(type(r1) == np.ndarray)
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', DeprecationWarning)
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs.
r1, r2 = np.frexp(d, out=o1, subok=subok)
- if subok:
- assert_(isinstance(r2, ArrayWrap))
- else:
- assert_(type(r2) == np.ndarray)
- assert_(w[0].category is DeprecationWarning)
class TestComparisons(object):
def test_ignore_object_identity_in_equal(self):
- # Check error raised when comparing identical objects whose comparison
+        # Check the behavior when comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
a = np.array([np.array([1, 2, 3]), None], dtype=object)
assert_raises(ValueError, np.equal, a, a)
assert_equal(np.equal(a, a), [False])
def test_ignore_object_identity_in_not_equal(self):
- # Check error raised when comparing identical objects whose comparison
+        # Check the behavior when comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
a = np.array([np.array([1, 2, 3]), None], dtype=object)
assert_raises(ValueError, np.not_equal, a, a)
assert_raises(FloatingPointError, np.log, np.float32(-np.inf))
assert_raises(FloatingPointError, np.log, np.float32(-1.0))
-class TestExpLogFloat32(object):
+ def test_sincos_values(self):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, np.nan, np.nan]
+ y = [np.nan, -np.nan, np.inf, -np.inf]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.sin(yf), xf)
+ assert_equal(np.cos(yf), xf)
+
+ with np.errstate(invalid='raise'):
+ assert_raises(FloatingPointError, np.sin, np.float32(-np.inf))
+ assert_raises(FloatingPointError, np.sin, np.float32(np.inf))
+ assert_raises(FloatingPointError, np.cos, np.float32(-np.inf))
+ assert_raises(FloatingPointError, np.cos, np.float32(np.inf))
+
+ def test_sqrt_values(self):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, np.inf, np.nan, 0.]
+ y = [np.nan, -np.nan, np.inf, -np.inf, 0.]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.sqrt(yf), xf)
+
+ #with np.errstate(invalid='raise'):
+ # for dt in ['f', 'd', 'g']:
+ # assert_raises(FloatingPointError, np.sqrt, np.array(-100., dtype=dt))
+
+ def test_abs_values(self):
+ x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0]
+ y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.abs(yf), xf)
+
+ def test_square_values(self):
+ x = [np.nan, np.nan, np.inf, np.inf]
+ y = [np.nan, -np.nan, np.inf, -np.inf]
+ with np.errstate(all='ignore'):
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.square(yf), xf)
+
+ with np.errstate(over='raise'):
+ assert_raises(FloatingPointError, np.square, np.array(1E32, dtype='f'))
+ assert_raises(FloatingPointError, np.square, np.array(1E200, dtype='d'))
+
+ def test_reciprocal_values(self):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf]
+ y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.reciprocal(yf), xf)
+
+ with np.errstate(divide='raise'):
+ for dt in ['f', 'd', 'g']:
+ assert_raises(FloatingPointError, np.reciprocal, np.array(-0.0, dtype=dt))
+
+# func : [maxulperror, low, high]
+avx_ufuncs = {'sqrt' :[1, 0., 100.],
+ 'absolute' :[0, -100., 100.],
+ 'reciprocal' :[1, 1., 100.],
+ 'square' :[1, -100., 100.],
+ 'rint' :[0, -100., 100.],
+ 'floor' :[0, -100., 100.],
+ 'ceil' :[0, -100., 100.],
+ 'trunc' :[0, -100., 100.]}
+
+class TestAVXUfuncs(object):
+ def test_avx_based_ufunc(self):
+ strides = np.array([-4,-3,-2,-1,1,2,3,4])
+ np.random.seed(42)
+ for func, prop in avx_ufuncs.items():
+ maxulperr = prop[0]
+ minval = prop[1]
+ maxval = prop[2]
+ # various array sizes to ensure masking in AVX is tested
+ for size in range(1,32):
+ myfunc = getattr(np, func)
+ x_f32 = np.float32(np.random.uniform(low=minval, high=maxval,
+ size=size))
+ x_f64 = np.float64(x_f32)
+ x_f128 = np.longdouble(x_f32)
+ y_true128 = myfunc(x_f128)
+ if maxulperr == 0:
+ assert_equal(myfunc(x_f32), np.float32(y_true128))
+ assert_equal(myfunc(x_f64), np.float64(y_true128))
+ else:
+ assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128),
+ maxulp=maxulperr)
+ assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128),
+ maxulp=maxulperr)
+ # various strides to test gather instruction
+ if size > 1:
+ y_true32 = myfunc(x_f32)
+ y_true64 = myfunc(x_f64)
+ for jj in strides:
+ assert_equal(myfunc(x_f64[::jj]), y_true64[::jj])
+ assert_equal(myfunc(x_f32[::jj]), y_true32[::jj])
+
+class TestAVXFloat32Transcendental(object):
def test_exp_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))
x_f64 = np.float64(x_f32)
- assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=2.6)
+ assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3)
def test_log_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000))
x_f64 = np.float64(x_f32)
- assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=3.9)
+ assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4)
+
+ def test_sincos_float32(self):
+ np.random.seed(42)
+ N = 1000000
+ M = np.int_(N/20)
+ index = np.random.randint(low=0, high=N, size=M)
+ x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))
+ # test coverage for elements > 117435.992f for which glibc is used
+ x_f32[index] = np.float32(10E+10*np.random.rand(M))
+ x_f64 = np.float64(x_f32)
+ assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2)
+ assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2)
- def test_strided_exp_log_float32(self):
+ def test_strided_float32(self):
np.random.seed(42)
- strides = np.random.randint(low=-100, high=100, size=100)
- sizes = np.random.randint(low=1, high=2000, size=100)
+ strides = np.array([-4,-3,-2,-1,1,2,3,4])
+ sizes = np.arange(2,100)
for ii in sizes:
x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii))
exp_true = np.exp(x_f32)
log_true = np.log(x_f32)
+ sin_true = np.sin(x_f32)
+ cos_true = np.cos(x_f32)
for jj in strides:
assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.sin(x_f32[::jj]), sin_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.cos(x_f32[::jj]), cos_true[::jj], nulp=2)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self):
ok = np.empty(1).view(Ok)
bad = np.empty(1).view(Bad)
-
# double-free (segfault) of "ok" if "bad" raises an exception
for i in range(10):
assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
assert_(np.modf(a, None) == {})
assert_(np.modf(a, None, None) == {})
assert_(np.modf(a, out=(None, None)) == {})
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', DeprecationWarning)
- assert_(np.modf(a, out=None) == {})
- assert_(w[0].category is DeprecationWarning)
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs.
+ np.modf(a, out=None)
# don't give positional and output argument, or too many arguments.
# wrong number of arguments in the tuple is an error too.
for filename in files:
data_dir = path.join(path.dirname(__file__), 'data')
filepath = path.join(data_dir, filename)
- file_without_comments = (r for r in open(filepath) if not r[0] in ('$', '#'))
+ with open(filepath) as fid:
+ file_without_comments = (r for r in fid if not r[0] in ('$', '#'))
data = np.genfromtxt(file_without_comments,
- dtype=('|S39','|S39','|S39',np.int),
+ dtype=('|S39','|S39','|S39',int),
names=('type','input','output','ulperr'),
delimiter=',',
skip_header=1)
"""
if ctypes.__version__ < '1.0.1':
import warnings
- warnings.warn("All features of ctypes interface may not work " \
+ warnings.warn("All features of ctypes interface may not work "
"with ctypes < 1.0.1", stacklevel=2)
ext = os.path.splitext(libname)[1]
+"""
+An enhanced distutils, providing support for Fortran compilers, for BLAS,
+LAPACK and other common libraries for numerical computing, and more.
+
+Public submodules are::
+
+ misc_util
+ system_info
+ cpu_info
+ log
+ exec_command
+
+For details, please see the *Packaging* and *NumPy Distutils User Guide*
+sections of the NumPy Reference Guide.
+
+For configuring the preference for and location of libraries like BLAS and
+LAPACK, and for setting include paths and similar build options, please see
+``site.cfg.example`` in the root of the NumPy repository or sdist.
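+
+A minimal ``site.cfg`` entry might look like this (the paths are
+illustrative)::
+
+    [openblas]
+    libraries = openblas
+    library_dirs = /opt/openblas/lib
+    include_dirs = /opt/openblas/include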
+
+"""
+
from __future__ import division, absolute_import, print_function
-from .__version__ import version as __version__
# Must import local ccompiler ASAP in order to get
# customized CCompiler.spawn effective.
from . import ccompiler
from . import unixccompiler
-from .info import __doc__
from .npy_pkg_config import *
# If numpy is installed, add distutils.test()
c.customize()
return c
-def customized_ccompiler(plat=None, compiler=None):
- c = ccompiler.new_compiler(plat=plat, compiler=compiler)
+def customized_ccompiler(plat=None, compiler=None, verbose=1):
+ c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
c.customize('')
return c
+++ /dev/null
-from __future__ import division, absolute_import, print_function
-
-major = 0
-minor = 4
-micro = 0
-version = '%(major)d.%(minor)d.%(micro)d' % (locals())
display = ' '.join(list(display))
log.info(display)
try:
- subprocess.check_output(cmd)
+ if self.verbose:
+ subprocess.check_output(cmd)
+ else:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
o = exc.output
s = exc.returncode
if is_sequence(cmd):
cmd = ' '.join(list(cmd))
- forward_bytes_to_stdout(o)
+ if self.verbose:
+ forward_bytes_to_stdout(o)
if re.search(b'Too many open files', o):
msg = '\nTry rerunning setup command until build succeeds.'
'g++' in self.compiler[0] or
'clang' in self.compiler[0]):
self._auto_depends = True
+ if 'gcc' in self.compiler[0]:
+ # add std=c99 flag for gcc
+ # TODO: does this need to be more specific?
+ self.compiler.append('-std=c99')
+ self.compiler_so.append('-std=c99')
elif os.name == 'posix':
import tempfile
import shutil
_distutils_new_compiler = new_compiler
def new_compiler (plat=None,
compiler=None,
- verbose=0,
+ verbose=None,
dry_run=0,
force=0):
# Try first C compilers from numpy.distutils.
+ if verbose is None:
+ verbose = log.get_threshold() <= log.INFO
if plat is None:
plat = os.name
try:
raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name))
compiler = klass(None, dry_run, force)
+ compiler.verbose = verbose
log.debug('new_compiler returns %s' % (klass))
return compiler
user_options = old_build.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
- ('parallel=', 'j',
- "number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
]
help_options = old_build.help_options + [
def initialize_options(self):
old_build.initialize_options(self)
self.fcompiler = None
- self.parallel = None
+ self.warn_error = False
def finalize_options(self):
- if self.parallel:
- try:
- self.parallel = int(self.parallel)
- except ValueError:
- raise ValueError("--parallel/-j argument must be an integer")
build_scripts = self.build_scripts
old_build.finalize_options(self)
- plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
+ plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
if build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts' + plat_specifier)
('inplace', 'i', 'Build in-place'),
('parallel=', 'j',
"number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
]
- boolean_options = old_build_clib.boolean_options + ['inplace']
+ boolean_options = old_build_clib.boolean_options + ['inplace', 'warn-error']
def initialize_options(self):
old_build_clib.initialize_options(self)
self.fcompiler = None
self.inplace = 0
self.parallel = None
+ self.warn_error = None
def finalize_options(self):
if self.parallel:
except ValueError:
raise ValueError("--parallel/-j argument must be an integer")
old_build_clib.finalize_options(self)
- self.set_undefined_options('build', ('parallel', 'parallel'))
+ self.set_undefined_options('build',
+ ('parallel', 'parallel'),
+ ('warn_error', 'warn_error'),
+ )
def have_f_sources(self):
for (lib_name, build_info) in self.libraries:
self.compiler.customize(self.distribution,
need_cxx=self.have_cxx_sources())
+ if self.warn_error:
+ self.compiler.compiler.append('-Werror')
+ self.compiler.compiler_so.append('-Werror')
+
libraries = self.libraries
self.libraries = None
self.compiler.customize_cmd(self)
"specify the Fortran compiler type"),
('parallel=', 'j',
"number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
]
help_options = old_build_ext.help_options + [
show_fortran_compilers),
]
+ boolean_options = old_build_ext.boolean_options + ['warn-error']
+
def initialize_options(self):
old_build_ext.initialize_options(self)
self.fcompiler = None
self.parallel = None
+ self.warn_error = None
def finalize_options(self):
if self.parallel:
self.include_dirs.extend(incl_dirs)
old_build_ext.finalize_options(self)
- self.set_undefined_options('build', ('parallel', 'parallel'))
+ self.set_undefined_options('build',
+ ('parallel', 'parallel'),
+ ('warn_error', 'warn_error'),
+ )
def run(self):
if not self.extensions:
force=self.force)
self.compiler.customize(self.distribution)
self.compiler.customize_cmd(self)
+
+ if self.warn_error:
+ self.compiler.compiler.append('-Werror')
+ self.compiler.compiler_so.append('-Werror')
+
self.compiler.show_customization()
# Setup directory for storing generated extra DLL files on Windows
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
+ ('verbose-cfg', None,
+ "change logging level from WARN to INFO which will show all " +
+ "compiler output")
]
- boolean_options = ['force', 'inplace']
+ boolean_options = ['force', 'inplace', 'verbose-cfg']
help_options = []
self.swig_opts = None
self.swig_cpp = None
self.swig = None
+ self.verbose_cfg = None
def finalize_options(self):
self.set_undefined_options('build',
self.data_files = self.distribution.data_files or []
if self.build_src is None:
- plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
+ plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)
# py_modules_dict is used in build_py.find_package_modules
# incl_dirs = extension.include_dirs
#if self.build_src not in incl_dirs:
# incl_dirs.append(self.build_src)
- build_dir = os.path.join(*([self.build_src]\
+ build_dir = os.path.join(*([self.build_src]
+name.split('.')[:-1]))
self.mkpath(build_dir)
+
+ if self.verbose_cfg:
+ new_level = log.INFO
+ else:
+ new_level = log.WARN
+ old_level = log.set_threshold(new_level)
+
for func in func_sources:
source = func(extension, build_dir)
if not source:
else:
log.info(" adding '%s' to sources." % (source,))
new_sources.append(source)
-
+ log.set_threshold(old_level)
return new_sources
def filter_py_files(self, sources):
if is_sequence(extension):
name = extension[0]
else: name = extension.name
- target_dir = os.path.join(*([self.build_src]\
+ target_dir = os.path.join(*([self.build_src]
+name.split('.')[:-1]))
target_file = os.path.join(target_dir, ext_name + 'module.c')
new_sources.append(target_file)
of the program and its output.
"""
# 2008-11-16, RemoveMe
- warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
- "Usage of get_output is deprecated: please do not \n" \
- "use it anymore, and avoid configuration checks \n" \
- "involving running executable on the target machine.\n" \
+ warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n"
+ "Usage of get_output is deprecated: please do not \n"
+ "use it anymore, and avoid configuration checks \n"
+ "involving running executable on the target machine.\n"
"+++++++++++++++++++++++++++++++++++++++++++++++++\n",
DeprecationWarning, stacklevel=2)
self._check_compiler()
return self.is_PentiumIV() and self.has_sse3()
def _is_Nocona(self):
- return self.is_Intel() \
- and (self.info[0]['cpu family'] == '6' \
- or self.info[0]['cpu family'] == '15' ) \
- and (self.has_sse3() and not self.has_ssse3())\
- and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None
+ return (self.is_Intel()
+ and (self.info[0]['cpu family'] == '6'
+ or self.info[0]['cpu family'] == '15')
+ and (self.has_sse3() and not self.has_ssse3())
+ and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None)
def _is_Core2(self):
- return self.is_64bit() and self.is_Intel() and \
- re.match(r'.*?Core\(TM\)2\b', \
- self.info[0]['model name']) is not None
+ return (self.is_64bit() and self.is_Intel() and
+ re.match(r'.*?Core\(TM\)2\b',
+ self.info[0]['model name']) is not None)
def _is_Itanium(self):
return re.match(r'.*?Itanium\b',
def _has_sse(self):
if self.is_Intel():
- return (self.info[0]['Family']==6 and \
- self.info[0]['Model'] in [7, 8, 9, 10, 11]) \
- or self.info[0]['Family']==15
+ return ((self.info[0]['Family']==6 and
+ self.info[0]['Model'] in [7, 8, 9, 10, 11])
+ or self.info[0]['Family']==15)
elif self.is_AMD():
- return (self.info[0]['Family']==6 and \
- self.info[0]['Model'] in [6, 7, 8, 10]) \
- or self.info[0]['Family']==15
+ return ((self.info[0]['Family']==6 and
+ self.info[0]['Model'] in [6, 7, 8, 10])
+ or self.info[0]['Family']==15)
else:
return False
# 2019-01-30, 1.17
warnings.warn('exec_command is deprecated since NumPy v1.17, use '
'subprocess.Popen instead', DeprecationWarning, stacklevel=1)
- log.debug('exec_command(%r,%s)' % (command,\
+ log.debug('exec_command(%r,%s)' % (command,
','.join(['%s=%r'%kv for kv in env.items()])))
if use_tee is None:
cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
+
class Extension(old_Extension):
- def __init__ (
+ """
+ Parameters
+ ----------
+ name : str
+ Extension name.
+ sources : list of str
+ List of source file locations relative to the top directory of
+ the package.
+ extra_compile_args : list of str
+ Extra command line arguments to pass to the compiler.
+ extra_f77_compile_args : list of str
+ Extra command line arguments to pass to the fortran77 compiler.
+ extra_f90_compile_args : list of str
+ Extra command line arguments to pass to the fortran90 compiler.
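+
+    Examples
+    --------
+    A minimal, illustrative declaration (the names are hypothetical)::
+
+        ext = Extension('pkg._helper',
+                        sources=['pkg/_helpermodule.c'],
+                        extra_compile_args=['-O3'])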
+ """
+ def __init__(
self, name, sources,
include_dirs=None,
define_macros=None,
raise e
except ValueError:
e = get_exception()
- if not "path']" in str(e):
+ if not "'path'" in str(e):
print("Unexpected ValueError in", __file__)
raise e
if envvar_contents is not None:
envvar_contents = convert(envvar_contents)
if var and append:
- if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
+ if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1':
var.extend(envvar_contents)
else:
+                # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0 to keep
+                # the old behavior (overwrite flags rather than append to
+                # them)
var = envvar_contents
- if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys():
- msg = "{} is used as is, not appended ".format(envvar) + \
- "to flags already defined " + \
- "by numpy.distutils! Use NPY_DISTUTILS_APPEND_FLAGS=1 " + \
- "to obtain appending behavior instead (this " + \
- "behavior will become default in a future release)."
- warnings.warn(msg, UserWarning, stacklevel=3)
else:
var = envvar_contents
if confvar is not None and self._conf:
+++ /dev/null
-"""
-Enhanced distutils with Fortran compilers support and more.
-"""
-from __future__ import division, absolute_import, print_function
-
-postpone_import = True
' %s to %s' % (prev_level, level))
return prev_level
+def get_threshold():
+    """Return the current threshold of the module-global log instance."""
+    return _global_log.threshold
def set_verbosity(v, force=False):
prev_level = _global_log.threshold
print(message)
def warn(self, message):
- sys.stderr.write('Warning: %s' % (message,))
+ sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
and will be installed as foo.ini in the 'lib' subpath.
+    When cross-compiling with numpy distutils, it might be necessary to
+    use modified npy-pkg-config files. Using the default/generated files
+    will link with the host libraries (i.e. libnpymath.a). For
+    cross-compilation you of course need to link with target libraries,
+    while using the host Python installation.
+
+    You can copy out the numpy/core/lib/npy-pkg-config directory, add a
+    pkgdir value to the .ini files, and set the NPY_PKG_CONFIG_PATH
+    environment variable to point to the directory with the modified
+    npy-pkg-config files.
+
+ Example npymath.ini modified for cross-compilation::
+
+ [meta]
+ Name=npymath
+ Description=Portable, core math library implementing C99 standard
+ Version=0.1
+
+ [variables]
+ pkgname=numpy.core
+ pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
+ prefix=${pkgdir}
+ libdir=${prefix}/lib
+ includedir=${prefix}/include
+
+ [default]
+ Libs=-L${libdir} -lnpymath
+ Cflags=-I${includedir}
+ Requires=mlib
+
+ [msvc]
+ Libs=/LIBPATH:${libdir} npymath.lib
+ Cflags=/INCLUDE:${includedir}
+ Requires=mlib
+
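+    The build can then be pointed at the modified files before invoking
+    ``setup.py`` (the path below is hypothetical)::
+
+        import os
+        os.environ['NPY_PKG_CONFIG_PATH'] = '/path/to/modified/npy-pkg-config'
+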
"""
if subst_dict is None:
subst_dict = {}
return include_dirs
def get_npy_pkg_dir():
- """Return the path where to find the npy-pkg-config directory."""
+ """Return the path where to find the npy-pkg-config directory.
+
+ If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that
+ is returned. Otherwise, a path inside the location of the numpy module is
+ returned.
+
+    Setting NPY_PKG_CONFIG_PATH is useful when cross-compiling: customized
+    npy-pkg-config .ini files can be maintained for the cross-compilation
+    environment and used in place of the generated ones.
+
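+    For example, the lookup can be redirected to customized files
+    (hypothetical path)::
+
+        >>> os.environ['NPY_PKG_CONFIG_PATH'] = '/tmp/npy-pkg-config'  # doctest: +SKIP
+        >>> get_npy_pkg_dir()  # doctest: +SKIP
+        '/tmp/npy-pkg-config'
+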
+ """
# XXX: import here for bootstrapping reasons
import numpy
+ d = os.environ.get('NPY_PKG_CONFIG_PATH')
+ if d is not None:
+ return d
d = os.path.join(os.path.dirname(numpy.__file__),
'core', 'lib', 'npy-pkg-config')
return d
if options.define_variable:
m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
if not m:
- raise ValueError("--define-variable option should be of " \
+ raise ValueError("--define-variable option should be of "
"the form --define-variable=foo=bar")
else:
name = m.group(1)
blas_info
lapack_info
openblas_info
+ openblas64__info
+ openblas_ilp64_info
blis_info
blas_opt_info # usage recommended
lapack_opt_info # usage recommended
+ blas_ilp64_opt_info # usage recommended (general ILP64 BLAS)
+ lapack_ilp64_opt_info # usage recommended (general ILP64 LAPACK)
+ blas_ilp64_plain_opt_info # usage recommended (general ILP64 BLAS, no symbol suffix)
+ lapack_ilp64_plain_opt_info # usage recommended (general ILP64 LAPACK, no symbol suffix)
+ blas64__opt_info # usage recommended (general ILP64 BLAS, 64_ symbol suffix)
+ lapack64__opt_info # usage recommended (general ILP64 LAPACK, 64_ symbol suffix)
fftw_info,dfftw_info,sfftw_info
fftw_threads_info,dfftw_threads_info,sfftw_threads_info
djbfft_info
from distutils.errors import DistutilsError
from distutils.dist import Distribution
import distutils.sysconfig
-from distutils import log
+from numpy.distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import (
get_shared_lib_extension)
from numpy.distutils.command.config import config as cmd_config
from numpy.distutils.compat import get_exception
-from numpy.distutils import customized_ccompiler
+from numpy.distutils import customized_ccompiler as _customized_ccompiler
from numpy.distutils import _shell_utils
import distutils.ccompiler
import tempfile
platform_bits = _bits[platform.architecture()[0]]
+global_compiler = None
+
+def customized_ccompiler():
+ global global_compiler
+ if not global_compiler:
+ global_compiler = _customized_ccompiler()
+ return global_compiler
+
+
def _c_string_literal(s):
"""
Convert a python string into a literal suitable for inclusion into C code
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
'accelerate': accelerate_info, # use blas_opt instead
+ 'openblas64_': openblas64__info,
+ 'openblas64__lapack': openblas64__lapack_info,
+ 'openblas_ilp64': openblas_ilp64_info,
+ 'openblas_ilp64_lapack': openblas_ilp64_lapack_info,
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
'numarray': numarray_info,
'numerix': numerix_info,
'lapack_opt': lapack_opt_info,
+ 'lapack_ilp64_opt': lapack_ilp64_opt_info,
+ 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info,
+ 'lapack64__opt': lapack64__opt_info,
'blas_opt': blas_opt_info,
+ 'blas_ilp64_opt': blas_ilp64_opt_info,
+ 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info,
+ 'blas64__opt': blas64__opt_info,
'boost_python': boost_python_info,
'agg2': agg2_info,
'wx': wx_info,
class AtlasNotFoundError(NotFoundError):
"""
- Atlas (http://math-atlas.sourceforge.net/) libraries not found.
+ Atlas (http://github.com/math-atlas/math-atlas) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [atlas]) or by setting
the ATLAS environment variable."""
the LAPACK_SRC environment variable."""
+class LapackILP64NotFoundError(NotFoundError):
+ """
+ 64-bit Lapack libraries not found.
+ Known libraries in numpy/distutils/site.cfg file are:
+ openblas64_, openblas_ilp64
+ """
+
class BlasOptNotFoundError(NotFoundError):
"""
Optimized (vendor) Blas libraries are not found.
numpy/distutils/site.cfg file (section [blas]) or by setting
the BLAS environment variable."""
+class BlasILP64NotFoundError(NotFoundError):
+ """
+ 64-bit Blas libraries not found.
+ Known libraries in numpy/distutils/site.cfg file are:
+ openblas64_, openblas_ilp64
+ """
class BlasSrcNotFoundError(BlasNotFoundError):
"""
dir_env_var = None
search_static_first = 0 # XXX: disabled by default, may disappear in
# future unless it is proved to be useful.
- verbosity = 1
saved_results = {}
notfounderror = NotFoundError
def __init__(self,
default_lib_dirs=default_lib_dirs,
default_include_dirs=default_include_dirs,
- verbosity=1,
):
self.__class__.info = {}
self.local_prefixes = []
log.info(' FOUND:')
res = self.saved_results.get(self.__class__.__name__)
- if self.verbosity > 0 and flag:
+ if log.get_threshold() <= log.INFO and flag:
for k, v in res.items():
v = str(v)
if k in ['sources', 'libraries'] and len(v) > 270:
"""Return a list of existing paths composed by all combinations
of items from the arguments.
"""
- return combine_paths(*args, **{'verbosity': self.verbosity})
+ return combine_paths(*args)
class fft_opt_info(system_info):
try:
s, o = c.get_output(atlas_version_c_text,
libraries=libraries, library_dirs=library_dirs,
- use_tee=(system_info.verbosity > 0))
+ )
if s and re.search(r'undefined reference to `_gfortran', o, re.M):
s, o = c.get_output(atlas_version_c_text,
libraries=libraries + ['gfortran'],
library_dirs=library_dirs,
- use_tee=(system_info.verbosity > 0))
+ )
if not s:
warnings.warn(textwrap.dedent("""
*****************************************************
log.info('Status: %d', s)
log.info('Output: %s', o)
- if atlas_version == '3.2.1_pre3.3.6':
+ elif atlas_version == '3.2.1_pre3.3.6':
dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
else:
dict_append(info, define_macros=[(
class lapack_opt_info(system_info):
-
notfounderror = LapackNotFoundError
- # Default order of LAPACK checks
+ # List of all known LAPACK libraries, in the default order
lapack_order = ['mkl', 'openblas', 'flame', 'atlas', 'accelerate', 'lapack']
+ order_env_var_name = 'NPY_LAPACK_ORDER'
def _calc_info_mkl(self):
info = get_info('lapack_mkl')
return True
return False
+ def _calc_info(self, name):
+ return getattr(self, '_calc_info_{}'.format(name))()
+
def calc_info(self):
- user_order = os.environ.get('NPY_LAPACK_ORDER', None)
+ user_order = os.environ.get(self.order_env_var_name, None)
if user_order is None:
lapack_order = self.lapack_order
else:
"values: {}".format(non_existing))
for lapack in lapack_order:
- if getattr(self, '_calc_info_{}'.format(lapack))():
+ if self._calc_info(lapack):
return
if 'lapack' not in lapack_order:
warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2)
-class blas_opt_info(system_info):
+class _ilp64_opt_info_mixin:
+ symbol_suffix = None
+ symbol_prefix = None
+
+ def _check_info(self, info):
+ macros = dict(info.get('define_macros', []))
+ prefix = macros.get('BLAS_SYMBOL_PREFIX', '')
+ suffix = macros.get('BLAS_SYMBOL_SUFFIX', '')
+
+ if self.symbol_prefix not in (None, prefix):
+ return False
+
+ if self.symbol_suffix not in (None, suffix):
+ return False
+
+ return bool(info)
+
+
+class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin):
+ notfounderror = LapackILP64NotFoundError
+ lapack_order = ['openblas64_', 'openblas_ilp64']
+ order_env_var_name = 'NPY_LAPACK_ILP64_ORDER'
+
+ def _calc_info(self, name):
+ info = get_info(name + '_lapack')
+ if self._check_info(info):
+ self.set_info(**info)
+ return True
+ return False
+
+
+class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info):
+ # Same as lapack_ilp64_opt_info, but with fixed symbol names
+ symbol_prefix = ''
+ symbol_suffix = ''
+
+
+class lapack64__opt_info(lapack_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = '64_'
+
+class blas_opt_info(system_info):
notfounderror = BlasNotFoundError
- # Default order of BLAS checks
+ # List of all known BLAS libraries, in the default order
blas_order = ['mkl', 'blis', 'openblas', 'atlas', 'accelerate', 'blas']
+ order_env_var_name = 'NPY_BLAS_ORDER'
def _calc_info_mkl(self):
info = get_info('blas_mkl')
self.set_info(**info)
return True
+ def _calc_info(self, name):
+ return getattr(self, '_calc_info_{}'.format(name))()
+
def calc_info(self):
- user_order = os.environ.get('NPY_BLAS_ORDER', None)
+ user_order = os.environ.get(self.order_env_var_name, None)
if user_order is None:
blas_order = self.blas_order
else:
raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(non_existing))
for blas in blas_order:
- if getattr(self, '_calc_info_{}'.format(blas))():
+ if self._calc_info(blas):
return
if 'blas' not in blas_order:
warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2)
+class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin):
+ notfounderror = BlasILP64NotFoundError
+ blas_order = ['openblas64_', 'openblas_ilp64']
+ order_env_var_name = 'NPY_BLAS_ILP64_ORDER'
+
+ def _calc_info(self, name):
+ info = get_info(name)
+ if self._check_info(info):
+ self.set_info(**info)
+ return True
+ return False
+
+
+class blas_ilp64_plain_opt_info(blas_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = ''
+
+
+class blas64__opt_info(blas_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = '64_'
+
+
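A short sketch of how the ILP64 classes above are reached through ``get_info`` (an actual hit requires a suitable ILP64 OpenBLAS build to be installed):

    import os
    from numpy.distutils.system_info import get_info

    os.environ['NPY_BLAS_ILP64_ORDER'] = 'openblas64_'  # restrict the search order
    info = get_info('blas_ilp64_opt')  # {} when no matching ILP64 BLAS is found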
class blas_info(system_info):
section = 'blas'
dir_env_var = 'BLAS'
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
return libs
- # This breaks the for loop
- break
except distutils.ccompiler.LinkError:
pass
finally:
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
+ _require_symbols = []
notfounderror = BlasNotFoundError
- def check_embedded_lapack(self, info):
- return True
+ @property
+ def symbol_prefix(self):
+ try:
+ return self.cp.get(self.section, 'symbol_prefix')
+ except NoOptionError:
+ return ''
- def calc_info(self):
+ @property
+ def symbol_suffix(self):
+ try:
+ return self.cp.get(self.section, 'symbol_suffix')
+ except NoOptionError:
+ return ''
+
+ def _calc_info(self):
c = customized_ccompiler()
lib_dirs = self.get_lib_dirs()
# Try gfortran-compatible library files
info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs)
# Skip lapack check, we'd need build_ext to do it
- assume_lapack = True
+ skip_symbol_check = True
elif info:
- assume_lapack = False
+ skip_symbol_check = False
info['language'] = 'c'
if info is None:
- return
+ return None
# Add extra info for OpenBLAS
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
- if not (assume_lapack or self.check_embedded_lapack(info)):
- return
+ if not (skip_symbol_check or self.check_symbols(info)):
+ return None
info['define_macros'] = [('HAVE_CBLAS', None)]
- self.set_info(**info)
+ if self.symbol_prefix:
+ info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)]
+ if self.symbol_suffix:
+ info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)]
+
+ return info
+
+ def calc_info(self):
+ info = self._calc_info()
+ if info is not None:
+ self.set_info(**info)
def check_msvc_gfortran_libs(self, library_dirs, libraries):
# First, find the full path to each library directory
return info
-class openblas_lapack_info(openblas_info):
- section = 'openblas'
- dir_env_var = 'OPENBLAS'
- _lib_names = ['openblas']
- notfounderror = BlasNotFoundError
-
- def check_embedded_lapack(self, info):
+ def check_symbols(self, info):
res = False
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
+
+ prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix,
+ symbol_name,
+ self.symbol_suffix)
+ for symbol_name in self._require_symbols)
+ calls = "\n".join("%s%s%s();" % (self.symbol_prefix,
+ symbol_name,
+ self.symbol_suffix)
+ for symbol_name in self._require_symbols)
s = textwrap.dedent("""\
- void zungqr_();
+ %(prototypes)s
int main(int argc, const char *argv[])
{
- zungqr_();
+ %(calls)s
return 0;
- }""")
+ }""") % dict(prototypes=prototypes, calls=calls)
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
# Add the additional "extra" arguments
shutil.rmtree(tmpdir)
return res
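For illustration, with ``symbol_prefix = ''``, ``symbol_suffix = '64_'`` and ``_require_symbols = ['dgemm_']``, the probe assembled above generates a ``source.c`` that reads:

    void dgemm_64_();
    int main(int argc, const char *argv[])
    {
        dgemm_64_();
        return 0;
    }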
+class openblas_lapack_info(openblas_info):
+ section = 'openblas'
+ dir_env_var = 'OPENBLAS'
+ _lib_names = ['openblas']
+ _require_symbols = ['zungqr_']
+ notfounderror = BlasNotFoundError
+
class openblas_clapack_info(openblas_lapack_info):
_lib_names = ['openblas', 'lapack']
+class openblas_ilp64_info(openblas_info):
+ section = 'openblas_ilp64'
+ dir_env_var = 'OPENBLAS_ILP64'
+ _lib_names = ['openblas64']
+ _require_symbols = ['dgemm_', 'cblas_dgemm']
+ notfounderror = BlasILP64NotFoundError
+
+ def _calc_info(self):
+ info = super()._calc_info()
+ if info is not None:
+ info['define_macros'] += [('HAVE_BLAS_ILP64', None)]
+ return info
+
+class openblas_ilp64_lapack_info(openblas_ilp64_info):
+ _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr']
+
+ def _calc_info(self):
+ info = super()._calc_info()
+ if info:
+ info['define_macros'] += [('HAVE_LAPACKE', None)]
+ return info
+
+class openblas64__info(openblas_ilp64_info):
+ # ILP64 Openblas, with default symbol suffix
+ section = 'openblas64_'
+ dir_env_var = 'OPENBLAS64_'
+ _lib_names = ['openblas64_']
+ symbol_suffix = '64_'
+ symbol_prefix = ''
+
+class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info):
+ pass
+
class blis_info(blas_info):
section = 'blis'
dir_env_var = 'BLIS'
else:
assert_(new_flags == prev_flags + [new_flag])
-
-def test_fcompiler_flags_append_warning(monkeypatch):
- # Test to check that the warning for append behavior changing in future
- # is triggered. Need to use a real compiler instance so that we have
- # non-empty flags to start with (otherwise the "if var and append" check
- # will always be false).
- try:
- with suppress_warnings() as sup:
- sup.record()
- fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
- fc.customize()
- except numpy.distutils.fcompiler.CompilerNotFound:
- pytest.skip("gfortran not found, so can't execute this test")
-
- # Ensure NPY_DISTUTILS_APPEND_FLAGS not defined
- monkeypatch.delenv('NPY_DISTUTILS_APPEND_FLAGS', raising=False)
-
- for opt, envvar in customizable_flags:
- new_flag = '-dummy-{}-flag'.format(opt)
- with suppress_warnings() as sup:
- sup.record()
- prev_flags = getattr(fc.flag_vars, opt)
-
- monkeypatch.setenv(envvar, new_flag)
- with suppress_warnings() as sup:
- sup.record()
- new_flags = getattr(fc.flag_vars, opt)
- if prev_flags:
- # Check that warning was issued
- assert len(sup.log) == 1
-
- monkeypatch.delenv(envvar)
- assert_(new_flags == [new_flag])
-
- C type
- Description
- * - `np.bool`
+ * - `np.bool_`
- ``bool``
- Boolean (True or False) stored as a byte
minimum or maximum values of NumPy integer and floating point values
respectively ::
- >>> np.iinfo(np.int) # Bounds of the default integer on this system.
+ >>> np.iinfo(int) # Bounds of the default integer on this system.
iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
>>> np.iinfo(np.int32) # Bounds of a 32-bit integer
iinfo(min=-2147483648, max=2147483647, dtype=int32)
``np.clongdouble`` for the complex numbers). You can find out what your
numpy provides with ``np.finfo(np.longdouble)``.
-NumPy does not provide a dtype with more precision than C
-``long double``\\s; in particular, the 128-bit IEEE quad precision
+NumPy does not provide a dtype with more precision than C's
+``long double``\\; in particular, the 128-bit IEEE quad precision
data type (FORTRAN's ``REAL*16``\\) is not available.
For efficient memory alignment, ``np.longdouble`` is usually stored
operation into an array with the same shape as ``a``. The new elements in
``b`` are simply copies of the original scalar. The stretching analogy is
only conceptual. NumPy is smart enough to use the original scalar value
-without actually making copies, so that broadcasting operations are as
+without actually making copies so that broadcasting operations are as
memory and computationally efficient as possible.
The code in the second example is more efficient than that in the first
General Broadcasting Rules
==========================
When operating on two arrays, NumPy compares their shapes element-wise.
-It starts with the trailing dimensions, and works its way forward. Two
+It starts with the trailing dimensions and works its way forward. Two
dimensions are compatible when
1) they are equal, or
If these conditions are not met, a
``ValueError: operands could not be broadcast together`` exception is
thrown, indicating that the arrays have incompatible shapes. The size of
-the resulting array is the maximum size along each dimension of the input
-arrays.
+the resulting array is the size that is not 1 along each axis of the inputs.
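A two-line illustration of that rule:

    import numpy as np
    (np.ones((5, 1)) + np.ones((1, 4))).shape  # (5, 4): along each axis the non-1 size wins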
Arrays do not need to have the same *number* of dimensions. For example,
if you have a ``256x256x3`` array of RGB values, and you want to scale
- ``inputs``, which could be a mixture of different types
- ``kwargs``, keyword arguments passed to the function
-For this example we will only handle the method ``'__call__``.
+For this example we will only handle the method ``__call__``.
>>> from numbers import Number
>>> class DiagonalArray:
calls ``numpy.sum(self)``, and the same for ``mean``.
>>> @implements(np.sum)
-... def sum(a):
+... def sum(arr):
... "Implementation of np.sum for DiagonalArray objects"
... return arr._i * arr._N
...
def __new__(cls, *args):
print('Cls in __new__:', cls)
print('Args in __new__:', args)
- return object.__new__(cls, *args)
+ # The `object` type __new__ method takes a single argument.
+ return object.__new__(cls)
def __init__(self, *args):
print('type(self) in __init__:', type(self))
>>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
array([1, 3, 2, 6])
-The unfunc module lists all the available ufuncs in numpy. Documentation on
+The ufunc module lists all the available ufuncs in numpy. Documentation on
the specific ufuncs may be found in those modules. This documentation is
-intended to address the more general aspects of unfuncs common to most of
+intended to address the more general aspects of ufuncs common to most of
them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
have equivalent functions defined (e.g. add() for +)
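For instance, ``+`` and its ufunc counterpart agree:

    import numpy as np
    a, b = np.array([0, 2, 3, 4]), np.array([1, 1, -1, 2])
    np.array_equal(a + b, np.add(a, b))  # True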
output = ''
else:
status = 0
+ output = output.decode()
if verbose:
print(output)
finally:
'character': {'': 'string'}
}
-if os.path.isfile('.f2py_f2cmap'):
+f2cmap_default = copy.deepcopy(f2cmap_all)
+
+
+def load_f2cmap_file(f2cmap_file):
+ global f2cmap_all
+
+ f2cmap_all = copy.deepcopy(f2cmap_default)
+
+ if f2cmap_file is None:
+ # Default value
+ f2cmap_file = '.f2py_f2cmap'
+ if not os.path.isfile(f2cmap_file):
+ return
+
# User defined additions to f2cmap_all.
- # .f2py_f2cmap must contain a dictionary of dictionaries, only. For
+ # f2cmap_file must contain a dictionary of dictionaries, only. For
# example, {'real':{'low':'float'}} means that Fortran 'real(low)' is
# interpreted as C 'float'. This feature is useful for F90/95 users if
# they use PARAMETERs in type specifications.
try:
- outmess('Reading .f2py_f2cmap ...\n')
- f = open('.f2py_f2cmap', 'r')
- d = eval(f.read(), {}, {})
- f.close()
+ outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file))
+ with open(f2cmap_file, 'r') as f:
+ d = eval(f.read(), {}, {})
for k, d1 in list(d.items()):
for k1 in list(d1.keys()):
d1[k1.lower()] = d1[k1]
else:
errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % (
k, k1, d[k][k1], d[k][k1], list(c2py_map.keys())))
- outmess('Successfully applied user defined changes from .f2py_f2cmap\n')
+ outmess('Successfully applied user defined f2cmap changes\n')
except Exception as msg:
errmess(
- 'Failed to apply user defined changes from .f2py_f2cmap: %s. Skipping.\n' % (msg))
+ 'Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg))
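A minimal f2cmap file, in the literal-dict form that the ``eval`` above expects (the kind names are illustrative; each target C type must already be known to ``c2py_map``):

    # contents of .f2py_f2cmap, or of the file passed via --f2cmap
    {'real': {'low': 'float', 'high': 'double'}}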
cformat_map = {'double': '%g',
'float': '%g',
'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
cppmacros['OLDPYNUM'] = """\
#ifdef OLDPYNUM
-#error You need to install Numeric Python version 13 or higher. Get it from http:/sourceforge.net/project/?group_id=1369
+#error You need to install NumPy version 13 or higher. See https://scipy.org/install.html
#endif
"""
################# C functions ###############
CFUNCSMESS(\"create_cb_arglist\\n\");
tot=opt=ext=siz=0;
/* Get the total number of arguments */
- if (PyFunction_Check(fun))
+ if (PyFunction_Check(fun)) {
tmp_fun = fun;
+ Py_INCREF(tmp_fun);
+ }
else {
di = 1;
if (PyObject_HasAttrString(fun,\"im_func\")) {
tmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
else {
tmp_fun = fun; /* built-in function */
+ Py_INCREF(tmp_fun);
tot = maxnofargs;
if (xa != NULL)
tot += PyTuple_Size((PyObject *)xa);
if (xa != NULL)
tot += PyTuple_Size((PyObject *)xa);
tmp_fun = fun;
+ Py_INCREF(tmp_fun);
}
else if (F2PyCapsule_Check(fun)) {
tot = maxnofargs;
goto capi_fail;
}
tmp_fun = fun;
+ Py_INCREF(tmp_fun);
}
}
if (tmp_fun==NULL) {
}
#if PY_VERSION_HEX >= 0x03000000
if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
- if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\"))
+ if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) {
#else
if (PyObject_HasAttrString(tmp_fun,\"func_code\")) {
- if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\"))
+ if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) {
#endif
- tot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di;
- Py_XDECREF(tmp);
+ PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\");
+ Py_DECREF(tmp);
+ if (tmp_argcount == NULL) {
+ goto capi_fail;
+ }
+ tot = PyInt_AsLong(tmp_argcount) - di;
+ Py_DECREF(tmp_argcount);
+ }
}
/* Get the number of optional arguments */
#if PY_VERSION_HEX >= 0x03000000
PyTuple_SET_ITEM(*args,i,tmp);
}
CFUNCSMESS(\"create_cb_arglist-end\\n\");
+ Py_DECREF(tmp_fun);
return 1;
capi_fail:
if ((PyErr_Occurred())==NULL)
PyErr_SetString(#modulename#_error,errmess);
+ Py_XDECREF(tmp_fun);
return 0;
}
"""
cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'
% (F_FUNC, lower_name, name.upper(), name))
cadd('}\n')
- iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % (
- name, name, name))
+ iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name))
+ iadd('\tF2PyDict_SetItemString(d, \"%s\", tmp);' % name)
+ iadd('\tPy_DECREF(tmp);')
tname = name.replace('_', '\\_')
dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname))
dadd('\\begin{description}')
from . import cfuncs
from . import f90mod_rules
from . import __version__
+from . import capi_maps
f2py_version = __version__.version
errmess = sys.stderr.write
--link-<resource> switch below. [..] is optional list
of resources names. E.g. try 'f2py --help-link lapack_opt'.
+ --f2cmap <filename> Load Fortran-to-Python KIND specification from the given
+ file. Default: .f2py_f2cmap in current directory.
+
--quiet Run quietly.
--verbose Run with extra verbosity.
-v Print f2py version ID and exit.
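An illustrative invocation of the new option (file and module names are hypothetical):

    f2py --f2cmap ./my_f2cmap -m example example.f90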
def scaninputline(inputline):
files, skipfuncs, onlyfuncs, debug = [], [], [], []
- f, f2, f3, f5, f6, f7, f8, f9 = 1, 0, 0, 0, 0, 0, 0, 0
+ f, f2, f3, f5, f6, f7, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0, 0
verbose = 1
dolc = -1
dolatexdoc = 0
f8 = 1
elif l == '--f2py-wrapper-output':
f9 = 1
+ elif l == '--f2cmap':
+ f10 = 1
elif l == '--overwrite-signature':
options['h-overwrite'] = 1
elif l == '-h':
elif f9:
f9 = 0
options["f2py_wrapper_output"] = l
+ elif f10:
+ f10 = 0
+ options["f2cmap_file"] = l
elif f == 1:
try:
with open(l):
options['wrapfuncs'] = wrapfuncs
options['buildpath'] = buildpath
options['include_paths'] = include_paths
+ options.setdefault('f2cmap_file', None)
return files, options
fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c')
files, options = scaninputline(comline_list)
auxfuncs.options = options
+ capi_maps.load_f2cmap_file(options['f2cmap_file'])
postlist = callcrackfortran(files, options)
isusedby = {}
for i in range(len(postlist)):
modulename = 'untitled'
sources = sys.argv[1:]
- for optname in ['--include_paths', '--include-paths']:
+ for optname in ['--include_paths', '--include-paths', '--f2cmap']:
if optname in sys.argv:
i = sys.argv.index(optname)
f2py_flags.extend(sys.argv[i:i + 2])
+++ /dev/null
-"""Fortran to Python Interface Generator.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-postpone_import = True
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R""" + """evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
+\tPy_DECREF(s);
#if PY_VERSION_HEX >= 0x03000000
\ts = PyUnicode_FromString(
#else
#endif
\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
-\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\tPy_DECREF(s);
+\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
+\t/*
+\t * Store the error object inside the dict, so that it could get deallocated.
+\t * (in practice, this is a module, so it likely will not and cannot.)
+\t */
+\tPyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
+\tPy_DECREF(#modulename#_error);
\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) {
\t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
-
\treturn RETVAL;
}
#ifdef __cplusplus
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
- PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
+ tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
+ PyObject_SetAttrString(o,"_cpointer", tmp);
+ Py_DECREF(tmp);
#if PY_VERSION_HEX >= 0x03000000
- PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
+ s = PyUnicode_FromString("#name#");
#else
- PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
+ s = PyString_FromString("#name#");
#endif
+ PyObject_SetAttrString(o,"__name__", s);
+ Py_DECREF(s);
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
{
extern void #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
- PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
+ tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
+ PyObject_SetAttrString(o,"_cpointer", tmp);
+ Py_DECREF(tmp);
#if PY_VERSION_HEX >= 0x03000000
- PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
+ s = PyUnicode_FromString("#name#");
#else
- PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
+ s = PyString_FromString("#name#");
#endif
+ PyObject_SetAttrString(o,"__name__", s);
+ Py_DECREF(s);
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
- if (capi_tmp)
+ if (capi_tmp) {
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
- else
+ Py_DECREF(capi_tmp);
+ }
+ else {
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
+ }
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
setup.py for installing F2PY
Usage:
- python setup.py install
+ pip install .
Copyright 2001-2005 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
int i;
PyFortranObject *fp = NULL;
PyObject *v = NULL;
- if (init!=NULL) /* Initialize F90 module objects */
+ if (init!=NULL) { /* Initialize F90 module objects */
(*(init))();
- if ((fp = PyObject_New(PyFortranObject, &PyFortran_Type))==NULL) return NULL;
- if ((fp->dict = PyDict_New())==NULL) return NULL;
+ }
+ fp = PyObject_New(PyFortranObject, &PyFortran_Type);
+ if (fp == NULL) {
+ return NULL;
+ }
+ if ((fp->dict = PyDict_New()) == NULL) {
+ Py_DECREF(fp);
+ return NULL;
+ }
fp->len = 0;
- while (defs[fp->len].name != NULL) fp->len++;
- if (fp->len == 0) goto fail;
+ while (defs[fp->len].name != NULL) {
+ fp->len++;
+ }
+ if (fp->len == 0) {
+ goto fail;
+ }
fp->defs = defs;
- for (i=0;i<fp->len;i++)
+ for (i=0;i<fp->len;i++) {
if (fp->defs[i].rank == -1) { /* Is Fortran routine */
v = PyFortranObject_NewAsAttr(&(fp->defs[i]));
- if (v==NULL) return NULL;
+ if (v==NULL) {
+ goto fail;
+ }
PyDict_SetItemString(fp->dict,fp->defs[i].name,v);
+ Py_XDECREF(v);
} else
if ((fp->defs[i].data)!=NULL) { /* Is Fortran variable or array (not allocatable) */
if (fp->defs[i].type == NPY_STRING) {
fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY,
NULL);
}
- if (v==NULL) return NULL;
+ if (v==NULL) {
+ goto fail;
+ }
PyDict_SetItemString(fp->dict,fp->defs[i].name,v);
+ Py_XDECREF(v);
}
- Py_XDECREF(v);
+ }
return (PyObject *)fp;
fail:
- Py_XDECREF(v);
+ Py_XDECREF(fp);
return NULL;
}
void initfoo() {
int i;
- PyObject *m, *d, *s;
+ PyObject *m, *d, *s, *tmp;
import_array();
m = Py_InitModule("foo", foo_module_methods);
PyDict_SetItemString(d, "__doc__", s);
/* Fortran objects: */
- PyDict_SetItemString(d, "mod", PyFortranObject_New(f2py_mod_def,f2py_init_mod));
- PyDict_SetItemString(d, "foodata", PyFortranObject_New(f2py_foodata_def,f2py_init_foodata));
- for(i=0;f2py_routines_def[i].name!=NULL;i++)
- PyDict_SetItemString(d, f2py_routines_def[i].name,
- PyFortranObject_NewAsAttr(&f2py_routines_def[i]));
+ tmp = PyFortranObject_New(f2py_mod_def,f2py_init_mod);
+ PyDict_SetItemString(d, "mod", tmp);
+ Py_DECREF(tmp);
+ tmp = PyFortranObject_New(f2py_foodata_def,f2py_init_foodata);
+ PyDict_SetItemString(d, "foodata", tmp);
+ Py_DECREF(tmp);
+ for(i=0;f2py_routines_def[i].name!=NULL;i++) {
+ tmp = PyFortranObject_NewAsAttr(&f2py_routines_def[i]);
+ PyDict_SetItemString(d, f2py_routines_def[i].name, tmp);
+ Py_DECREF(tmp);
+ }
Py_DECREF(s);
return NULL;
rank = PySequence_Length(dims_capi);
dims = malloc(rank*sizeof(npy_intp));
- for (i=0;i<rank;++i)
- dims[i] = (npy_intp)PyInt_AsLong(PySequence_GetItem(dims_capi,i));
-
+ for (i=0;i<rank;++i) {
+ PyObject *tmp;
+ tmp = PySequence_GetItem(dims_capi, i);
+ if (tmp == NULL) {
+ goto fail;
+ }
+ dims[i] = (npy_intp)PyInt_AsLong(tmp);
+ Py_DECREF(tmp);
+ if (dims[i] == -1 && PyErr_Occurred()) {
+ goto fail;
+ }
+ }
capi_arr_tmp = array_from_pyobj(type_num,dims,rank,intent|F2PY_INTENT_OUT,arr_capi);
if (capi_arr_tmp == NULL) {
free(dims);
capi_buildvalue = Py_BuildValue("N",capi_arr_tmp);
free(dims);
return capi_buildvalue;
+
+fail:
+ free(dims);
+ return NULL;
}
static char doc_f2py_rout_wrap_attrs[] = "\
PyTuple_SetItem(dimensions,i,PyInt_FromLong(PyArray_DIM(arr,i)));
PyTuple_SetItem(strides,i,PyInt_FromLong(PyArray_STRIDE(arr,i)));
}
- return Py_BuildValue("siOOO(cciii)ii",s,PyArray_NDIM(arr),
+ return Py_BuildValue("siNNO(cciii)ii",s,PyArray_NDIM(arr),
dimensions,strides,
(PyArray_BASE(arr)==NULL?Py_None:PyArray_BASE(arr)),
PyArray_DESCR(arr)->kind,
PyDict_SetItemString(d, "__doc__", s);
wrap_error = PyErr_NewException ("wrap.error", NULL, NULL);
Py_DECREF(s);
- PyDict_SetItemString(d, "F2PY_INTENT_IN", PyInt_FromLong(F2PY_INTENT_IN));
- PyDict_SetItemString(d, "F2PY_INTENT_INOUT", PyInt_FromLong(F2PY_INTENT_INOUT));
- PyDict_SetItemString(d, "F2PY_INTENT_OUT", PyInt_FromLong(F2PY_INTENT_OUT));
- PyDict_SetItemString(d, "F2PY_INTENT_HIDE", PyInt_FromLong(F2PY_INTENT_HIDE));
- PyDict_SetItemString(d, "F2PY_INTENT_CACHE", PyInt_FromLong(F2PY_INTENT_CACHE));
- PyDict_SetItemString(d, "F2PY_INTENT_COPY", PyInt_FromLong(F2PY_INTENT_COPY));
- PyDict_SetItemString(d, "F2PY_INTENT_C", PyInt_FromLong(F2PY_INTENT_C));
- PyDict_SetItemString(d, "F2PY_OPTIONAL", PyInt_FromLong(F2PY_OPTIONAL));
- PyDict_SetItemString(d, "F2PY_INTENT_INPLACE", PyInt_FromLong(F2PY_INTENT_INPLACE));
- PyDict_SetItemString(d, "NPY_BOOL", PyInt_FromLong(NPY_BOOL));
- PyDict_SetItemString(d, "NPY_BYTE", PyInt_FromLong(NPY_BYTE));
- PyDict_SetItemString(d, "NPY_UBYTE", PyInt_FromLong(NPY_UBYTE));
- PyDict_SetItemString(d, "NPY_SHORT", PyInt_FromLong(NPY_SHORT));
- PyDict_SetItemString(d, "NPY_USHORT", PyInt_FromLong(NPY_USHORT));
- PyDict_SetItemString(d, "NPY_INT", PyInt_FromLong(NPY_INT));
- PyDict_SetItemString(d, "NPY_UINT", PyInt_FromLong(NPY_UINT));
- PyDict_SetItemString(d, "NPY_INTP", PyInt_FromLong(NPY_INTP));
- PyDict_SetItemString(d, "NPY_UINTP", PyInt_FromLong(NPY_UINTP));
- PyDict_SetItemString(d, "NPY_LONG", PyInt_FromLong(NPY_LONG));
- PyDict_SetItemString(d, "NPY_ULONG", PyInt_FromLong(NPY_ULONG));
- PyDict_SetItemString(d, "NPY_LONGLONG", PyInt_FromLong(NPY_LONGLONG));
- PyDict_SetItemString(d, "NPY_ULONGLONG", PyInt_FromLong(NPY_ULONGLONG));
- PyDict_SetItemString(d, "NPY_FLOAT", PyInt_FromLong(NPY_FLOAT));
- PyDict_SetItemString(d, "NPY_DOUBLE", PyInt_FromLong(NPY_DOUBLE));
- PyDict_SetItemString(d, "NPY_LONGDOUBLE", PyInt_FromLong(NPY_LONGDOUBLE));
- PyDict_SetItemString(d, "NPY_CFLOAT", PyInt_FromLong(NPY_CFLOAT));
- PyDict_SetItemString(d, "NPY_CDOUBLE", PyInt_FromLong(NPY_CDOUBLE));
- PyDict_SetItemString(d, "NPY_CLONGDOUBLE", PyInt_FromLong(NPY_CLONGDOUBLE));
- PyDict_SetItemString(d, "NPY_OBJECT", PyInt_FromLong(NPY_OBJECT));
- PyDict_SetItemString(d, "NPY_STRING", PyInt_FromLong(NPY_STRING));
- PyDict_SetItemString(d, "NPY_UNICODE", PyInt_FromLong(NPY_UNICODE));
- PyDict_SetItemString(d, "NPY_VOID", PyInt_FromLong(NPY_VOID));
- PyDict_SetItemString(d, "NPY_NTYPES", PyInt_FromLong(NPY_NTYPES));
- PyDict_SetItemString(d, "NPY_NOTYPE", PyInt_FromLong(NPY_NOTYPE));
- PyDict_SetItemString(d, "NPY_USERDEF", PyInt_FromLong(NPY_USERDEF));
-
- PyDict_SetItemString(d, "CONTIGUOUS", PyInt_FromLong(NPY_ARRAY_C_CONTIGUOUS));
- PyDict_SetItemString(d, "FORTRAN", PyInt_FromLong(NPY_ARRAY_F_CONTIGUOUS));
- PyDict_SetItemString(d, "OWNDATA", PyInt_FromLong(NPY_ARRAY_OWNDATA));
- PyDict_SetItemString(d, "FORCECAST", PyInt_FromLong(NPY_ARRAY_FORCECAST));
- PyDict_SetItemString(d, "ENSURECOPY", PyInt_FromLong(NPY_ARRAY_ENSURECOPY));
- PyDict_SetItemString(d, "ENSUREARRAY", PyInt_FromLong(NPY_ARRAY_ENSUREARRAY));
- PyDict_SetItemString(d, "ALIGNED", PyInt_FromLong(NPY_ARRAY_ALIGNED));
- PyDict_SetItemString(d, "WRITEABLE", PyInt_FromLong(NPY_ARRAY_WRITEABLE));
- PyDict_SetItemString(d, "UPDATEIFCOPY", PyInt_FromLong(NPY_ARRAY_UPDATEIFCOPY));
- PyDict_SetItemString(d, "WRITEBACKIFCOPY", PyInt_FromLong(NPY_ARRAY_WRITEBACKIFCOPY));
-
- PyDict_SetItemString(d, "BEHAVED", PyInt_FromLong(NPY_ARRAY_BEHAVED));
- PyDict_SetItemString(d, "BEHAVED_NS", PyInt_FromLong(NPY_ARRAY_BEHAVED_NS));
- PyDict_SetItemString(d, "CARRAY", PyInt_FromLong(NPY_ARRAY_CARRAY));
- PyDict_SetItemString(d, "FARRAY", PyInt_FromLong(NPY_ARRAY_FARRAY));
- PyDict_SetItemString(d, "CARRAY_RO", PyInt_FromLong(NPY_ARRAY_CARRAY_RO));
- PyDict_SetItemString(d, "FARRAY_RO", PyInt_FromLong(NPY_ARRAY_FARRAY_RO));
- PyDict_SetItemString(d, "DEFAULT", PyInt_FromLong(NPY_ARRAY_DEFAULT));
- PyDict_SetItemString(d, "UPDATE_ALL", PyInt_FromLong(NPY_ARRAY_UPDATE_ALL));
+
+#define ADDCONST(NAME, CONST) \
+ s = PyInt_FromLong(CONST); \
+ PyDict_SetItemString(d, NAME, s); \
+ Py_DECREF(s)
+
+ ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN);
+ ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT);
+ ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT);
+ ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE);
+ ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE);
+ ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY);
+ ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C);
+ ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL);
+ ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE);
+ ADDCONST("NPY_BOOL", NPY_BOOL);
+ ADDCONST("NPY_BYTE", NPY_BYTE);
+ ADDCONST("NPY_UBYTE", NPY_UBYTE);
+ ADDCONST("NPY_SHORT", NPY_SHORT);
+ ADDCONST("NPY_USHORT", NPY_USHORT);
+ ADDCONST("NPY_INT", NPY_INT);
+ ADDCONST("NPY_UINT", NPY_UINT);
+ ADDCONST("NPY_INTP", NPY_INTP);
+ ADDCONST("NPY_UINTP", NPY_UINTP);
+ ADDCONST("NPY_LONG", NPY_LONG);
+ ADDCONST("NPY_ULONG", NPY_ULONG);
+ ADDCONST("NPY_LONGLONG", NPY_LONGLONG);
+ ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG);
+ ADDCONST("NPY_FLOAT", NPY_FLOAT);
+ ADDCONST("NPY_DOUBLE", NPY_DOUBLE);
+ ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE);
+ ADDCONST("NPY_CFLOAT", NPY_CFLOAT);
+ ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE);
+ ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE);
+ ADDCONST("NPY_OBJECT", NPY_OBJECT);
+ ADDCONST("NPY_STRING", NPY_STRING);
+ ADDCONST("NPY_UNICODE", NPY_UNICODE);
+ ADDCONST("NPY_VOID", NPY_VOID);
+ ADDCONST("NPY_NTYPES", NPY_NTYPES);
+ ADDCONST("NPY_NOTYPE", NPY_NOTYPE);
+ ADDCONST("NPY_USERDEF", NPY_USERDEF);
+
+ ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS);
+ ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS);
+ ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA);
+ ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST);
+ ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY);
+ ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY);
+ ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED);
+ ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE);
+ ADDCONST("UPDATEIFCOPY", NPY_ARRAY_UPDATEIFCOPY);
+ ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY);
+
+ ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED);
+ ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS);
+ ADDCONST("CARRAY", NPY_ARRAY_CARRAY);
+ ADDCONST("FARRAY", NPY_ARRAY_FARRAY);
+ ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO);
+ ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO);
+ ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT);
+ ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL);
+
+#undef ADDCONST
if (PyErr_Occurred())
Py_FatalError("can't initialize module wrap");
import os
import pytest
+import tempfile
from numpy.testing import assert_
from . import util
_path('src', 'assumed_shape', 'foo_use.f90'),
_path('src', 'assumed_shape', 'precision.f90'),
_path('src', 'assumed_shape', 'foo_mod.f90'),
+ _path('src', 'assumed_shape', '.f2py_f2cmap'),
]
@pytest.mark.slow
assert_(r == 3, repr(r))
r = self.module.mod.fsum([1, 2])
assert_(r == 3, repr(r))
+
+
+class TestF2cmapOption(TestAssumedShapeSumExample):
+ def setup(self):
+ # Use a custom file name for .f2py_f2cmap
+ self.sources = list(self.sources)
+ f2cmap_src = self.sources.pop(-1)
+
+ self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False)
+ with open(f2cmap_src, 'rb') as f:
+ self.f2cmap_file.write(f.read())
+ self.f2cmap_file.close()
+
+ self.sources.append(self.f2cmap_file.name)
+ self.options = ["--f2cmap", self.f2cmap_file.name]
+
+ super(TestF2cmapOption, self).setup()
+
+ def teardown(self):
+ os.unlink(self.f2cmap_file.name)
@pytest.mark.parametrize(
"extra_args", [['--noopt', '--debug'], '--noopt --debug', '']
)
+@pytest.mark.leaks_references(reason="Imported module seems never deleted.")
def test_f2py_init_compile(extra_args):
# flush through the f2py __init__ compile() function code path as a
# crude test for input handling following migration from
return_check = import_module(modname)
calc_result = return_check.foo()
assert_equal(calc_result, 15)
+ # Removal from sys.modules is not strictly necessary; even with
+ # removal, the module (dict) stays alive.
+ del sys.modules[modname]
def test_f2py_init_compile_failure():
#
_module_dir = None
+_module_num = 5403
def _cleanup():
def get_temp_module_name():
# Assume single-threaded, and the module dir usable only by this thread
+ global _module_num
d = get_module_dir()
- for j in range(5403, 9999999):
- name = "_test_ext_module_%d" % j
- fn = os.path.join(d, name)
- if name not in sys.modules and not os.path.isfile(fn + '.py'):
- return name
- raise RuntimeError("Failed to create a temporary module name")
+ name = "_test_ext_module_%d" % _module_num
+ _module_num += 1
+ if name in sys.modules:
+ # this should not be possible, but check anyway
+ raise RuntimeError("Temporary module name already in use.")
+ return name
def _memoize(func):
# Copy files
dst_sources = []
+ f2py_sources = []
for fn in source_files:
if not os.path.isfile(fn):
raise RuntimeError("%s is not a file" % fn)
shutil.copyfile(fn, dst)
dst_sources.append(dst)
- fn = os.path.join(os.path.dirname(fn), '.f2py_f2cmap')
- if os.path.isfile(fn):
- dst = os.path.join(d, os.path.basename(fn))
- if not os.path.isfile(dst):
- shutil.copyfile(fn, dst)
+ base, ext = os.path.splitext(dst)
+ if ext in ('.f90', '.f', '.c', '.pyf'):
+ f2py_sources.append(dst)
# Prepare options
if module_name is None:
module_name = get_temp_module_name()
- f2py_opts = ['-c', '-m', module_name] + options + dst_sources
+ f2py_opts = ['-c', '-m', module_name] + options + f2py_sources
if skip:
f2py_opts += ['skip:'] + skip
if only:
""")
code = code % dict(syspath=repr(sys.path))
- with temppath(suffix='.py') as script:
+ tmpdir = tempfile.mkdtemp()
+ try:
+ script = os.path.join(tmpdir, 'setup.py')
+
with open(script, 'w') as f:
f.write(code)
- cmd = [sys.executable, script, 'config']
+ cmd = [sys.executable, 'setup.py', 'config']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
+ stderr=subprocess.STDOUT,
+ cwd=tmpdir)
out, err = p.communicate()
+ finally:
+ shutil.rmtree(tmpdir)
m = re.search(br'COMPILERS:(\d+),(\d+),(\d+)', out)
if m:
It differs from the forward transform by the sign of the exponential
argument and the default normalization by :math:`1/n`.
+Type Promotion
+--------------
+
+`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and
+``complex128`` arrays respectively. For an FFT implementation that does not
+promote input arrays, see `scipy.fftpack`.
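A quick check of the promotion (illustrative)::

    >>> import numpy as np
    >>> np.fft.fft(np.zeros(4, dtype=np.float32)).dtype
    dtype('complex128')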
+
Normalization
-------------
+
The default normalization has the direct transforms unscaled and the inverse
transforms are scaled by :math:`1/n`. It is possible to obtain unitary
transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
assert_allclose(fft1(x) / np.sqrt(30),
np.fft.fft(x, norm="ortho"), atol=1e-6)
- def test_ifft(self):
+ @pytest.mark.parametrize('norm', (None, 'ortho'))
+ def test_ifft(self, norm):
x = random(30) + 1j*random(30)
- assert_allclose(x, np.fft.ifft(np.fft.fft(x)), atol=1e-6)
assert_allclose(
- x, np.fft.ifft(np.fft.fft(x, norm="ortho"), norm="ortho"),
+ x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),
atol=1e-6)
+ # Ensure we get the correct error message
+ with pytest.raises(ValueError,
+ match='Invalid number of FFT data points'):
+ np.fft.ifft([], norm=norm)
def test_fft2(self):
x = random((30, 20)) + 1j*random((30, 20))
+"""
+**Note:** almost all functions in the ``numpy.lib`` namespace
+are also present in the main ``numpy`` namespace. Please use the
+functions as ``np.<funcname>`` where possible.
+
+``numpy.lib`` is mostly a space for implementing functions that don't
+belong in core or in another NumPy submodule with a clear purpose
+(e.g. ``random``, ``fft``, ``linalg``, ``ma``).
+
+Most of ``numpy.lib`` consists of basic functions that are used by several
+submodules and are useful to have in the main name-space.
+
+"""
from __future__ import division, absolute_import, print_function
import math
-from .info import __doc__
from numpy.version import version as __version__
+# Public submodules
+# Note: recfunctions and (maybe) format are public too, but not imported
+from . import mixins
+from . import scimath as emath
+
+# Private submodules
from .type_check import *
from .index_tricks import *
from .function_base import *
-from .mixins import *
from .nanfunctions import *
from .shape_base import *
from .stride_tricks import *
from .ufunclike import *
from .histograms import *
-from . import scimath as emath
from .polynomial import *
-#import convertcode
from .utils import *
from .arraysetops import *
from .npyio import *
from ._version import *
from numpy.core._multiarray_umath import tracemalloc_domain
-__all__ = ['emath', 'math', 'tracemalloc_domain']
+__all__ = ['emath', 'math', 'tracemalloc_domain', 'Arrayterator']
__all__ += type_check.__all__
__all__ += index_tricks.__all__
__all__ += function_base.__all__
-__all__ += mixins.__all__
__all__ += shape_base.__all__
__all__ += stride_tricks.__all__
__all__ += twodim_base.__all__
if right_length is None or max_length < right_length:
right_length = max_length
+ if (left_length == 0 or right_length == 0) \
+ and stat_func in {np.amax, np.amin}:
+ # amax and amin can't operate on an empty array,
+ # raise a more descriptive error here instead of the default one
+ raise ValueError("stat_length of 0 yields no value for padding")
+
# Calculate statistic for the left side
left_slice = _slice_at_axis(
slice(left_index, left_index + left_length), axis)
right_chunk = padded[right_slice]
right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
_round_if_needed(right_stat, padded.dtype)
+
return left_stat, right_stat
raise ValueError("unsupported keyword arguments for mode '{}': {}"
.format(mode, unsupported_kwargs))
- stat_functions = {"maximum": np.max, "minimum": np.min,
+ stat_functions = {"maximum": np.amax, "minimum": np.amin,
"mean": np.mean, "median": np.median}
# Create array with final shape and original values
-----
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
+ (the axis is moved to the first dimension, preserving the order of the other axes)
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
# axis was specified and not None
try:
- ar = np.swapaxes(ar, axis, 0)
+ ar = np.moveaxis(ar, axis, 0)
except np.AxisError:
# this removes the "axis1" or "axis2" prefix from the error message
raise np.AxisError(axis, ar.ndim)
def reshape_uniq(uniq):
uniq = uniq.view(orig_dtype)
uniq = uniq.reshape(-1, *orig_shape[1:])
- uniq = np.swapaxes(uniq, 0, axis)
+ uniq = np.moveaxis(uniq, 0, axis)
return uniq
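A quick illustration of the axis handling described above:

    import numpy as np
    a = np.array([[1, 0], [1, 0], [2, 3]])
    np.unique(a, axis=0)  # rows compared as wholes -> array([[1, 0], [2, 3]])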
output = _unique1d(consolidated, return_index,
"""
from __future__ import division, absolute_import, print_function
+import warnings
from decimal import Decimal
import functools
from numpy.core import overrides
+_depmsg = ("numpy.{name} is deprecated and will be removed from NumPy 1.20. "
+ "Use numpy_financial.{name} instead "
+ "(https://pypi.org/project/numpy-financial/).")
+
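Formatted for ``fv``, the template above expands to:

    numpy.fv is deprecated and will be removed from NumPy 1.20. Use
    numpy_financial.fv instead (https://pypi.org/project/numpy-financial/).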
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _fv_dispatcher(rate, nper, pmt, pv, when=None):
+ warnings.warn(_depmsg.format(name='fv'),
+ DeprecationWarning, stacklevel=3)
return (rate, nper, pmt, pv)
"""
Compute the future value.
+ .. deprecated:: 1.18
+
+ `fv` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Given:
* a present value, `pv`
* an interest `rate` compounded once per period, of which
References
----------
- .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
Open Document Format for Office Applications (OpenDocument)v1.2,
Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
Pre-Draft 12. Organization for the Advancement of Structured Information
http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
OpenDocument-formula-20090508.odt
+
Examples
--------
What is the future value after 10 years of saving $100 now, with
def _pmt_dispatcher(rate, nper, pv, fv=None, when=None):
+ warnings.warn(_depmsg.format(name='pmt'),
+ DeprecationWarning, stacklevel=3)
return (rate, nper, pv, fv)
"""
Compute the payment against loan principal plus interest.
+ .. deprecated:: 1.18
+
+ `pmt` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Given:
* a present value, `pv` (e.g., an amount borrowed)
* a future value, `fv` (e.g., 0)
References
----------
- .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
Open Document Format for Office Applications (OpenDocument)v1.2,
Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
Pre-Draft 12. Organization for the Advancement of Structured Information
def _nper_dispatcher(rate, pmt, pv, fv=None, when=None):
+ warnings.warn(_depmsg.format(name='nper'),
+ DeprecationWarning, stacklevel=3)
return (rate, pmt, pv, fv)
"""
Compute the number of periodic payments.
+ .. deprecated:: 1.18
+
+ `nper` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
:class:`decimal.Decimal` type is not supported.
Parameters
fv + pv + pmt*nper = 0
+ References
+ ----------
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+
Examples
--------
If you only had $150/month to pay towards the loan, how long would it take
def _ipmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
+ warnings.warn(_depmsg.format(name='ipmt'),
+ DeprecationWarning, stacklevel=3)
return (rate, per, nper, pv, fv)
"""
Compute the interest portion of a payment.
+ .. deprecated:: 1.18
+
+ `ipmt` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Parameters
----------
rate : scalar or array_like of shape(M, )
``pmt = ppmt + ipmt``
+ References
+ ----------
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+
Examples
--------
What is the amortization schedule for a 1 year loan of $2500 at
def _ppmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
+ warnings.warn(_depmsg.format(name='ppmt'),
+ DeprecationWarning, stacklevel=3)
return (rate, per, nper, pv, fv)
"""
Compute the payment against loan principal.
+ .. deprecated:: 1.18
+
+ `ppmt` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Parameters
----------
rate : array_like
--------
pmt, pv, ipmt
+ References
+ ----------
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+
"""
total = pmt(rate, nper, pv, fv, when)
return total - ipmt(rate, per, nper, pv, fv, when)
def _pv_dispatcher(rate, nper, pmt, fv=None, when=None):
+ warnings.warn(_depmsg.format(name='pv'),
+ DeprecationWarning, stacklevel=3)
return (rate, nper, pmt, fv)
"""
Compute the present value.
+ .. deprecated:: 1.18
+
+ `pv` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Given:
* a future value, `fv`
* an interest `rate` compounded once per period, of which
References
----------
- .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
Open Document Format for Office Applications (OpenDocument)v1.2,
Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
Pre-Draft 12. Organization for the Advancement of Structured Information
def _rate_dispatcher(nper, pmt, pv, fv, when=None, guess=None, tol=None,
maxiter=None):
+ warnings.warn(_depmsg.format(name='rate'),
+ DeprecationWarning, stacklevel=3)
return (nper, pmt, pv, fv)
"""
Compute the rate of interest per period.
+ .. deprecated:: 1.18
+
+ `rate` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Parameters
----------
nper : array_like
References
----------
- Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May). Open Document
- Format for Office Applications (OpenDocument)v1.2, Part 2: Recalculated
- Formula (OpenFormula) Format - Annotated Version, Pre-Draft 12.
- Organization for the Advancement of Structured Information Standards
- (OASIS). Billerica, MA, USA. [ODT Document]. Available:
- http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
- OpenDocument-formula-20090508.odt
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+ Open Document Format for Office Applications (OpenDocument)v1.2,
+ Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
+ Pre-Draft 12. Organization for the Advancement of Structured Information
+ Standards (OASIS). Billerica, MA, USA. [ODT Document].
+ Available:
+ http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
+ OpenDocument-formula-20090508.odt
"""
when = _convert_when(when)
def _irr_dispatcher(values):
+ warnings.warn(_depmsg.format(name='irr'),
+ DeprecationWarning, stacklevel=3)
return (values,)
"""
Return the Internal Rate of Return (IRR).
+ .. deprecated:: 1.18
+
+ `irr` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
This is the "average" periodically compounded rate of return
that gives a net present value of 0.0; for a more complete explanation,
see Notes below.
+ \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0
In general, for `values` :math:`= [v_0, v_1, ... v_M]`,
- irr is the solution of the equation: [G]_
+ irr is the solution of the equation: [2]_
.. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0
References
----------
- .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
Addison-Wesley, 2003, pg. 348.
Examples
>>> round(np.irr([-5, 10.5, 1, -8, 1]), 5)
0.0886
- (Compare with the Example given for numpy.lib.financial.npv)
-
"""
# `np.roots` call is why this function does not support Decimal type.
#
def _npv_dispatcher(rate, values):
+ warnings.warn(_depmsg.format(name='npv'),
+ DeprecationWarning, stacklevel=3)
return (values,)
"""
Returns the NPV (Net Present Value) of a cash flow series.
+ .. deprecated:: 1.18
+
+ `npv` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Parameters
----------
rate : scalar
The NPV of the input cash flow series `values` at the discount
`rate`.
+ Warnings
+ --------
+ ``npv`` considers a series of cashflows starting in the present (t = 0).
+ NPV can also be defined with a series of future cashflows, paid at the
+ end, rather than the start, of each period. If future cashflows are used,
+ the first cashflow `values[0]` must be zeroed and added to the net
+ present value of the future cashflows. This is demonstrated in the
+ examples.
+
Notes
-----
- Returns the result of: [G]_
+ Returns the result of: [2]_
.. math :: \\sum_{t=0}^{M-1}{\\frac{values_t}{(1+rate)^{t}}}
References
----------
- .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
+ .. [2] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
Addison-Wesley, 2003, pg. 346.
Examples
--------
- >>> np.npv(0.281,[-100, 39, 59, 55, 20])
- -0.0084785916384548798 # may vary
+ Consider a potential project with an initial investment of $40 000 and
+ projected cashflows of $5 000, $8 000, $12 000 and $30 000 at the end of
+ each period discounted at a rate of 8% per period. To find the project's
+ net present value:
+
+ >>> rate, cashflows = 0.08, [-40_000, 5_000, 8_000, 12_000, 30_000]
+ >>> np.npv(rate, cashflows).round(5)
+ 3065.22267
- (Compare with the Example given for numpy.lib.financial.irr)
+ It may be preferable to split the projected cashflow into an initial
+ investment and expected future cashflows. In this case, the value of
+ the initial cashflow is zero and the initial investment is later added
+ to the future cashflows net present value:
+
+ >>> initial_cashflow = cashflows[0]
+ >>> cashflows[0] = 0
+ >>> np.round(np.npv(rate, cashflows) + initial_cashflow, 5)
+ 3065.22267
"""
values = np.asarray(values)
def _mirr_dispatcher(values, finance_rate, reinvest_rate):
+ warnings.warn(_depmsg.format(name='mirr'),
+ DeprecationWarning, stacklevel=3)
return (values,)
"""
Modified internal rate of return.
+ .. deprecated:: 1.18
+
+ `mirr` is deprecated; for details, see NEP 32 [1]_.
+ Use the corresponding function in the numpy-financial library,
+ https://pypi.org/project/numpy-financial.
+
Parameters
----------
values : array_like
out : float
Modified internal rate of return
+ References
+ ----------
+ .. [1] NumPy Enhancement Proposal (NEP) 32,
+ https://numpy.org/neps/nep-0032-remove-financial-functions.html
"""
values = np.asarray(values)
n = values.size
)
+__all__ = []
+
+
MAGIC_PREFIX = b'\x93NUMPY'
MAGIC_LEN = len(MAGIC_PREFIX) + 2
ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
major, minor = magic_str[-2:]
return major, minor
+def _has_metadata(dt):
+ if dt.metadata is not None:
+ return True
+ elif dt.names is not None:
+ return any(_has_metadata(dt[k]) for k in dt.names)
+ elif dt.subdtype is not None:
+ return _has_metadata(dt.base)
+ else:
+ return False
+
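A small sketch of the recursion above:

    import numpy as np
    dt = np.dtype('f8', metadata={'unit': 'm'})  # metadata attached via the dtype constructor
    _has_metadata(dt)              # True
    _has_metadata(np.dtype('f8'))  # False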
def dtype_to_descr(dtype):
"""
Get a serializable descriptor from the dtype.
replicate the input dtype.
"""
+ if _has_metadata(dtype):
+ warnings.warn("metadata on a dtype may be saved or ignored, but will "
+ "raise if saved when read. Use another form of storage.",
+ UserWarning, stacklevel=2)
if dtype.names is not None:
# This is a record array. The .descr is fine. XXX: parts of the
# record array with an empty name, like padding bytes, still get
return ret
header = _wrap_header(header, (3, 0))
- warnings.warn("Stored array in format 3.0. It can only be"
+ warnings.warn("Stored array in format 3.0. It can only be "
"read by NumPy >= 1.17", UserWarning, stacklevel=2)
return header
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
- weight equal to one.
+ weight equal to one. The 1-D calculation is::
+
+ avg = sum(a * weights) / sum(weights)
+
+ The only constraint on `weights` is that `sum(weights)` must not be 0.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
-
Returns
-------
retval, [sum_of_weights] : array_type or double
Examples
--------
- >>> data = list(range(1,5))
+ >>> data = np.arange(1, 5)
>>> data
- [1, 2, 3, 4]
+ array([1, 2, 3, 4])
>>> np.average(data)
2.5
- >>> np.average(range(1,11), weights=range(10,0,-1))
+ >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
4.0
>>> data = np.arange(6).reshape((3,2))
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
- # 2014-02-24, 1.9
- warnings.warn("select with an empty condition list is not possible"
- "and will be deprecated",
- DeprecationWarning, stacklevel=3)
- return np.asarray(default)[()]
+ raise ValueError("select with an empty condition list is not possible")
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
- deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
- if np.issubdtype(cond.dtype, np.integer):
- # A previous implementation accepted int ndarrays accidentally.
- # Supported here deliberately, but deprecated.
- condlist[i] = condlist[i].astype(bool)
- deprecated_ints = True
- else:
- raise ValueError(
- 'invalid entry {} in condlist: should be boolean ndarray'.format(i))
-
- if deprecated_ints:
- # 2014-02-24, 1.9
- msg = "select condlists containing integer ndarrays is deprecated " \
- "and will be removed in the future. Use `.astype(bool)` to " \
- "convert to bools."
- warnings.warn(msg, DeprecationWarning, stacklevel=3)
+ raise TypeError(
+ 'invalid entry {} in condlist: should be boolean ndarray'.format(i))
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
The axis along which the difference is taken, default is the
last axis.
prepend, append : array_like, optional
- Values to prepend or append to "a" along axis prior to
+ Values to prepend or append to `a` along axis prior to
performing the difference. Scalar values are expanded to
arrays with length 1 in the direction of axis and the shape
of the input array in along all other axes. Otherwise the
- dimension and shape must match "a" except along axis.
+ dimension and shape must match `a` except along axis.
+
+ .. versionadded:: 1.16.0
Returns
-------
Notes
-----
- Does not check that the x-coordinate sequence `xp` is increasing.
- If `xp` is not increasing, the results are nonsense.
- A simple check for increasing is::
+    The x-coordinate sequence is expected to be increasing, but this is not
+    explicitly enforced. If the sequence `xp` is not increasing, the
+    interpolation results are meaningless.
+
+ Note that, since NaN is unsortable, `xp` also cannot contain NaNs.
+
+ A simple check for `xp` being strictly increasing is::
np.all(np.diff(xp) > 0)
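When `xp` arrives unsorted, sorting the sample points and values together restores a valid input; a minimal sketch:

import numpy as np

xp = np.array([2.0, 0.0, 1.0])  # not increasing
fp = np.array([4.0, 0.0, 1.0])
order = np.argsort(xp)          # reorder both arrays consistently
np.interp(0.5, xp[order], fp[order])  # -> 0.5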
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
- The docstring for the function. If `None`, the docstring will be the
+ The docstring for the function. If None, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
>>> m = np.arange(10, dtype=np.float64)
>>> f = np.arange(10) * 2
>>> a = np.arange(10) ** 2.
- >>> ddof = 9 # N - 1
+ >>> ddof = 1
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
Text(0.5, 0, 'X')
>>> plt.show()
- It works in 2-D as well:
-
- >>> x = np.linspace(-4, 4, 401)
- >>> xx = np.outer(x, x)
- >>> plt.imshow(np.sinc(xx))
- <matplotlib.image.AxesImage object at 0x...>
-
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
+++ /dev/null
-"""
-Basic functions used by several sub-packages and
-useful to have in the main name-space.
-
-Type Handling
--------------
-================ ===================
-iscomplexobj Test for complex object, scalar result
-isrealobj Test for real object, scalar result
-iscomplex Test for complex elements, array result
-isreal Test for real elements, array result
-imag Imaginary part
-real Real part
-real_if_close Turns complex number with tiny imaginary part to real
-isneginf Tests for negative infinity, array result
-isposinf Tests for positive infinity, array result
-isnan Tests for nans, array result
-isinf Tests for infinity, array result
-isfinite Tests for finite numbers, array result
-isscalar True if argument is a scalar
-nan_to_num Replaces NaN's with 0 and infinities with large numbers
-cast Dictionary of functions to force cast to each type
-common_type Determine the minimum common type code for a group
- of arrays
-mintypecode Return minimal allowed common typecode.
-================ ===================
-
-Index Tricks
-------------
-================ ===================
-mgrid Method which allows easy construction of N-d
- 'mesh-grids'
-``r_`` Append and construct arrays: turns slice objects into
- ranges and concatenates them, for 2d arrays appends rows.
-index_exp Konrad Hinsen's index_expression class instance which
- can be useful for building complicated slicing syntax.
-================ ===================
-
-Useful Functions
-----------------
-================ ===================
-select Extension of where to multiple conditions and choices
-extract Extract 1d array from flattened array according to mask
-insert Insert 1d array of values into Nd array according to mask
-linspace Evenly spaced samples in linear space
-logspace Evenly spaced samples in logarithmic space
-fix Round x to nearest integer towards zero
-mod Modulo mod(x,y) = x % y except keeps sign of y
-amax Array maximum along axis
-amin Array minimum along axis
-ptp Array max-min along axis
-cumsum Cumulative sum along axis
-prod Product of elements along axis
-cumprod Cumluative product along axis
-diff Discrete differences along axis
-angle Returns angle of complex argument
-unwrap Unwrap phase along given axis (1-d algorithm)
-sort_complex Sort a complex-array (based on real, then imaginary)
-trim_zeros Trim the leading and trailing zeros from 1D array.
-vectorize A class that wraps a Python function taking scalar
- arguments into a generalized function which can handle
- arrays of arguments using the broadcast rules of
- numerix Python.
-================ ===================
-
-Shape Manipulation
-------------------
-================ ===================
-squeeze Return a with length-one dimensions removed.
-atleast_1d Force arrays to be >= 1D
-atleast_2d Force arrays to be >= 2D
-atleast_3d Force arrays to be >= 3D
-vstack Stack arrays vertically (row on row)
-hstack Stack arrays horizontally (column on column)
-column_stack Stack 1D arrays as columns into 2D array
-dstack Stack arrays depthwise (along third dimension)
-stack Stack arrays along a new axis
-split Divide array into a list of sub-arrays
-hsplit Split into columns
-vsplit Split into rows
-dsplit Split along third dimension
-================ ===================
-
-Matrix (2D Array) Manipulations
--------------------------------
-================ ===================
-fliplr 2D array with columns flipped
-flipud 2D array with rows flipped
-rot90 Rotate a 2D array a multiple of 90 degrees
-eye Return a 2D array with ones down a given diagonal
-diag Construct a 2D array from a vector, or return a given
- diagonal from a 2D array.
-mat Construct a Matrix
-bmat Build a Matrix from blocks
-================ ===================
-
-Polynomials
------------
-================ ===================
-poly1d A one-dimensional polynomial class
-poly Return polynomial coefficients from roots
-roots Find roots of polynomial given coefficients
-polyint Integrate polynomial
-polyder Differentiate polynomial
-polyadd Add polynomials
-polysub Subtract polynomials
-polymul Multiply polynomials
-polydiv Divide polynomials
-polyval Evaluate polynomial at given argument
-================ ===================
-
-Iterators
----------
-================ ===================
-Arrayterator A buffered iterator for big arrays.
-================ ===================
-
-Import Tricks
--------------
-================ ===================
-ppimport Postpone module import until trying to use it
-ppimport_attr Postpone module import until trying to use its attribute
-ppresolve Import postponed module and return it.
-================ ===================
-
-Machine Arithmetics
--------------------
-================ ===================
-machar_single Single precision floating point arithmetic parameters
-machar_double Double precision floating point arithmetic parameters
-================ ===================
-
-Threading Tricks
-----------------
-================ ===================
-ParallelExec Execute commands in parallel thread.
-================ ===================
-
-Array Set Operations
------------------------
-Set operations for numeric arrays based on sort() function.
-
-================ ===================
-unique Unique elements of an array.
-isin Test whether each element of an ND array is present
- anywhere within a second array.
-ediff1d Array difference (auxiliary function).
-intersect1d Intersection of 1D arrays with unique elements.
-setxor1d Set exclusive-or of 1D arrays with unique elements.
-in1d Test whether elements in a 1D array are also present in
- another array.
-union1d Union of 1D arrays with unique elements.
-setdiff1d Set difference of 1D arrays with unique elements.
-================ ===================
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['core', 'testing']
-global_symbols = ['*']
from numpy.core import umath as um
-# Nothing should be exposed in the top-level NumPy module.
-__all__ = []
+
+__all__ = ['NDArrayOperatorsMixin']
def _disables_array_ufunc(obj):
NaNs, otherwise return None.
"""
- a = np.array(a, subok=True, copy=True)
+ a = np.asanyarray(a)
if a.dtype == np.object_:
# object arrays do not support `isnan` (gh-9009), so make a guess
- mask = a != a
+ mask = np.not_equal(a, a, dtype=bool)
elif issubclass(a.dtype.type, np.inexact):
mask = np.isnan(a)
else:
mask = None
if mask is not None:
+ a = np.array(a, subok=True, copy=True)
np.copyto(a, val, where=mask)
return a, mask
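A quick check of the copy semantics introduced here, as a sketch that imports the private helper (not public API) from numpy.lib.nanfunctions:

import numpy as np
from numpy.lib.nanfunctions import _replace_nan

ints = np.array([0, 1])
out, mask = _replace_nan(ints, 0)
assert mask is None and out is ints   # integer input: no NaNs, no copy

vals = np.array([1.0, np.nan])
out, mask = _replace_nan(vals, 2.0)
assert mask[1] and out is not vals    # NaN found: a filled copy is returned
assert np.isnan(vals[1])              # the input itself is left untouched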
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
- expected output, but the type will be cast if necessary. See
- `doc.ufuncs` for details.
+ expected output, but the type will be cast if necessary. See
+ `ufuncs-output-type` for more details.
.. versionadded:: 1.8.0
keepdims : bool, optional
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
- expected output, but the type will be cast if necessary. See
- `doc.ufuncs` for details.
+ expected output, but the type will be cast if necessary. See
+ `ufuncs-output-type` for more details.
.. versionadded:: 1.8.0
keepdims : bool, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
- `doc.ufuncs` for details. The casting of NaN to integer can yield
- unexpected results.
+ `ufuncs-output-type` for more details. The casting of NaN to integer
+ can yield unexpected results.
.. versionadded:: 1.8.0
keepdims : bool, optional
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
- expected output, but the type will be cast if necessary. See
- `doc.ufuncs` for details. The casting of NaN to integer can yield
- unexpected results.
+ expected output, but the type will be cast if necessary. See
+ `ufuncs-output-type` for more details. The casting of NaN to integer
+ can yield unexpected results.
keepdims : bool, optional
If True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
- but the type will be cast if necessary. See `doc.ufuncs`
- (Section "Output arguments") for more details.
+ but the type will be cast if necessary. See `ufuncs-output-type` for
+ more details.
Returns
-------
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
- expected output, but the type will be cast if necessary. See
- `doc.ufuncs` for details.
+ expected output, but the type will be cast if necessary. See
+ `ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the variance of the flattened array.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
- the default is `float32`; for arrays of float types it is the same as
+ the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
mean : Average
var : Variance while not ignoring NaNs
nanstd, nanmean
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
--------
var, mean, std
nanvar, nanmean
- numpy.doc.ufuncs : Section "Output arguments"
+ ufuncs-output-type
Notes
-----
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
- extension will be appended to the file name if it does not already
+ extension will be appended to the filename if it does not already
have one.
arr : array_like
Array data to be saved.
-----
For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+ Any data saved to the file is appended to the end of the file.
+
Examples
--------
>>> from tempfile import TemporaryFile
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ >>> with open('test.npy', 'wb') as f:
+ ... np.save(f, np.array([1, 2]))
+ ... np.save(f, np.array([1, 3]))
+ >>> with open('test.npy', 'rb') as f:
+ ... a = np.load(f)
+ ... b = np.load(f)
+ >>> print(a, b)
+    [1 2] [1 3]
"""
own_fid = False
if hasattr(file, 'write'):
@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
- """
- Save several arrays into a single file in uncompressed ``.npz`` format.
+ """Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
Parameters
----------
file : str or file
- Either the file name (string) or an open file (file-like object)
+ Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
- ``.npz`` extension will be appended to the file name if it is not
+ ``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
+ When saving dictionaries, the dictionary keys become filenames
+ inside the ZIP archive. Therefore, keys should be valid filenames.
+ E.g., avoid keys that begin with ``/`` or contain ``.``.
+
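A short sketch of the dictionary pattern this note covers (the filename and keys are illustrative):

import numpy as np

arrays = {'weights': np.ones(3), 'bias': np.zeros(1)}  # keys become member names
np.savez('model.npz', **arrays)
with np.load('model.npz') as data:
    sorted(data.files)  # -> ['bias', 'weights']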
Examples
--------
>>> from tempfile import TemporaryFile
['x', 'y']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
-
"""
_savez(file, args, kwds, False)
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
- If arguments are passed in with no keywords, then stored file names are
+ If arguments are passed in with no keywords, then stored filenames are
arr_0, arr_1, etc.
Parameters
----------
file : str or file
- Either the file name (string) or an open file (file-like object)
+ Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
- ``.npz`` extension will be appended to the file name if it is not
+ ``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
- generators should return byte strings for Python 3k.
+ generators should return byte strings.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
Parameters
----------
file : str or file
- File name or file object to read.
+ Filename or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype = np.dtype(dtype)
content = file.read()
- if isinstance(content, bytes) and isinstance(regexp, np.unicode):
+ if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode):
regexp = asbytes(regexp)
- elif isinstance(content, np.unicode) and isinstance(regexp, bytes):
+ elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes):
regexp = asstr(regexp)
if not hasattr(regexp, 'match'):
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
- that generators must return byte strings in Python 3k. The strings
+ that generators must return byte strings. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
replace_space=replace_space)
# Skip the first `skip_header` rows
- for i in range(skip_header):
- next(fhd)
-
- # Keep on until we find the first valid values
- first_values = None
try:
+ for i in range(skip_header):
+ next(fhd)
+
+ # Keep on until we find the first valid values
+ first_values = None
+
while not first_values:
first_line = _decode_line(next(fhd), encoding)
if (names is True) and (comments is not None):
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond
- Present only if `full` = True. Residuals of the least-squares fit,
- the effective rank of the scaled Vandermonde coefficient matrix,
- its singular values, and the specified value of `rcond`. For more
- details, see `linalg.lstsq`.
+        Present only if `full` = True. ``residuals`` is the sum of squared
+        residuals of the least-squares fit; the remaining values are the
+        effective rank of the scaled Vandermonde coefficient matrix, its
+        singular values, and the specified value of `rcond`. For more
+        details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
- if typ.names:
+ if typ.names is not None:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
Nested fields are supported.
+    .. versionchanged:: 1.18.0
+ `drop_fields` returns an array with 0 fields if all fields are dropped,
+ rather than returning ``None`` as it did previously.
+
Parameters
----------
base : array
current = ndtype[name]
if name in drop_names:
continue
- if current.names:
+ if current.names is not None:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
- if not newdtype:
- return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
from __future__ import division, absolute_import, print_function
import functools
-import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
from numpy.core.multiarray import normalize_axis_index
from numpy.core import overrides
from numpy.core import vstack, atleast_3d
+from numpy.core.numeric import normalize_axis_tuple
from numpy.core.shape_base import _arrays_for_stack_dispatcher
from numpy.lib.index_tricks import ndindex
from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells
def _make_along_axis_idx(arr_shape, indices, axis):
- # compute dimensions to iterate over
+ # compute dimensions to iterate over
if not _nx.issubdtype(indices.dtype, _nx.integer):
raise IndexError('`indices` must be an integer array')
if len(arr_shape) != indices.ndim:
Insert a new axis that will appear at the `axis` position in the expanded
array shape.
- .. note:: Previous to NumPy 1.13.0, neither ``axis < -a.ndim - 1`` nor
- ``axis > a.ndim`` raised errors or put the new axis where documented.
- Those axis values are now deprecated and will raise an AxisError in the
- future.
-
Parameters
----------
a : array_like
Input array.
- axis : int
- Position in the expanded axes where the new axis is placed.
+ axis : int or tuple of ints
+ Position in the expanded axes where the new axis (or axes) is placed.
+
+ .. deprecated:: 1.13.0
+ Passing an axis where ``axis > a.ndim`` will be treated as
+ ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will
+ be treated as ``axis == 0``. This behavior is deprecated.
+
+ .. versionchanged:: 1.18.0
+ A tuple of axes is now supported. Out of range axes as
+ described above are now forbidden and raise an `AxisError`.
Returns
-------
- res : ndarray
- View of `a` with the number of dimensions increased by one.
+ result : ndarray
+ View of `a` with the number of dimensions increased.
See Also
--------
Examples
--------
- >>> x = np.array([1,2])
+ >>> x = np.array([1, 2])
>>> x.shape
(2,)
- The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:
+ The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``:
>>> y = np.expand_dims(x, axis=0)
>>> y
>>> y.shape
(1, 2)
- >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]
+ The following is equivalent to ``x[:, np.newaxis]``:
+
+ >>> y = np.expand_dims(x, axis=1)
>>> y
array([[1],
[2]])
>>> y.shape
(2, 1)
+ ``axis`` may also be a tuple:
+
+ >>> y = np.expand_dims(x, axis=(0, 1))
+ >>> y
+ array([[[1, 2]]])
+
+ >>> y = np.expand_dims(x, axis=(2, 0))
+ >>> y
+ array([[[1],
+ [2]]])
+
Note that some examples may use ``None`` instead of ``np.newaxis``. These
are the same objects:
else:
a = asanyarray(a)
- shape = a.shape
- if axis > a.ndim or axis < -a.ndim - 1:
- # 2017-05-17, 1.13.0
- warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are "
- "deprecated and will raise an AxisError in the future.",
- DeprecationWarning, stacklevel=3)
- # When the deprecation period expires, delete this if block,
- if axis < 0:
- axis = axis + a.ndim + 1
- # and uncomment the following line.
- # axis = normalize_axis_index(axis, a.ndim + 1)
- return a.reshape(shape[:axis] + (1,) + shape[axis:])
+ if type(axis) not in (tuple, list):
+ axis = (axis,)
+
+ out_ndim = len(axis) + a.ndim
+ axis = normalize_axis_tuple(axis, out_ndim)
+
+ shape_it = iter(a.shape)
+ shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
+
+ return a.reshape(shape)
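To trace how the comprehension threads the original dimensions past the inserted ones, a worked shape with illustrative values:

import numpy as np

a = np.empty((4, 5))
# axis=(0, 3) with out_ndim=4: positions 0 and 3 become length-1 axes,
# while positions 1 and 2 consume the original shape (4, 5) in order
assert np.expand_dims(a, axis=(0, 3)).shape == (1, 4, 5, 1)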
row_stack = vstack
@array_function_dispatch(_split_dispatcher)
def split(ary, indices_or_sections, axis=0):
"""
- Split an array into multiple sub-arrays.
+ Split an array into multiple sub-arrays as views into `ary`.
Parameters
----------
Returns
-------
sub-arrays : list of ndarrays
- A list of sub-arrays.
+ A list of sub-arrays as views into `ary`.
Raises
------
if N % sections:
raise ValueError(
'array split does not result in an equal division')
- res = array_split(ary, indices_or_sections, axis)
- return res
+ return array_split(ary, indices_or_sections, axis)
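The "views" wording in the docstring can be verified directly; a small sketch:

import numpy as np

a = np.arange(9)
parts = np.split(a, 3)
assert all(np.shares_memory(p, a) for p in parts)
a[0] = 99
assert parts[0][0] == 99  # writes to `ary` show through the sub-arrays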
def _hvdsplit_dispatcher(ary, indices_or_sections):
def test_upgrademapper(self):
"Tests updatemapper"
dateparser = _bytes_to_date
- StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
- convert = StringConverter(dateparser, date(2000, 1, 1))
- test = convert('2001-01-01')
- assert_equal(test, date(2001, 1, 1))
- test = convert('2009-01-01')
- assert_equal(test, date(2009, 1, 1))
- test = convert('')
- assert_equal(test, date(2000, 1, 1))
+ _original_mapper = StringConverter._mapper[:]
+ try:
+ StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
+ convert = StringConverter(dateparser, date(2000, 1, 1))
+ test = convert('2001-01-01')
+ assert_equal(test, date(2001, 1, 1))
+ test = convert('2009-01-01')
+ assert_equal(test, date(2009, 1, 1))
+ test = convert('')
+ assert_equal(test, date(2000, 1, 1))
+ finally:
+ StringConverter._mapper = _original_mapper
def test_string_to_object(self):
"Make sure that string-to-object functions are properly recognized"
)
assert_array_equal(a, b)
+ @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning")
+ @pytest.mark.filterwarnings(
+ "ignore:invalid value encountered in (true_divide|double_scalars):"
+ "RuntimeWarning"
+ )
+ @pytest.mark.parametrize("mode", ["mean", "median"])
+ def test_zero_stat_length_valid(self, mode):
+ arr = np.pad([1., 2.], (1, 2), mode, stat_length=0)
+ expected = np.array([np.nan, 1., 2., np.nan, np.nan])
+ assert_equal(arr, expected)
+
+ @pytest.mark.parametrize("mode", ["minimum", "maximum"])
+ def test_zero_stat_length_invalid(self, mode):
+ match = "stat_length of 0 yields no value for padding"
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 0, mode, stat_length=0)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 0, mode, stat_length=(1, 0))
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 1, mode, stat_length=0)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 1, mode, stat_length=(1, 0))
+
class TestConstant(object):
def test_check_constant(self):
assert_array_equal(v, b, msg)
msg = base_msg.format('return_index', dt)
- v, j = unique(a, 1, 0, 0)
+ v, j = unique(a, True, False, False)
assert_array_equal(v, b, msg)
assert_array_equal(j, i1, msg)
msg = base_msg.format('return_inverse', dt)
- v, j = unique(a, 0, 1, 0)
+ v, j = unique(a, False, True, False)
assert_array_equal(v, b, msg)
assert_array_equal(j, i2, msg)
msg = base_msg.format('return_counts', dt)
- v, j = unique(a, 0, 0, 1)
+ v, j = unique(a, False, False, True)
assert_array_equal(v, b, msg)
assert_array_equal(j, c, msg)
msg = base_msg.format('return_index and return_inverse', dt)
- v, j1, j2 = unique(a, 1, 1, 0)
+ v, j1, j2 = unique(a, True, True, False)
assert_array_equal(v, b, msg)
assert_array_equal(j1, i1, msg)
assert_array_equal(j2, i2, msg)
msg = base_msg.format('return_index and return_counts', dt)
- v, j1, j2 = unique(a, 1, 0, 1)
+ v, j1, j2 = unique(a, True, False, True)
assert_array_equal(v, b, msg)
assert_array_equal(j1, i1, msg)
assert_array_equal(j2, c, msg)
msg = base_msg.format('return_inverse and return_counts', dt)
- v, j1, j2 = unique(a, 0, 1, 1)
+ v, j1, j2 = unique(a, False, True, True)
assert_array_equal(v, b, msg)
assert_array_equal(j1, i2, msg)
assert_array_equal(j2, c, msg)
msg = base_msg.format(('return_index, return_inverse '
'and return_counts'), dt)
- v, j1, j2, j3 = unique(a, 1, 1, 1)
+ v, j1, j2, j3 = unique(a, True, True, True)
assert_array_equal(v, b, msg)
assert_array_equal(j1, i1, msg)
assert_array_equal(j2, i2, msg)
assert_array_equal(unique(data, axis=1), result.astype(dtype), msg)
msg = 'Unique with 3d array and axis=2 failed'
- data3d = np.dstack([data] * 3)
- result = data3d[..., :1]
+ data3d = np.array([[[1, 1],
+ [1, 0]],
+ [[0, 1],
+ [0, 0]]]).astype(dtype)
+ result = np.take(data3d, [1, 0], axis=2)
assert_array_equal(unique(data3d, axis=2), result, msg)
uniq, idx, inv, cnt = unique(data, axis=0, return_index=True,
from __future__ import division, absolute_import, print_function
+import warnings
from decimal import Decimal
import numpy as np
)
+def filter_deprecation(func):
+ def newfunc(*args, **kwargs):
+ with warnings.catch_warnings(record=True) as ws:
+ warnings.filterwarnings('always', category=DeprecationWarning)
+ func(*args, **kwargs)
+ assert_(all(w.category is DeprecationWarning for w in ws))
+ return newfunc
+
+
class TestFinancial(object):
+ @filter_deprecation
+ def test_npv_irr_congruence(self):
+        # IRR is defined as the rate required for the present value of
+        # a series of cashflows to be zero, i.e. NPV(IRR(x), x) = 0
+ cashflows = np.array([-40000, 5000, 8000, 12000, 30000])
+ assert_allclose(np.npv(np.irr(cashflows), cashflows), 0, atol=1e-10, rtol=0)
+
+ @filter_deprecation
def test_rate(self):
assert_almost_equal(
np.rate(10, 0, -3500, 10000),
0.1107, 4)
+ @filter_deprecation
def test_rate_decimal(self):
rate = np.rate(Decimal('10'), Decimal('0'), Decimal('-3500'), Decimal('10000'))
assert_equal(Decimal('0.1106908537142689284704528100'), rate)
+ @filter_deprecation
def test_irr(self):
v = [-150000, 15000, 25000, 35000, 45000, 60000]
assert_almost_equal(np.irr(v), 0.0524, 2)
v = [-1, -2, -3]
assert_equal(np.irr(v), np.nan)
+ @filter_deprecation
def test_pv(self):
assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2)
+ @filter_deprecation
def test_pv_decimal(self):
assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')),
Decimal('-127128.1709461939327295222005'))
+ @filter_deprecation
def test_fv(self):
assert_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.362673042924)
+ @filter_deprecation
def test_fv_decimal(self):
assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 0, 0),
Decimal('86609.36267304300040536731624'))
+ @filter_deprecation
def test_pmt(self):
res = np.pmt(0.08 / 12, 5 * 12, 15000)
tgt = -304.145914
tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]])
assert_allclose(res, tgt)
+ @filter_deprecation
def test_pmt_decimal(self):
res = np.pmt(Decimal('0.08') / Decimal('12'), 5 * 12, 15000)
tgt = Decimal('-304.1459143262052370338701494')
assert_equal(res[1][0], tgt[1][0])
assert_equal(res[1][1], tgt[1][1])
+ @filter_deprecation
def test_ppmt(self):
assert_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2), -710.25)
+ @filter_deprecation
def test_ppmt_decimal(self):
assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000')),
Decimal('-710.2541257864217612489830917'))
# Two tests showing how Decimal is actually getting at a more exact result
# .23 / 12 does not come out nicely as a float but does as a decimal
+ @filter_deprecation
def test_ppmt_special_rate(self):
assert_equal(np.round(np.ppmt(0.23 / 12, 1, 60, 10000000000), 8), -90238044.232277036)
+ @filter_deprecation
def test_ppmt_special_rate_decimal(self):
# When rounded out to 8 decimal places like the float based test, this should not equal the same value
# as the float, substituted for the decimal
assert_equal(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')),
Decimal('-90238044.2322778884413969909'))
+ @filter_deprecation
def test_ipmt(self):
assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 2), -16.67)
+ @filter_deprecation
def test_ipmt_decimal(self):
result = np.ipmt(Decimal('0.1') / Decimal('12'), 1, 24, 2000)
assert_equal(result.flat[0], Decimal('-16.66666666666666666666666667'))
+ @filter_deprecation
def test_nper(self):
assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
21.54, 2)
+ @filter_deprecation
def test_nper2(self):
assert_almost_equal(np.nper(0.0, -2000, 0, 100000.),
50.0, 1)
+ @filter_deprecation
def test_npv(self):
assert_almost_equal(
np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]),
122.89, 2)
+ @filter_deprecation
def test_npv_decimal(self):
assert_equal(
np.npv(Decimal('0.05'), [-15000, 1500, 2500, 3500, 4500, 6000]),
Decimal('122.894854950942692161628715'))
+ @filter_deprecation
def test_mirr(self):
val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000]
assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4)
val = [39000, 30000, 21000, 37000, 46000]
assert_(np.isnan(np.mirr(val, 0.10, 0.12)))
+ @filter_deprecation
def test_mirr_decimal(self):
val = [Decimal('-4500'), Decimal('-800'), Decimal('800'), Decimal('800'),
Decimal('600'), Decimal('600'), Decimal('800'), Decimal('800'),
val = [Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')]
assert_(np.isnan(np.mirr(val, Decimal('0.10'), Decimal('0.12'))))
+ @filter_deprecation
def test_when(self):
# begin
assert_equal(np.rate(10, 20, -3500, 10000, 1),
assert_equal(np.nper(0.075, -2000, 0, 100000., 0),
np.nper(0.075, -2000, 0, 100000., 'end'))
+ @filter_deprecation
def test_decimal_with_when(self):
"""Test that decimals are still supported if the when argument is passed"""
# begin
np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
Decimal('0'), 'end').flat[0])
+ @filter_deprecation
def test_broadcast(self):
assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]),
[21.5449442, 20.76156441], 4)
[-74.998201, -75.62318601, -75.62318601,
-76.88882405, -76.88882405], 4)
+ @filter_deprecation
def test_broadcast_decimal(self):
        # Use almost_equal because precision is tested in the explicit tests;
        # this test ensures that broadcasting with Decimal is not broken.
with open(fname, 'wb') as f:
with assert_warns(UserWarning):
format.write_array(f, arr, version=None)
+
+
+@pytest.mark.parametrize('dt, fail', [
+ (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3',
+ metadata={'some': 'stuff'})]}), True),
+ (np.dtype(int, metadata={'some': 'stuff'}), False),
+ (np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False),
+ # recursive: metadata on the field of a dtype
+ (np.dtype({'names': ['a', 'b'], 'formats': [
+ float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]})
+ ]}), False)
+ ])
+def test_metadata_dtype(dt, fail):
+ # gh-14142
+ arr = np.ones(10, dtype=dt)
+ buf = BytesIO()
+ with assert_warns(UserWarning):
+ np.save(buf, arr)
+ buf.seek(0)
+ if fail:
+ with assert_raises(ValueError):
+ np.load(buf)
+ else:
+ arr2 = np.load(buf)
+ # BUG: assert_array_equal does not check metadata
+ from numpy.lib.format import _has_metadata
+ assert_array_equal(arr, arr2)
+ assert _has_metadata(arr.dtype)
+ assert not _has_metadata(arr2.dtype)
+
assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])
def test_deprecated_empty(self):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always")
- assert_equal(select([], [], 3j), 3j)
-
- with warnings.catch_warnings():
- warnings.simplefilter("always")
- assert_warns(DeprecationWarning, select, [], [])
- warnings.simplefilter("error")
- assert_raises(DeprecationWarning, select, [], [])
+ assert_raises(ValueError, select, [], [], 3j)
+ assert_raises(ValueError, select, [], [])
def test_non_bool_deprecation(self):
choices = self.choices
conditions = self.conditions[:]
- with warnings.catch_warnings():
- warnings.filterwarnings("always")
- conditions[0] = conditions[0].astype(np.int_)
- assert_warns(DeprecationWarning, select, conditions, choices)
- conditions[0] = conditions[0].astype(np.uint8)
- assert_warns(DeprecationWarning, select, conditions, choices)
- warnings.filterwarnings("error")
- assert_raises(DeprecationWarning, select, conditions, choices)
+ conditions[0] = conditions[0].astype(np.int_)
+ assert_raises(TypeError, select, conditions, choices)
+ conditions[0] = conditions[0].astype(np.uint8)
+ assert_raises(TypeError, select, conditions, choices)
def test_many_arguments(self):
# This used to be limited by NPY_MAXARGS == 32
np.arctan(3.0 / 1.0),
np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0,
-np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)]
- z = angle(x, deg=1)
+ z = angle(x, deg=True)
zo = np.array(yo) * 180 / np.pi
assert_array_almost_equal(y, yo, 11)
assert_array_almost_equal(z, zo, 11)
[-np.inf, np.inf]]))
def test_1D_rowvar(self):
- assert_allclose(cov(self.x3), cov(self.x3, rowvar=0))
+ assert_allclose(cov(self.x3), cov(self.x3, rowvar=False))
y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501])
- assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=0))
+ assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False))
def test_1D_variance(self):
assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1))
assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan)
def test_fraction(self):
- x = [Fraction(i, 2) for i in np.arange(8)]
+ x = [Fraction(i, 2) for i in range(8)]
p = np.percentile(x, Fraction(0))
assert_equal(p, Fraction(0))
def test_fraction(self):
# fractional input, integral quantile
- x = [Fraction(i, 2) for i in np.arange(8)]
+ x = [Fraction(i, 2) for i in range(8)]
q = np.quantile(x, 0)
assert_equal(q, 0)
assert_raises_regex(
ValueError, "out of bounds", np.unravel_index, [1], ())
+ @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"])
+ def test_empty_array_ravel(self, mode):
+ res = np.ravel_multi_index(
+ np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode)
+ assert(res.shape == (0,))
+
+ with assert_raises(ValueError):
+ np.ravel_multi_index(
+ np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode)
+
+ def test_empty_array_unravel(self):
+ res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0))
+ # res is a tuple of three empty arrays
+ assert(len(res) == 3)
+ assert(all(a.shape == (0,) for a in res))
+
+ with assert_raises(ValueError):
+ np.unravel_index([1], (2, 1, 0))
class TestGrid(object):
def test_basic(self):
assert_almost_equal(a[1]-a[0], 2.0/9.0, 11)
def test_linspace_equivalence(self):
- y, st = np.linspace(2, 10, retstep=1)
+ y, st = np.linspace(2, 10, retstep=True)
assert_almost_equal(st, 8/49.0)
assert_array_almost_equal(y, mgrid[2:10:50j], 13)
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings
)
+from numpy.testing._private.utils import requires_memory
class TextIO(BytesIO):
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
with tempdir() as tmpdir:
# set encoding as on windows it may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
def test_unicode_roundtrip(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
# our gz wrapper support encoding
suffixes = ['', '.gz']
# stdlib 2 versions do not support encoding
np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
fmt=['%s'], encoding='UTF-16-LE')
b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
- encoding='UTF-16-LE', dtype=np.unicode)
+ encoding='UTF-16-LE', dtype=np.unicode_)
assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
@pytest.mark.skipif(sys.platform=='win32',
reason="large files cause problems")
@pytest.mark.slow
+ @requires_memory(free_bytes=7e9)
def test_large_zip(self):
        # The test takes at least 6GB of memory and writes a file larger than 4GB
- try:
- a = 'a' * 6 * 1024 * 1024 * 1024
- del a
- except (MemoryError, OverflowError):
- pytest.skip("Cannot allocate enough memory for test")
test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
for i in range(800000)])
with tempdir() as tmpdir:
with temppath() as path:
with open(path, "wb") as f:
f.write(nonascii.encode("UTF-16"))
- x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode)
+ x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
assert_array_equal(x, nonascii)
def test_binary_decode(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
- v = self.loadfunc(BytesIO(utf16), dtype=np.unicode, encoding='UTF-16')
+ v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_converters_decode(self):
c = TextIO()
c.write(b'\xcf\x96')
c.seek(0)
- x = self.loadfunc(c, dtype=np.unicode,
+ x = self.loadfunc(c, dtype=np.unicode_,
converters={0: lambda x: x.decode('UTF-8')})
a = np.array([b'\xcf\x96'.decode('UTF-8')])
assert_array_equal(x, a)
with temppath() as path:
with io.open(path, 'wt', encoding='UTF-8') as f:
f.write(utf8)
- x = self.loadfunc(path, dtype=np.unicode,
+ x = self.loadfunc(path, dtype=np.unicode_,
converters={0: lambda x: x + 't'},
encoding='UTF-8')
a = np.array([utf8 + 't'])
with open(path, "wb") as f:
f.write(butf8)
with open(path, "rb") as f:
- x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode)
+ x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
assert_array_equal(x, sutf8)
# test broken latin1 conversion people now rely on
with open(path, "rb") as f:
with open(path, 'wb') as f:
f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
- usecols=(2, 3), converters={2: np.unicode},
+ usecols=(2, 3), converters={2: np.compat.unicode},
encoding='UTF-8')
control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
dtype=[('', '|U11'), ('', float)])
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
+ # when skip_header > 0
+ test = np.genfromtxt(data, skip_header=1)
+ assert_equal(test, np.array([]))
+
def test_fancy_dtype_alt(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
- converters = {4: lambda x: "(%s)" % x}
+ converters = {4: lambda x: "(%s)" % x.decode()}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
- dtype=np.unicode)
+ dtype=np.unicode_)
assert_array_equal(test, ctl)
# test a mixed dtype
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
- dtype=np.unicode)
+ dtype=np.unicode_)
assert_array_equal(test, ctl)
def test_recfromtxt(self):
import pytest
import numpy as np
-from numpy.lib.nanfunctions import _nan_mask
+from numpy.lib.nanfunctions import _nan_mask, _replace_nan
from numpy.testing import (
assert_, assert_equal, assert_almost_equal, assert_no_warnings,
assert_raises, assert_array_equal, suppress_warnings
# for types that can't possibly contain NaN
if type(expected) is not np.ndarray:
assert actual is True
+
+
+def test__replace_nan():
+ """ Test that _replace_nan returns the original array if there are no
+ NaNs, not a copy.
+ """
+    for dtype in [np.bool_, np.int32, np.int64]:
+ arr = np.array([0, 1], dtype=dtype)
+ result, mask = _replace_nan(arr, 0)
+ assert mask is None
+ # do not make a copy if there are no nans
+ assert result is arr
+
+ for dtype in [np.float32, np.float64]:
+ arr = np.array([0, 1], dtype=dtype)
+ result, mask = _replace_nan(arr, 2)
+ assert (mask == False).all()
+ # mask is not None, so we make a copy
+ assert result is not arr
+ assert_equal(result, arr)
+
+ arr_nan = np.array([0, 1, np.nan], dtype=dtype)
+ result_nan, mask_nan = _replace_nan(arr_nan, 2)
+ assert_equal(mask_nan, np.array([False, False, True]))
+ assert result_nan is not arr_nan
+ assert_equal(result_nan, np.array([0, 1, 2]))
+ assert np.isnan(arr_nan[-1])
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
+ # dropping all fields results in an array with no fields
test = drop_fields(a, ['a', 'b'])
- assert_(test is None)
+ control = np.array([(), ()], dtype=[])
+ assert_equal(test, control)
def test_rename_fields(self):
# Test rename fields
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array(
- [(1, (2, 3.0)), (4, (5, 6.0))],
- dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+ [(1, (2, 3.0, ())), (4, (5, 6.0, ()))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])])
self.data = (w, x, y, z)
def test_solo(self):
test = merge_arrays((x, w), flatten=False)
controldtype = [('f0', int),
('f1', [('a', int),
- ('b', [('ba', float), ('bb', int)])])]
- control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
+ ('b', [('ba', float), ('bb', int), ('bc', [])])])]
+ control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))],
dtype=controldtype)
assert_equal(test, control)
# Ticket #91
x = np.random.random((3, 3))
y = x.copy()
- np.cov(x, rowvar=1)
- np.cov(y, rowvar=0)
+ np.cov(x, rowvar=True)
+ np.cov(y, rowvar=False)
assert_array_equal(x, y)
def test_mem_digitize(self):
def test_poly1d_nan_roots(self):
# Ticket #396
- p = np.poly1d([np.nan, np.nan, 1], r=0)
+ p = np.poly1d([np.nan, np.nan, 1], r=False)
assert_raises(np.linalg.LinAlgError, getattr, p, "r")
def test_mem_polymul(self):
assert_(b.shape[axis] == 1)
assert_(np.squeeze(b).shape == s)
- def test_deprecations(self):
- # 2017-05-17, 1.13.0
+ def test_axis_tuple(self):
+ a = np.empty((3, 3, 3))
+ assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3)
+ assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1)
+ assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1)
+ assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3)
+
+ def test_axis_out_of_range(self):
s = (2, 3, 4, 5)
a = np.empty(s)
- with warnings.catch_warnings():
- warnings.simplefilter("always")
- assert_warns(DeprecationWarning, expand_dims, a, -6)
- assert_warns(DeprecationWarning, expand_dims, a, 5)
+ assert_raises(np.AxisError, expand_dims, a, -6)
+ assert_raises(np.AxisError, expand_dims, a, 5)
+
+ a = np.empty((3, 3, 3))
+ assert_raises(np.AxisError, expand_dims, a, (0, -6))
+ assert_raises(np.AxisError, expand_dims, a, (0, 5))
+
+ def test_repeated_axis(self):
+ a = np.empty((3, 3, 3))
+ assert_raises(ValueError, expand_dims, a, axis=(1, 1))
def test_subclasses(self):
a = np.arange(10).reshape((2, 5))
assert_equal(res, tgt)
assert_equal(out, tgt)
- a = a.astype(np.complex)
+ a = a.astype(np.complex_)
with assert_raises(TypeError):
ufl.isposinf(a)
assert_equal(res, tgt)
assert_equal(out, tgt)
- a = a.astype(np.complex)
+ a = a.astype(np.complex_)
with assert_raises(TypeError):
ufl.isneginf(a)
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
- in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
+ in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
'G'
"""
- typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char
- for t in typechars]
- intersection = [t for t in typecodes if t in typeset]
+ typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char
+ for t in typechars)
+ intersection = set(t for t in typecodes if t in typeset)
if not intersection:
return default
if 'F' in intersection and 'd' in intersection:
return 'D'
- l = [(_typecodes_by_elsize.index(t), t) for t in intersection]
- l.sort()
- return l[0][1]
+ return min(intersection, key=_typecodes_by_elsize.index)
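The rewrite does not change behavior; a sketch of the two code paths:

import numpy as np

assert np.mintypecode(['d', 'f']) == 'd'  # smallest index in _typecodes_by_elsize wins
assert np.mintypecode(['F', 'd']) == 'D'  # the special-cased csingle + double pair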
def _asfarray_dispatcher(a, dtype=None):
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
+
+ .. versionadded:: 1.13
nan : int, float, optional
Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
+
+ .. versionadded:: 1.17
posinf : int, float, optional
Value to be used to fill positive infinity values. If no value is
passed then positive infinity values will be replaced with a very
large number.
+
+ .. versionadded:: 1.17
neginf : int, float, optional
Value to be used to fill negative infinity values. If no value is
passed then negative infinity values will be replaced with a very
small (or negative) number.
+
+ .. versionadded:: 1.17
- .. versionadded:: 1.13
+
Returns
-------
if kind in ('module', 'object'):
# don't show modules or objects
continue
- ok = True
doc = docstring.lower()
- for w in whats:
- if w not in doc:
- ok = False
- break
- if ok:
+ if all(w in doc for w in whats):
found.append(name)
# Relevance sort
if hasattr(item, x)]
return members
-#-----------------------------------------------------------------------------
-
-# The following SafeEval class and company are adapted from Michael Spencer's
-# ASPN Python Cookbook recipe: https://code.activestate.com/recipes/364469/
-#
-# Accordingly it is mostly Copyright 2006 by Michael Spencer.
-# The recipe, like most of the other ASPN Python Cookbook recipes was made
-# available under the Python license.
-# https://en.wikipedia.org/wiki/Python_License
-
-# It has been modified to:
-# * handle unary -/+
-# * support True/False/None
-# * raise SyntaxError instead of a custom exception.
-
-class SafeEval(object):
- """
- Object to evaluate constant string expressions.
-
- This includes strings with lists, dicts and tuples using the abstract
- syntax tree created by ``compiler.parse``.
-
- .. deprecated:: 1.10.0
-
- See Also
- --------
- safe_eval
-
- """
- def __init__(self):
- # 2014-10-15, 1.10
- warnings.warn("SafeEval is deprecated in 1.10 and will be removed.",
- DeprecationWarning, stacklevel=2)
-
- def visit(self, node):
- cls = node.__class__
- meth = getattr(self, 'visit' + cls.__name__, self.default)
- return meth(node)
-
- def default(self, node):
- raise SyntaxError("Unsupported source construct: %s"
- % node.__class__)
-
- def visitExpression(self, node):
- return self.visit(node.body)
-
- def visitNum(self, node):
- return node.n
-
- def visitStr(self, node):
- return node.s
-
- def visitBytes(self, node):
- return node.s
-
- def visitDict(self, node,**kw):
- return dict([(self.visit(k), self.visit(v))
- for k, v in zip(node.keys, node.values)])
-
- def visitTuple(self, node):
- return tuple([self.visit(i) for i in node.elts])
-
- def visitList(self, node):
- return [self.visit(i) for i in node.elts]
-
- def visitUnaryOp(self, node):
- import ast
- if isinstance(node.op, ast.UAdd):
- return +self.visit(node.operand)
- elif isinstance(node.op, ast.USub):
- return -self.visit(node.operand)
- else:
- raise SyntaxError("Unknown unary op: %r" % node.op)
-
- def visitName(self, node):
- if node.id == 'False':
- return False
- elif node.id == 'True':
- return True
- elif node.id == 'None':
- return None
- else:
- raise SyntaxError("Unknown name: %s" % node.id)
-
- def visitNameConstant(self, node):
- return node.value
-
def safe_eval(source):
"""
"""
-Core Linear Algebra Tools
-=========================
-
-=============== ==========================================================
-Linear algebra basics
-==========================================================================
-norm Vector or matrix norm
-inv Inverse of a square matrix
-solve Solve a linear system of equations
-det Determinant of a square matrix
-slogdet Logarithm of the determinant of a square matrix
-lstsq Solve linear least-squares problem
-pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
- value decomposition
-matrix_power Integer power of a square matrix
-matrix_rank Calculate matrix rank using an SVD-based method
-=============== ==========================================================
-
-=============== ==========================================================
-Eigenvalues and decompositions
-==========================================================================
-eig Eigenvalues and vectors of a square matrix
-eigh Eigenvalues and eigenvectors of a Hermitian matrix
-eigvals Eigenvalues of a square matrix
-eigvalsh Eigenvalues of a Hermitian matrix
-qr QR decomposition of a matrix
-svd Singular value decomposition of a matrix
-cholesky Cholesky decomposition of a matrix
-=============== ==========================================================
-
-=============== ==========================================================
-Tensor operations
-==========================================================================
-tensorsolve Solve a linear tensor equation
-tensorinv Calculate an inverse of a tensor
-=============== ==========================================================
-
-=============== ==========================================================
+``numpy.linalg``
+================
+
+The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient
+low level implementations of standard linear algebra algorithms. Those
+libraries may be provided by NumPy itself using C versions of a subset of their
+reference implementations but, when possible, highly optimized libraries that
+take advantage of specialized processor functionality are preferred. Examples
+of such libraries are OpenBLAS, MKL (TM), and ATLAS. Because those libraries
+are multithreaded and processor dependent, environment variables and external
+packages such as threadpoolctl may be needed to control the number of threads
+or specify the processor architecture.
+
+- OpenBLAS: https://www.openblas.net/
+- threadpoolctl: https://github.com/joblib/threadpoolctl
+
+Please note that the most-used linear algebra functions in NumPy are present in
+the main ``numpy`` namespace rather than in ``numpy.linalg``. These are:
+``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``,
+``einsum_path`` and ``kron``.
+
+Functions present in ``numpy.linalg`` are listed below.
+
+
+Matrix and vector products
+--------------------------
+
+ multi_dot
+ matrix_power
+
+Decompositions
+--------------
+
+ cholesky
+ qr
+ svd
+
+Matrix eigenvalues
+------------------
+
+ eig
+ eigh
+ eigvals
+ eigvalsh
+
+Norms and other numbers
+-----------------------
+
+ norm
+ cond
+ det
+ matrix_rank
+ slogdet
+
+Solving equations and inverting matrices
+----------------------------------------
+
+ solve
+ tensorsolve
+ lstsq
+ inv
+ pinv
+ tensorinv
+
Exceptions
-==========================================================================
-LinAlgError Indicates a failed linear algebra operation
-=============== ==========================================================
+----------
+
+ LinAlgError
"""
from __future__ import division, absolute_import, print_function
# To get sub-modules
-from .info import __doc__
-
from .linalg import *
from numpy._pytesttester import PytestTester
+++ /dev/null
-"""\
-Core Linear Algebra Tools
--------------------------
-Linear algebra basics:
-
-- norm Vector or matrix norm
-- inv Inverse of a square matrix
-- solve Solve a linear system of equations
-- det Determinant of a square matrix
-- lstsq Solve linear least-squares problem
-- pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
- value decomposition
-- matrix_power Integer power of a square matrix
-
-Eigenvalues and decompositions:
-
-- eig Eigenvalues and vectors of a square matrix
-- eigh Eigenvalues and eigenvectors of a Hermitian matrix
-- eigvals Eigenvalues of a square matrix
-- eigvalsh Eigenvalues of a Hermitian matrix
-- qr QR decomposition of a matrix
-- svd Singular value decomposition of a matrix
-- cholesky Cholesky decomposition of a matrix
-
-Tensor operations:
-
-- tensorsolve Solve a linear tensor equation
-- tensorinv Calculate an inverse of a tensor
-
-Exceptions:
-
-- LinAlgError Indicates a failed linear algebra operation
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['core']
Return an iterator for which items can be pushed back into.
Call the .pushback(item) method to have item returned as the next
- value of .next().
+ value of next().
"""
def __init__(self, iterable):
object.__init__(self)
#include "Python.h"
+#include "numpy/npy_common.h"
+#include "npy_cblas.h"
#undef c_abs
#include "f2c.h"
info: Number of the invalid parameter.
*/
-int xerbla_(char *srname, integer *info)
+CBLAS_INT BLAS_FUNC(xerbla)(char *srname, CBLAS_INT *info)
{
static const char format[] = "On entry to %.*s" \
" parameter number %d had an illegal value";
#ifdef WITH_THREAD
save = PyGILState_Ensure();
#endif
- PyOS_snprintf(buf, sizeof(buf), format, len, srname, *info);
+ PyOS_snprintf(buf, sizeof(buf), format, len, srname, (int)*info);
PyErr_SetString(PyExc_ValueError, buf);
#ifdef WITH_THREAD
PyGILState_Release(save);
#include "Python.h"
#include "numpy/arrayobject.h"
+#include "npy_cblas.h"
+
+
+#define FNAME(name) BLAS_FUNC(name)
+
+typedef CBLAS_INT fortran_int;
+
+#ifdef HAVE_BLAS_ILP64
+
+#if NPY_BITSOF_SHORT == 64
+#define FINT_PYFMT "h"
+#elif NPY_BITSOF_INT == 64
+#define FINT_PYFMT "i"
+#elif NPY_BITSOF_LONG == 64
+#define FINT_PYFMT "l"
+#elif NPY_BITSOF_LONGLONG == 64
+#define FINT_PYFMT "L"
+#else
+#error No compatible 64-bit integer size. \
+ Please contact NumPy maintainers and give detailed information about your \
+ compiler and platform, or set NPY_USE_BLAS64_=0
+#endif
-#ifdef NO_APPEND_FORTRAN
-# define FNAME(x) x
#else
-# define FNAME(x) x##_
+#define FINT_PYFMT "i"
#endif
typedef struct { float r, i; } f2c_complex;
typedef struct { double r, i; } f2c_doublecomplex;
/* typedef long int (*L_fp)(); */
-extern int FNAME(dgelsd)(int *m, int *n, int *nrhs,
- double a[], int *lda, double b[], int *ldb,
- double s[], double *rcond, int *rank,
- double work[], int *lwork, int iwork[], int *info);
+extern fortran_int FNAME(dgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda, double b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ double work[], fortran_int *lwork, fortran_int iwork[], fortran_int *info);
-extern int FNAME(zgelsd)(int *m, int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- f2c_doublecomplex b[], int *ldb,
- double s[], double *rcond, int *rank,
- f2c_doublecomplex work[], int *lwork,
- double rwork[], int iwork[], int *info);
+extern fortran_int FNAME(zgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ f2c_doublecomplex b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ f2c_doublecomplex work[], fortran_int *lwork,
+ double rwork[], fortran_int iwork[], fortran_int *info);
-extern int FNAME(dgeqrf)(int *m, int *n, double a[], int *lda,
+extern fortran_int FNAME(dgeqrf)(fortran_int *m, fortran_int *n, double a[], fortran_int *lda,
double tau[], double work[],
- int *lwork, int *info);
+ fortran_int *lwork, fortran_int *info);
-extern int FNAME(zgeqrf)(int *m, int *n, f2c_doublecomplex a[], int *lda,
+extern fortran_int FNAME(zgeqrf)(fortran_int *m, fortran_int *n, f2c_doublecomplex a[], fortran_int *lda,
f2c_doublecomplex tau[], f2c_doublecomplex work[],
- int *lwork, int *info);
+ fortran_int *lwork, fortran_int *info);
-extern int FNAME(dorgqr)(int *m, int *n, int *k, double a[], int *lda,
+extern fortran_int FNAME(dorgqr)(fortran_int *m, fortran_int *n, fortran_int *k, double a[], fortran_int *lda,
double tau[], double work[],
- int *lwork, int *info);
+ fortran_int *lwork, fortran_int *info);
-extern int FNAME(zungqr)(int *m, int *n, int *k, f2c_doublecomplex a[],
- int *lda, f2c_doublecomplex tau[],
- f2c_doublecomplex work[], int *lwork, int *info);
+extern fortran_int FNAME(zungqr)(fortran_int *m, fortran_int *n, fortran_int *k, f2c_doublecomplex a[],
+ fortran_int *lda, f2c_doublecomplex tau[],
+ f2c_doublecomplex work[], fortran_int *lwork, fortran_int *info);
-extern int FNAME(xerbla)(char *srname, int *info);
+extern fortran_int FNAME(xerbla)(char *srname, fortran_int *info);
static PyObject *LapackError;
#define FDATA(p) ((float *) PyArray_DATA((PyArrayObject *)p))
#define CDATA(p) ((f2c_complex *) PyArray_DATA((PyArrayObject *)p))
#define ZDATA(p) ((f2c_doublecomplex *) PyArray_DATA((PyArrayObject *)p))
-#define IDATA(p) ((int *) PyArray_DATA((PyArrayObject *)p))
+#define IDATA(p) ((fortran_int *) PyArray_DATA((PyArrayObject *)p))
static PyObject *
lapack_lite_dgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m;
- int n;
- int nrhs;
+ fortran_int lapack_lite_status;
+ fortran_int m;
+ fortran_int n;
+ fortran_int nrhs;
PyObject *a;
- int lda;
+ fortran_int lda;
PyObject *b;
- int ldb;
+ fortran_int ldb;
PyObject *s;
double rcond;
- int rank;
+ fortran_int rank;
PyObject *work;
PyObject *iwork;
- int lwork;
- int info;
- TRY(PyArg_ParseTuple(args,"iiiOiOiOdiOiOi:dgelsd",
+ fortran_int lwork;
+ fortran_int info;
+
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT "O"
+ FINT_PYFMT "O" "d" FINT_PYFMT "O" FINT_PYFMT "O"
+ FINT_PYFMT ":dgelsd"),
&m,&n,&nrhs,&a,&lda,&b,&ldb,&s,&rcond,
&rank,&work,&lwork,&iwork,&info));
TRY(check_object(b,NPY_DOUBLE,"b","NPY_DOUBLE","dgelsd"));
TRY(check_object(s,NPY_DOUBLE,"s","NPY_DOUBLE","dgelsd"));
TRY(check_object(work,NPY_DOUBLE,"work","NPY_DOUBLE","dgelsd"));
+#ifndef NPY_UMATH_USE_BLAS64_
TRY(check_object(iwork,NPY_INT,"iwork","NPY_INT","dgelsd"));
+#else
+ TRY(check_object(iwork,NPY_INT64,"iwork","NPY_INT64","dgelsd"));
+#endif
lapack_lite_status =
FNAME(dgelsd)(&m,&n,&nrhs,DDATA(a),&lda,DDATA(b),&ldb,
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:d,s:i,s:i,s:i}","dgelsd_",
- lapack_lite_status,"m",m,"n",n,"nrhs",nrhs,
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:d,s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT "}"),
+ "dgelsd_",lapack_lite_status,"m",m,"n",n,"nrhs",nrhs,
"lda",lda,"ldb",ldb,"rcond",rcond,"rank",rank,
"lwork",lwork,"info",info);
}
static PyObject *
lapack_lite_dgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
+ fortran_int lda;
+ fortran_int info;
- TRY(PyArg_ParseTuple(args,"iiOiOOii:dgeqrf",&m,&n,&a,&lda,&tau,&work,&lwork,&info));
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT "OO"
+ FINT_PYFMT FINT_PYFMT ":dgeqrf"),
+ &m,&n,&a,&lda,&tau,&work,&lwork,&info));
/* check objects and convert to right storage order */
TRY(check_object(a,NPY_DOUBLE,"a","NPY_DOUBLE","dgeqrf"));
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","dgeqrf_",
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT "}"),
+ "dgeqrf_",
lapack_lite_status,"m",m,"n",n,"lda",lda,
"lwork",lwork,"info",info);
}
static PyObject *
lapack_lite_dorgqr(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, k, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, k, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
-
- TRY(PyArg_ParseTuple(args,"iiiOiOOii:dorgqr", &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
+ fortran_int lda;
+ fortran_int info;
+
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O"
+ FINT_PYFMT "OO" FINT_PYFMT FINT_PYFMT
+ ":dorgqr"),
+ &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
TRY(check_object(a,NPY_DOUBLE,"a","NPY_DOUBLE","dorgqr"));
TRY(check_object(tau,NPY_DOUBLE,"tau","NPY_DOUBLE","dorgqr"));
TRY(check_object(work,NPY_DOUBLE,"work","NPY_DOUBLE","dorgqr"));
static PyObject *
lapack_lite_zgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m;
- int n;
- int nrhs;
+ fortran_int lapack_lite_status;
+ fortran_int m;
+ fortran_int n;
+ fortran_int nrhs;
PyObject *a;
- int lda;
+ fortran_int lda;
PyObject *b;
- int ldb;
+ fortran_int ldb;
PyObject *s;
double rcond;
- int rank;
+ fortran_int rank;
PyObject *work;
- int lwork;
+ fortran_int lwork;
PyObject *rwork;
PyObject *iwork;
- int info;
- TRY(PyArg_ParseTuple(args,"iiiOiOiOdiOiOOi:zgelsd",
+ fortran_int info;
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT
+ "O" FINT_PYFMT "Od" FINT_PYFMT "O" FINT_PYFMT
+ "OO" FINT_PYFMT ":zgelsd"),
&m,&n,&nrhs,&a,&lda,&b,&ldb,&s,&rcond,
&rank,&work,&lwork,&rwork,&iwork,&info));
TRY(check_object(s,NPY_DOUBLE,"s","NPY_DOUBLE","zgelsd"));
TRY(check_object(work,NPY_CDOUBLE,"work","NPY_CDOUBLE","zgelsd"));
TRY(check_object(rwork,NPY_DOUBLE,"rwork","NPY_DOUBLE","zgelsd"));
+#ifndef NPY_UMATH_USE_BLAS64_
TRY(check_object(iwork,NPY_INT,"iwork","NPY_INT","zgelsd"));
+#else
+ TRY(check_object(iwork,NPY_INT64,"iwork","NPY_INT64","zgelsd"));
+#endif
lapack_lite_status =
FNAME(zgelsd)(&m,&n,&nrhs,ZDATA(a),&lda,ZDATA(b),&ldb,DDATA(s),&rcond,
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i}","zgelsd_",
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ "}"),
+ "zgelsd_",
lapack_lite_status,"m",m,"n",n,"nrhs",nrhs,"lda",lda,
"ldb",ldb,"rank",rank,"lwork",lwork,"info",info);
}
static PyObject *
lapack_lite_zgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
+ fortran_int lda;
+ fortran_int info;
- TRY(PyArg_ParseTuple(args,"iiOiOOii:zgeqrf",&m,&n,&a,&lda,&tau,&work,&lwork,&info));
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT "OO"
+ FINT_PYFMT "" FINT_PYFMT ":zgeqrf"),
+ &m,&n,&a,&lda,&tau,&work,&lwork,&info));
/* check objects and convert to right storage order */
TRY(check_object(a,NPY_CDOUBLE,"a","NPY_CDOUBLE","zgeqrf"));
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","zgeqrf_",lapack_lite_status,"m",m,"n",n,"lda",lda,"lwork",lwork,"info",info);
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT "}"),
+ "zgeqrf_",lapack_lite_status,"m",m,"n",n,"lda",lda,"lwork",lwork,"info",info);
}
static PyObject *
lapack_lite_zungqr(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, k, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, k, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
-
- TRY(PyArg_ParseTuple(args,"iiiOiOOii:zungqr", &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
+ fortran_int lda;
+ fortran_int info;
+
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O"
+ FINT_PYFMT "OO" FINT_PYFMT "" FINT_PYFMT
+ ":zungqr"),
+ &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
TRY(check_object(a,NPY_CDOUBLE,"a","NPY_CDOUBLE","zungqr"));
TRY(check_object(tau,NPY_CDOUBLE,"tau","NPY_CDOUBLE","zungqr"));
TRY(check_object(work,NPY_CDOUBLE,"work","NPY_CDOUBLE","zungqr"));
return NULL;
}
- return Py_BuildValue("{s:i,s:i}","zungqr_",lapack_lite_status,
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT "}"),
+ "zungqr_",lapack_lite_status,
"info",info);
}
static PyObject *
lapack_lite_xerbla(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int info = -1;
+ fortran_int info = -1;
NPY_BEGIN_THREADS_DEF;
NPY_BEGIN_THREADS;
else:
return cast_arrays
-def _assertRank2(*arrays):
+def _assert_2d(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
-def _assertRankAtLeast2(*arrays):
+def _assert_stacked_2d(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
-def _assertNdSquareness(*arrays):
+def _assert_stacked_square(*arrays):
for a in arrays:
m, n = a.shape[-2:]
if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
-def _assertFinite(*arrays):
+def _assert_finite(*arrays):
for a in arrays:
- if not (isfinite(a).all()):
+ if not isfinite(a).all():
raise LinAlgError("Array must not contain infs or NaNs")
-def _isEmpty2d(arr):
+def _is_empty_2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
-def _assertNoEmpty2d(*arrays):
- for a in arrays:
- if _isEmpty2d(a):
- raise LinAlgError("Arrays cannot be empty")
def transpose(a):
"""
"""
a, _ = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
"""
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
"""
a = asanyarray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
try:
n = operator.index(n)
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
----------
a : array_like, shape (M, N)
Matrix to be factored.
- mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
+ mode : {'reduced', 'complete', 'r', 'raw'}, optional
If K = min(M, N), then
* 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
* 'complete' : returns q, r with dimensions (M, M), (M, N)
* 'r' : returns r only with dimensions (K, N)
* 'raw' : returns h, tau with dimensions (N, M), (K,)
- * 'full' : alias of 'reduced', deprecated
- * 'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced', and to
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
- >>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='reduced'
True
- >>> # But only triu parts are guaranteed equal when mode='economic'
- >>> np.allclose(r, np.triu(r3[:6,:6], k=0))
- True
Example illustrating a common use of `qr`: solving least-squares
problems
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
- _assertRank2(a)
+ _assert_2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
"""
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- _assertFinite(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ _assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
"""
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- _assertFinite(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ _assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
s = abs(s)
return s
- _assertRankAtLeast2(a)
+ _assert_stacked_2d(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746 # may vary
- >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
+ >>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False))
0.70710678118654746 # may vary
"""
x = asarray(x) # in case we have a matrix
- _assertNoEmpty2d(x)
+ if _is_empty_2d(x):
+ raise LinAlgError("cond is not defined on empty arrays")
if p is None or p == 2 or p == -2:
s = svd(x, compute_uv=False)
with errstate(all='ignore'):
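The inline check above replaces the removed `_assertNoEmpty2d` helper and gives `cond` an explicit error message. Sketched doctest-style (exception module prefix elided):

    >>> np.linalg.cond(np.empty((0, 0)))
    Traceback (most recent call last):
        ...
    LinAlgError: cond is not defined on empty arrays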
else:
# Call inv(x) ignoring errors. The result array will
# contain nans in the entries where inversion failed.
- _assertRankAtLeast2(x)
- _assertNdSquareness(x)
+ _assert_stacked_2d(x)
+ _assert_stacked_square(x)
t, result_t = _commonType(x)
signature = 'D->D' if isComplexType(t) else 'd->d'
with errstate(all='ignore'):
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
- if _isEmpty2d(a):
+ if _is_empty_2d(a):
m, n = a.shape[-2:]
res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
"""
a = asarray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
"""
a = asarray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
- _assertRank2(a, b)
+ _assert_2d(a, b)
m, n = a.shape[-2:]
m2, n_rhs = b.shape[-2:]
if m != m2:
"""
y = moveaxis(x, (row_axis, col_axis), (-2, -1))
- result = op(svd(y, compute_uv=0), axis=-1)
+ result = op(svd(y, compute_uv=False), axis=-1)
return result
Parameters
----------
x : array_like
- Input array. If `axis` is None, `x` must be 1-D or 2-D.
+ Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
+ is None. If both `axis` and `ord` are None, the 2-norm of
+ ``x.ravel()`` will be returned.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
- `inf` object.
- axis : {int, 2-tuple of ints, None}, optional
+ `inf` object. The default is None.
+ axis : {None, int, 2-tuple of ints}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
- is 1-D) or a matrix norm (when `x` is 2-D) is returned.
+ is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
+ is None.
.. versionadded:: 1.8.0
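The reworded `norm` parameters call out the one case the old text left implicit: with both `ord` and `axis` None, a 2-D input is reduced as a flat vector. Illustration (the Frobenius norm of `a` equals the 2-norm of its ravel, sqrt(55)):

    >>> a = np.arange(6.).reshape(2, 3)
    >>> np.linalg.norm(a)          # ord=None, axis=None
    7.416198487095663
    >>> np.linalg.norm(a.ravel())  # 2-norm of the raveled array
    7.416198487095663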
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
- _assertRank2(*arrays)
+ _assert_2d(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
]
all_sources = config.paths(lapack_lite_src)
- lapack_info = get_info('lapack_opt', 0) # and {}
+ if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
+ lapack_info = get_info('lapack_ilp64_opt', 2)
+ else:
+ lapack_info = get_info('lapack_opt', 0) # and {}
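The build switch above keys off the NPY_USE_BLAS_ILP64 environment variable: any value other than "0" selects the 'lapack_ilp64_opt' info. A minimal sketch of the same test (shown with the variable unset):

    >>> import os
    >>> os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0"
    False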
def get_lapack_lite_sources(ext, build_dir):
if not lapack_info:
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
- assert_raises_regex,
+ assert_raises_regex, HAS_LAPACK64,
)
+from numpy.testing._private.utils import requires_memory
def consistent_subclass(out, in_):
class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
- u, s, vt = linalg.svd(a, 0)
+ u, s, vt = linalg.svd(a, False)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b, tags):
- u, s, vt = linalg.svd(a, 0, hermitian=True)
+ u, s, vt = linalg.svd(a, False, hermitian=True)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
def do(self, a, b, tags):
arr = np.asarray(a)
m, n = arr.shape
- u, s, vt = linalg.svd(a, 0)
+ u, s, vt = linalg.svd(a, False)
x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
if m == 0:
assert_((x == 0).all())
arr = np.array([[1, -2], [2, 5]], dtype='float16')
with assert_raises_regex(TypeError, "unsupported in linalg"):
linalg.cholesky(arr)
+
+
+@pytest.mark.slow
+@pytest.mark.xfail(not HAS_LAPACK64, run=False,
+ reason="Numpy not compiled with 64-bit BLAS/LAPACK")
+@requires_memory(free_bytes=16e9)
+def test_blas64_dot():
+ n = 2**32
+ a = np.zeros([1, n], dtype=np.float32)
+ b = np.ones([1, 1], dtype=np.float32)
+ a[0,-1] = 1
+ c = np.dot(b, a)
+ assert_equal(c[0,-1], 1)
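The test above is meaningful only with 64-bit BLAS integers: the contracted dimension has 2**32 elements, which overflows a 32-bit Fortran integer. A one-line sanity check of that claim:

    >>> np.iinfo(np.int32).max < 2**32
    True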
#include "npy_config.h"
+#include "npy_cblas.h"
+
#include <stddef.h>
#include <stdio.h>
#include <assert.h>
*****************************************************************************
*/
-#ifdef NO_APPEND_FORTRAN
-# define FNAME(x) x
-#else
-# define FNAME(x) x##_
-#endif
+#define FNAME(x) BLAS_FUNC(x)
+
+typedef CBLAS_INT fortran_int;
typedef struct { float r, i; } f2c_complex;
typedef struct { double r, i; } f2c_doublecomplex;
/* typedef long int (*L_fp)(); */
-extern int
-FNAME(sgeev)(char *jobvl, char *jobvr, int *n,
- float a[], int *lda, float wr[], float wi[],
- float vl[], int *ldvl, float vr[], int *ldvr,
- float work[], int lwork[],
- int *info);
-extern int
-FNAME(dgeev)(char *jobvl, char *jobvr, int *n,
- double a[], int *lda, double wr[], double wi[],
- double vl[], int *ldvl, double vr[], int *ldvr,
- double work[], int lwork[],
- int *info);
-extern int
-FNAME(cgeev)(char *jobvl, char *jobvr, int *n,
- f2c_doublecomplex a[], int *lda,
+typedef float fortran_real;
+typedef double fortran_doublereal;
+typedef f2c_complex fortran_complex;
+typedef f2c_doublecomplex fortran_doublecomplex;
+
+extern fortran_int
+FNAME(sgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ float a[], fortran_int *lda, float wr[], float wi[],
+ float vl[], fortran_int *ldvl, float vr[], fortran_int *ldvr,
+ float work[], fortran_int lwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(dgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ double a[], fortran_int *lda, double wr[], double wi[],
+ double vl[], fortran_int *ldvl, double vr[], fortran_int *ldvr,
+ double work[], fortran_int lwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(cgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
f2c_doublecomplex w[],
- f2c_doublecomplex vl[], int *ldvl,
- f2c_doublecomplex vr[], int *ldvr,
- f2c_doublecomplex work[], int *lwork,
+ f2c_doublecomplex vl[], fortran_int *ldvl,
+ f2c_doublecomplex vr[], fortran_int *ldvr,
+ f2c_doublecomplex work[], fortran_int *lwork,
double rwork[],
- int *info);
-extern int
-FNAME(zgeev)(char *jobvl, char *jobvr, int *n,
- f2c_doublecomplex a[], int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(zgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
f2c_doublecomplex w[],
- f2c_doublecomplex vl[], int *ldvl,
- f2c_doublecomplex vr[], int *ldvr,
- f2c_doublecomplex work[], int *lwork,
+ f2c_doublecomplex vl[], fortran_int *ldvl,
+ f2c_doublecomplex vr[], fortran_int *ldvr,
+ f2c_doublecomplex work[], fortran_int *lwork,
double rwork[],
- int *info);
-
-extern int
-FNAME(ssyevd)(char *jobz, char *uplo, int *n,
- float a[], int *lda, float w[], float work[],
- int *lwork, int iwork[], int *liwork,
- int *info);
-extern int
-FNAME(dsyevd)(char *jobz, char *uplo, int *n,
- double a[], int *lda, double w[], double work[],
- int *lwork, int iwork[], int *liwork,
- int *info);
-extern int
-FNAME(cheevd)(char *jobz, char *uplo, int *n,
- f2c_complex a[], int *lda,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(ssyevd)(char *jobz, char *uplo, fortran_int *n,
+ float a[], fortran_int *lda, float w[], float work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *liwork,
+ fortran_int *info);
+extern fortran_int
+FNAME(dsyevd)(char *jobz, char *uplo, fortran_int *n,
+ double a[], fortran_int *lda, double w[], double work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *liwork,
+ fortran_int *info);
+extern fortran_int
+FNAME(cheevd)(char *jobz, char *uplo, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
float w[], f2c_complex work[],
- int *lwork, float rwork[], int *lrwork, int iwork[],
- int *liwork,
- int *info);
-extern int
-FNAME(zheevd)(char *jobz, char *uplo, int *n,
- f2c_doublecomplex a[], int *lda,
+ fortran_int *lwork, float rwork[], fortran_int *lrwork, fortran_int iwork[],
+ fortran_int *liwork,
+ fortran_int *info);
+extern fortran_int
+FNAME(zheevd)(char *jobz, char *uplo, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
double w[], f2c_doublecomplex work[],
- int *lwork, double rwork[], int *lrwork, int iwork[],
- int *liwork,
- int *info);
-
-extern int
-FNAME(sgelsd)(int *m, int *n, int *nrhs,
- float a[], int *lda, float b[], int *ldb,
- float s[], float *rcond, int *rank,
- float work[], int *lwork, int iwork[],
- int *info);
-extern int
-FNAME(dgelsd)(int *m, int *n, int *nrhs,
- double a[], int *lda, double b[], int *ldb,
- double s[], double *rcond, int *rank,
- double work[], int *lwork, int iwork[],
- int *info);
-extern int
-FNAME(cgelsd)(int *m, int *n, int *nrhs,
- f2c_complex a[], int *lda,
- f2c_complex b[], int *ldb,
- float s[], float *rcond, int *rank,
- f2c_complex work[], int *lwork,
- float rwork[], int iwork[],
- int *info);
-extern int
-FNAME(zgelsd)(int *m, int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- f2c_doublecomplex b[], int *ldb,
- double s[], double *rcond, int *rank,
- f2c_doublecomplex work[], int *lwork,
- double rwork[], int iwork[],
- int *info);
-
-extern int
-FNAME(sgesv)(int *n, int *nrhs,
- float a[], int *lda,
- int ipiv[],
- float b[], int *ldb,
- int *info);
-extern int
-FNAME(dgesv)(int *n, int *nrhs,
- double a[], int *lda,
- int ipiv[],
- double b[], int *ldb,
- int *info);
-extern int
-FNAME(cgesv)(int *n, int *nrhs,
- f2c_complex a[], int *lda,
- int ipiv[],
- f2c_complex b[], int *ldb,
- int *info);
-extern int
-FNAME(zgesv)(int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- int ipiv[],
- f2c_doublecomplex b[], int *ldb,
- int *info);
-
-extern int
-FNAME(sgetrf)(int *m, int *n,
- float a[], int *lda,
- int ipiv[],
- int *info);
-extern int
-FNAME(dgetrf)(int *m, int *n,
- double a[], int *lda,
- int ipiv[],
- int *info);
-extern int
-FNAME(cgetrf)(int *m, int *n,
- f2c_complex a[], int *lda,
- int ipiv[],
- int *info);
-extern int
-FNAME(zgetrf)(int *m, int *n,
- f2c_doublecomplex a[], int *lda,
- int ipiv[],
- int *info);
-
-extern int
-FNAME(spotrf)(char *uplo, int *n,
- float a[], int *lda,
- int *info);
-extern int
-FNAME(dpotrf)(char *uplo, int *n,
- double a[], int *lda,
- int *info);
-extern int
-FNAME(cpotrf)(char *uplo, int *n,
- f2c_complex a[], int *lda,
- int *info);
-extern int
-FNAME(zpotrf)(char *uplo, int *n,
- f2c_doublecomplex a[], int *lda,
- int *info);
-
-extern int
-FNAME(sgesdd)(char *jobz, int *m, int *n,
- float a[], int *lda, float s[], float u[],
- int *ldu, float vt[], int *ldvt, float work[],
- int *lwork, int iwork[], int *info);
-extern int
-FNAME(dgesdd)(char *jobz, int *m, int *n,
- double a[], int *lda, double s[], double u[],
- int *ldu, double vt[], int *ldvt, double work[],
- int *lwork, int iwork[], int *info);
-extern int
-FNAME(cgesdd)(char *jobz, int *m, int *n,
- f2c_complex a[], int *lda,
- float s[], f2c_complex u[], int *ldu,
- f2c_complex vt[], int *ldvt,
- f2c_complex work[], int *lwork,
- float rwork[], int iwork[], int *info);
-extern int
-FNAME(zgesdd)(char *jobz, int *m, int *n,
- f2c_doublecomplex a[], int *lda,
- double s[], f2c_doublecomplex u[], int *ldu,
- f2c_doublecomplex vt[], int *ldvt,
- f2c_doublecomplex work[], int *lwork,
- double rwork[], int iwork[], int *info);
-
-extern int
-FNAME(spotrs)(char *uplo, int *n, int *nrhs,
- float a[], int *lda,
- float b[], int *ldb,
- int *info);
-extern int
-FNAME(dpotrs)(char *uplo, int *n, int *nrhs,
- double a[], int *lda,
- double b[], int *ldb,
- int *info);
-extern int
-FNAME(cpotrs)(char *uplo, int *n, int *nrhs,
- f2c_complex a[], int *lda,
- f2c_complex b[], int *ldb,
- int *info);
-extern int
-FNAME(zpotrs)(char *uplo, int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- f2c_doublecomplex b[], int *ldb,
- int *info);
-
-extern int
-FNAME(spotri)(char *uplo, int *n,
- float a[], int *lda,
- int *info);
-extern int
-FNAME(dpotri)(char *uplo, int *n,
- double a[], int *lda,
- int *info);
-extern int
-FNAME(cpotri)(char *uplo, int *n,
- f2c_complex a[], int *lda,
- int *info);
-extern int
-FNAME(zpotri)(char *uplo, int *n,
- f2c_doublecomplex a[], int *lda,
- int *info);
-
-extern int
-FNAME(scopy)(int *n,
- float *sx, int *incx,
- float *sy, int *incy);
-extern int
-FNAME(dcopy)(int *n,
- double *sx, int *incx,
- double *sy, int *incy);
-extern int
-FNAME(ccopy)(int *n,
- f2c_complex *sx, int *incx,
- f2c_complex *sy, int *incy);
-extern int
-FNAME(zcopy)(int *n,
- f2c_doublecomplex *sx, int *incx,
- f2c_doublecomplex *sy, int *incy);
+ fortran_int *lwork, double rwork[], fortran_int *lrwork, fortran_int iwork[],
+ fortran_int *liwork,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ float a[], fortran_int *lda, float b[], fortran_int *ldb,
+ float s[], float *rcond, fortran_int *rank,
+ float work[], fortran_int *lwork, fortran_int iwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(dgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda, double b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ double work[], fortran_int *lwork, fortran_int iwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(cgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ f2c_complex a[], fortran_int *lda,
+ f2c_complex b[], fortran_int *ldb,
+ float s[], float *rcond, fortran_int *rank,
+ f2c_complex work[], fortran_int *lwork,
+ float rwork[], fortran_int iwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(zgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ f2c_doublecomplex b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ f2c_doublecomplex work[], fortran_int *lwork,
+ double rwork[], fortran_int iwork[],
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgesv)(fortran_int *n, fortran_int *nrhs,
+ float a[], fortran_int *lda,
+ fortran_int ipiv[],
+ float b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(dgesv)(fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda,
+ fortran_int ipiv[],
+ double b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(cgesv)(fortran_int *n, fortran_int *nrhs,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ f2c_complex b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(zgesv)(fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ f2c_doublecomplex b[], fortran_int *ldb,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgetrf)(fortran_int *m, fortran_int *n,
+ float a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+extern fortran_int
+FNAME(dgetrf)(fortran_int *m, fortran_int *n,
+ double a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+extern fortran_int
+FNAME(cgetrf)(fortran_int *m, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+extern fortran_int
+FNAME(zgetrf)(fortran_int *m, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+
+extern fortran_int
+FNAME(spotrf)(char *uplo, fortran_int *n,
+ float a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(dpotrf)(char *uplo, fortran_int *n,
+ double a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(cpotrf)(char *uplo, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(zpotrf)(char *uplo, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ float a[], fortran_int *lda, float s[], float u[],
+ fortran_int *ldu, float vt[], fortran_int *ldvt, float work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *info);
+extern fortran_int
+FNAME(dgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ double a[], fortran_int *lda, double s[], double u[],
+ fortran_int *ldu, double vt[], fortran_int *ldvt, double work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *info);
+extern fortran_int
+FNAME(cgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ float s[], f2c_complex u[], fortran_int *ldu,
+ f2c_complex vt[], fortran_int *ldvt,
+ f2c_complex work[], fortran_int *lwork,
+ float rwork[], fortran_int iwork[], fortran_int *info);
+extern fortran_int
+FNAME(zgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ double s[], f2c_doublecomplex u[], fortran_int *ldu,
+ f2c_doublecomplex vt[], fortran_int *ldvt,
+ f2c_doublecomplex work[], fortran_int *lwork,
+ double rwork[], fortran_int iwork[], fortran_int *info);
+
+extern fortran_int
+FNAME(spotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ float a[], fortran_int *lda,
+ float b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(dpotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda,
+ double b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(cpotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ f2c_complex a[], fortran_int *lda,
+ f2c_complex b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(zpotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ f2c_doublecomplex b[], fortran_int *ldb,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(spotri)(char *uplo, fortran_int *n,
+ float a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(dpotri)(char *uplo, fortran_int *n,
+ double a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(cpotri)(char *uplo, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(zpotri)(char *uplo, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(scopy)(fortran_int *n,
+ float *sx, fortran_int *incx,
+ float *sy, fortran_int *incy);
+extern fortran_int
+FNAME(dcopy)(fortran_int *n,
+ double *sx, fortran_int *incx,
+ double *sy, fortran_int *incy);
+extern fortran_int
+FNAME(ccopy)(fortran_int *n,
+ f2c_complex *sx, fortran_int *incx,
+ f2c_complex *sy, fortran_int *incy);
+extern fortran_int
+FNAME(zcopy)(fortran_int *n,
+ f2c_doublecomplex *sx, fortran_int *incx,
+ f2c_doublecomplex *sy, fortran_int *incy);
extern float
-FNAME(sdot)(int *n,
- float *sx, int *incx,
- float *sy, int *incy);
+FNAME(sdot)(fortran_int *n,
+ float *sx, fortran_int *incx,
+ float *sy, fortran_int *incy);
extern double
-FNAME(ddot)(int *n,
- double *sx, int *incx,
- double *sy, int *incy);
+FNAME(ddot)(fortran_int *n,
+ double *sx, fortran_int *incx,
+ double *sy, fortran_int *incy);
extern void
-FNAME(cdotu)(f2c_complex *ret, int *n,
- f2c_complex *sx, int *incx,
- f2c_complex *sy, int *incy);
+FNAME(cdotu)(f2c_complex *ret, fortran_int *n,
+ f2c_complex *sx, fortran_int *incx,
+ f2c_complex *sy, fortran_int *incy);
extern void
-FNAME(zdotu)(f2c_doublecomplex *ret, int *n,
- f2c_doublecomplex *sx, int *incx,
- f2c_doublecomplex *sy, int *incy);
+FNAME(zdotu)(f2c_doublecomplex *ret, fortran_int *n,
+ f2c_doublecomplex *sx, fortran_int *incx,
+ f2c_doublecomplex *sy, fortran_int *incy);
extern void
-FNAME(cdotc)(f2c_complex *ret, int *n,
- f2c_complex *sx, int *incx,
- f2c_complex *sy, int *incy);
+FNAME(cdotc)(f2c_complex *ret, fortran_int *n,
+ f2c_complex *sx, fortran_int *incx,
+ f2c_complex *sy, fortran_int *incy);
extern void
-FNAME(zdotc)(f2c_doublecomplex *ret, int *n,
- f2c_doublecomplex *sx, int *incx,
- f2c_doublecomplex *sy, int *incy);
+FNAME(zdotc)(f2c_doublecomplex *ret, fortran_int *n,
+ f2c_doublecomplex *sx, fortran_int *incx,
+ f2c_doublecomplex *sy, fortran_int *incy);
-extern int
+extern fortran_int
FNAME(sgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
float *alpha,
- float *a, int *lda,
- float *b, int *ldb,
+ float *a, fortran_int *lda,
+ float *b, fortran_int *ldb,
float *beta,
- float *c, int *ldc);
-extern int
+ float *c, fortran_int *ldc);
+extern fortran_int
FNAME(dgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
double *alpha,
- double *a, int *lda,
- double *b, int *ldb,
+ double *a, fortran_int *lda,
+ double *b, fortran_int *ldb,
double *beta,
- double *c, int *ldc);
-extern int
+ double *c, fortran_int *ldc);
+extern fortran_int
FNAME(cgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
f2c_complex *alpha,
- f2c_complex *a, int *lda,
- f2c_complex *b, int *ldb,
+ f2c_complex *a, fortran_int *lda,
+ f2c_complex *b, fortran_int *ldb,
f2c_complex *beta,
- f2c_complex *c, int *ldc);
-extern int
+ f2c_complex *c, fortran_int *ldc);
+extern fortran_int
FNAME(zgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
f2c_doublecomplex *alpha,
- f2c_doublecomplex *a, int *lda,
- f2c_doublecomplex *b, int *ldb,
+ f2c_doublecomplex *a, fortran_int *lda,
+ f2c_doublecomplex *b, fortran_int *ldb,
f2c_doublecomplex *beta,
- f2c_doublecomplex *c, int *ldc);
+ f2c_doublecomplex *c, fortran_int *ldc);
#define LAPACK_T(FUNC) \
#define LAPACK(FUNC) \
FNAME(FUNC)
-typedef int fortran_int;
-typedef float fortran_real;
-typedef double fortran_doublereal;
-typedef f2c_complex fortran_complex;
-typedef f2c_doublecomplex fortran_doublecomplex;
-
/*
*****************************************************************************
params->VT = vt;
params->RWORK = NULL;
params->IWORK = iwork;
- params->M = m;
- params->N = n;
params->LDA = ld;
params->LDU = ld;
params->LDVT = vt_column_count;
'choose', 'clip', 'common_fill_value', 'compress', 'compressed',
'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh',
'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal',
- 'diff', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp',
+ 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp',
'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask',
'flatten_structured_array', 'floor', 'floor_divide', 'fmod',
'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask',
'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot',
'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA',
'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift',
- 'less', 'less_equal', 'load', 'loads', 'log', 'log10', 'log2',
+ 'less', 'less_equal', 'log', 'log10', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask',
'make_mask_descr', 'make_mask_none', 'mask_or', 'masked',
'masked_array', 'masked_equal', 'masked_greater',
'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero',
'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod',
- 'product', 'ptp', 'put', 'putmask', 'rank', 'ravel', 'remainder',
+ 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder',
'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_',
'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask',
'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
----------
a : MaskedArray or array_like
An input object.
- fill_value : scalar, optional
- Filling value. Default is None.
+ fill_value : array_like, optional
+ Can be scalar or non-scalar. If non-scalar, the
+ resulting filled array should be broadcastable
+ over the input array. Default is None.
Returns
-------
array([[999999, 1, 2],
[999999, 4, 5],
[ 6, 7, 8]])
+ >>> x.filled(fill_value=333)
+ array([[333, 1, 2],
+ [333, 4, 5],
+ [ 6, 7, 8]])
+ >>> x.filled(fill_value=np.arange(3))
+ array([[0, 1, 2],
+ [0, 4, 5],
+ [6, 7, 8]])
"""
if hasattr(a, 'filled'):
return a.filled(fill_value)
+
elif isinstance(a, ndarray):
# Should we check for contiguity ? and a.flags['CONTIGUOUS']:
return a
def __init__(self, a, b):
"domain_check_interval(a,b)(x) = true where x < a or y > b"
- if (a > b):
+ if a > b:
(a, b) = (b, a)
self.a = a
self.b = b
if t.shape == ():
t = t.reshape(1)
if m is not nomask:
- m = make_mask(m, copy=1)
+ m = make_mask(m, copy=True)
m.shape = (1,)
if m is nomask:
if domain is not None:
m |= domain(da, db)
# Take care of the scalar case first
- if (not m.ndim):
+ if not m.ndim:
if m:
return masked
else:
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
or ``nomask``. The contents are not required to be 0s and 1s: values
- of 0 are interepreted as False, everything else as True.
+ of 0 are interpreted as False, everything else as True.
Parameters
----------
if m1 is m2 and is_mask(m1):
return m1
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
- if (dtype1 != dtype2):
+ if dtype1 != dtype2:
raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
if dtype1.names is not None:
# Allocate an output mask array with the properly broadcast shape.
--------
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
- >>> fl.next()
+ >>> next(fl)
3
- >>> fl.next()
+ >>> next(fl)
masked
- >>> fl.next()
+ >>> next(fl)
Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
- d = self.dataiter.next()
+ ...
StopIteration
"""
def view(self, dtype=None, type=None, fill_value=None):
"""
- Return a view of the MaskedArray data
+ Return a view of the MaskedArray data.
Parameters
----------
type : Python type, optional
Type of the returned view, either ndarray or a subclass. The
default None results in type preservation.
+ fill_value : scalar, optional
+ The value to use for invalid entries (None by default).
+ If None, then this argument is inferred from the passed `dtype`, or
+ in its absence the original array, as discussed in the notes below.
+
+ See Also
+ --------
+ numpy.ndarray.view : Equivalent method on ndarray object.
Notes
-----
# also make the mask be a view (so attr changes to the view's
# mask do not affect original object's mask)
# (especially important to avoid affecting np.masked singleton)
- if (getmask(output) is not nomask):
+ if getmask(output) is not nomask:
output._mask = output._mask.view()
# Make sure to reset the _fill_value if needed
else:
output.fill_value = fill_value
return output
- view.__doc__ = ndarray.view.__doc__
def __getitem__(self, indx):
"""
if mask is masked:
mask = True
- if (current_mask is nomask):
+ if current_mask is nomask:
# Make sure the mask is set
# Just don't do anything if there's nothing to do.
if mask is nomask:
@fill_value.setter
def fill_value(self, value=None):
target = _check_fill_value(value, self.dtype)
+ if target.ndim != 0:
+ # 2019-11-12, 1.18.0
+ warnings.warn(
+ "Non-scalar arrays for the fill value are deprecated. Use "
+ "arrays with scalar values instead. The filled function "
+ "still supports any array as `fill_value`.",
+ DeprecationWarning, stacklevel=2)
+
_fill_value = self._fill_value
if _fill_value is None:
# Create the attribute if it was undefined
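A sketch of the deprecation added above, assuming the assignment still succeeds (with a DeprecationWarning) as the message implies; `filled()` keeps accepting broadcastable array fill values:

    >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
    >>> x.fill_value = np.arange(3)   # now emits DeprecationWarning
    >>> x.filled(np.arange(3))        # still supported, no warning
    array([1, 1, 3])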
Parameters
----------
- fill_value : scalar, optional
- The value to use for invalid entries (None by default).
- If None, the `fill_value` attribute of the array is used instead.
+ fill_value : array_like, optional
+ The value to use for invalid entries. Can be scalar or non-scalar.
+ If non-scalar, the resulting ndarray must be broadcastable over
+ the input array. Default is None, in which case the `fill_value`
+ attribute of the array is used instead.
Returns
-------
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
array([ 1, 2, -999, 4, -999])
+ >>> x.filled(fill_value=1000)
+ array([ 1, 2, 1000, 4, 1000])
>>> type(x.filled())
<class 'numpy.ndarray'>
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the count is performed.
- The default (`axis` = `None`) performs the count over all
+ The default, None, performs the count over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
See Also
--------
- ndarray.all : corresponding function for ndarrays
+ numpy.ndarray.all : corresponding function for ndarrays
numpy.all : equivalent function
Examples
See Also
--------
- ndarray.any : corresponding function for ndarrays
+ numpy.ndarray.any : corresponding function for ndarrays
numpy.any : equivalent function
"""
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
- ndarray.nonzero :
+ numpy.ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
See Also
--------
- ndarray.sum : corresponding function for ndarrays
+ numpy.ndarray.sum : corresponding function for ndarrays
numpy.sum : equivalent function
Examples
result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
See Also
--------
- ndarray.cumsum : corresponding function for ndarrays
+ numpy.ndarray.cumsum : corresponding function for ndarrays
numpy.cumsum : equivalent function
Examples
See Also
--------
- ndarray.prod : corresponding function for ndarrays
+ numpy.ndarray.prod : corresponding function for ndarrays
numpy.prod : equivalent function
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
See Also
--------
- ndarray.cumprod : corresponding function for ndarrays
+ numpy.ndarray.cumprod : corresponding function for ndarrays
numpy.cumprod : equivalent function
"""
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
See Also
--------
- ndarray.mean : corresponding function for ndarrays
+ numpy.ndarray.mean : corresponding function for ndarrays
numpy.mean : Equivalent function
numpy.ma.average: Weighted average.
out.flat = result
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getmask(result)
return out
return m
if not axis:
- return (self - m)
+ return self - m
else:
- return (self - expand_dims(m, axis))
+ return self - expand_dims(m, axis)
def var(self, axis=None, dtype=None, out=None, ddof=0,
keepdims=np._NoValue):
See Also
--------
- ndarray.var : corresponding function for ndarrays
+ numpy.ndarray.var : corresponding function for ndarrays
numpy.var : Equivalent function
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
See Also
--------
- ndarray.std : corresponding function for ndarrays
+ numpy.ndarray.std : corresponding function for ndarrays
numpy.std : Equivalent function
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
See Also
--------
- ndarray.around : corresponding function for ndarrays
+ numpy.ndarray.around : corresponding function for ndarrays
numpy.around : equivalent function
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
--------
MaskedArray.sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
- ndarray.sort : Inplace sort.
+ numpy.ndarray.sort : Inplace sort.
Notes
-----
See Also
--------
- ndarray.sort : Method to sort an array in-place.
+ numpy.ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of `minimum_fill_value`.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
Returns
-------
result = self.filled(fill_value).min(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of maximum_fill_value().
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
Returns
-------
result = self.filled(fill_value).max(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
but the type will be cast if necessary.
fill_value : {var}, optional
Value used to fill in the masked values.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
Returns
-------
See Also
--------
- ndarray.tobytes
+ numpy.ndarray.tobytes
tolist, tofile
Notes
Parameters
----------
- fill_value : scalar, optional
- The value to use for invalid entries (None by default).
- If None, the `fill_value` attribute is used instead.
+ fill_value : array_like, optional
+ The value to use for invalid entries. Can be scalar or
+ non-scalar. If non-scalar, the filled array should
+ be broadcastable over the input array. Default is None, in
+ which case the `fill_value` attribute is used instead.
Returns
-------
invalid = np.logical_not(np.isfinite(result.view(ndarray)))
# Add the initial mask
if m is not nomask:
- if not (result.ndim):
+ if not result.ndim:
return masked
result._mask = np.logical_or(m, invalid)
# Fix the invalid parts
return result
-def rank(obj):
- """
- maskedarray version of the numpy function.
-
- .. note::
- Deprecated since 1.10.0
-
- """
- # 2015-04-12, 1.10.0
- warnings.warn(
- "`rank` is deprecated; use the `ndim` function instead. ",
- np.VisibleDeprecationWarning, stacklevel=2)
- return np.ndim(getdata(obj))
-
-rank.__doc__ = np.rank.__doc__
-
-
def ndim(obj):
"""
maskedarray version of the numpy function.
# Construct the mask
outputmask = np.choose(c, masks, mode=mode)
outputmask = make_mask(mask_or(outputmask, getmask(indices)),
- copy=0, shrink=True)
+ copy=False, shrink=True)
# Get the choices.
d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
if out is not None:
return masked_array(d)
ma = getmaskarray(a)
mb = getmaskarray(b)
- m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0)
+ m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False)
return masked_array(d, mask=m)
outer.__doc__ = doc_note(np.outer.__doc__,
"Masked values are replaced by 0.")
stacklevel=3)
-def dump(a, F):
- """
- Pickle a masked array to a file.
-
- This is a wrapper around ``cPickle.dump``.
-
- Parameters
- ----------
- a : MaskedArray
- The array to be pickled.
- F : str or file-like object
- The file to pickle `a` to. If a string, the full path to the file.
-
- """
- _pickle_warn('dump')
- if not hasattr(F, 'readline'):
- with open(F, 'w') as F:
- pickle.dump(a, F)
- else:
- pickle.dump(a, F)
-
-
-def dumps(a):
- """
- Return a string corresponding to the pickling of a masked array.
-
- This is a wrapper around ``cPickle.dumps``.
-
- Parameters
- ----------
- a : MaskedArray
- The array for which the string representation of the pickle is
- returned.
-
- """
- _pickle_warn('dumps')
- return pickle.dumps(a)
-
-
-def load(F):
- """
- Wrapper around ``cPickle.load`` which accepts either a file-like object
- or a filename.
-
- Parameters
- ----------
- F : str or file
- The file or file name to load.
-
- See Also
- --------
- dump : Pickle an array
-
- Notes
- -----
- This is different from `numpy.load`, which does not use cPickle but loads
- the NumPy binary .npy format.
-
- """
- _pickle_warn('load')
- if not hasattr(F, 'readline'):
- with open(F, 'r') as F:
- return pickle.load(F)
- else:
- return pickle.load(F)
-
-
-def loads(strg):
- """
- Load a pickle from the current string.
-
- The result of ``cPickle.loads(strg)`` is returned.
-
- Parameters
- ----------
- strg : str
- The string to load.
-
- See Also
- --------
- dumps : Return a string corresponding to the pickling of a masked array.
-
- """
- _pickle_warn('loads')
- return pickle.loads(strg)
-
-
def fromfile(file, dtype=float, count=-1, sep=''):
raise NotImplementedError(
"fromfile() not yet implemented for a MaskedArray.")
Data to be averaged.
Masked entries are not taken into account in the computation.
axis : int, optional
- Axis along which to average `a`. If `None`, averaging is done over
+ Axis along which to average `a`. If None, averaging is done over
the flattened array.
weights : array_like, optional
The importance that each element has in the computation of the average.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If ``weights=None``, then all data in `a` are assumed to have a
- weight equal to one. If `weights` is complex, the imaginary parts
- are ignored.
+ weight equal to one. The 1-D calculation is::
+
+ avg = sum(a * weights) / sum(weights)
+
+ The only constraint on `weights` is that `sum(weights)` must not be 0.
returned : bool, optional
Flag indicating whether a tuple ``(result, sum of weights)``
should be returned as output (True), or just the result (False).
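Worked example of the 1-D calculation documented above; weights at masked positions are excluded along with the data, so the result is (3*1 + 1*2 + 1*3) / (3 + 1 + 1):

    >>> a = np.ma.array([1., 2., 3., 4.], mask=[0, 0, 0, 1])
    >>> np.ma.average(a, weights=[3., 1., 1., 1.])
    1.6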
raise NotImplementedError("compress_cols works for 2D arrays only.")
return compress_rowcols(a, 1)
-def mask_rows(a, axis=None):
+def mask_rows(a, axis=np._NoValue):
"""
Mask rows of a 2D array that contain masked values.
fill_value=1)
"""
+ if axis is not np._NoValue:
+ # remove the axis argument when this deprecation expires
+ # NumPy 1.18.0, 2019-11-28
+ warnings.warn(
+ "The axis argument has always been ignored, in future passing it "
+ "will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 0)
-def mask_cols(a, axis=None):
+def mask_cols(a, axis=np._NoValue):
"""
Mask columns of a 2D array that contain masked values.
fill_value=1)
"""
+ if axis is not np._NoValue:
+ # remove the axis argument when this deprecation expires
+ # NumPy 1.18.0, 2019-11-28
+ warnings.warn(
+ "The axis argument has always been ignored, in future passing it "
+ "will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 1)
m = make_mask(n)
m2 = make_mask(m)
assert_(m is m2)
- m3 = make_mask(m, copy=1)
+ m3 = make_mask(m, copy=True)
assert_(m is not m3)
x1 = np.arange(5)
import warnings
import itertools
+import pytest
import numpy as np
from numpy.testing import (
ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
assert_equal(2.0, average(ott, axis=0))
assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
- result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
+ result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
assert_equal(2.0, result)
assert_(wts == 4.0)
ott[:] = masked
assert_equal(average(ott, axis=0), [2.0, 0.0])
assert_equal(average(ott, axis=1).mask[0], [True])
assert_equal([2., 0.], average(ott, axis=0))
- result, wts = average(ott, axis=0, returned=1)
+ result, wts = average(ott, axis=0, returned=True)
assert_equal(wts, [1., 0.])
def test_testAverage2(self):
# Yet more tests of average!
a = arange(6)
b = arange(6) * 3
- r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
+ r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
assert_equal(shape(r1), shape(w1))
assert_equal(r1.shape, w1.shape)
- r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
+ r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)
assert_equal(shape(w2), shape(r2))
- r2, w2 = average(ones((2, 2, 3)), returned=1)
+ r2, w2 = average(ones((2, 2, 3)), returned=True)
assert_equal(shape(w2), shape(r2))
- r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)
+ r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)
assert_equal(shape(w2), shape(r2))
a2d = array([[1, 2], [0, 4]], float)
a2dm = masked_array(a2d, [[False, False], [True, False]])
assert_(mask_rowcols(x, 0).mask.all())
assert_(mask_rowcols(x, 1).mask.all())
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize(["func", "rowcols_axis"],
+ [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)])
+ def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis):
+ # Test deprecation of the axis argument to `mask_rows` and `mask_cols`
+ x = array(np.arange(9).reshape(3, 3),
+ mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+
+ with assert_warns(DeprecationWarning):
+ res = func(x, axis=axis)
+ assert_equal(res, mask_rowcols(x, rowcols_axis))
+
def test_dot(self):
# Tests dot product
n = np.arange(1, 7)
m = make_mask(n)
m2 = make_mask(m)
assert_(m is m2)
- m3 = make_mask(m, copy=1)
+ m3 = make_mask(m, copy=True)
assert_(m is not m3)
x1 = np.arange(5)
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
assert_(eq(2.0, average(ott, axis=0)))
assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
- result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
+ result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
assert_(eq(2.0, result))
assert_(wts == 4.0)
ott[:] = masked
assert_(eq(average(ott, axis=0), [2.0, 0.0]))
assert_(average(ott, axis=1)[0] is masked)
assert_(eq([2., 0.], average(ott, axis=0)))
- result, wts = average(ott, axis=0, returned=1)
+ result, wts = average(ott, axis=0, returned=True)
assert_(eq(wts, [1., 0.]))
def test_testAverage2(self):
a = arange(6)
b = arange(6) * 3
- r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
+ r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
assert_equal(shape(r1), shape(w1))
assert_equal(r1.shape, w1.shape)
- r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
+ r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)
assert_equal(shape(w2), shape(r2))
- r2, w2 = average(ones((2, 2, 3)), returned=1)
+ r2, w2 = average(ones((2, 2, 3)), returned=True)
assert_equal(shape(w2), shape(r2))
- r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)
+ r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)
assert_(shape(w2) == shape(r2))
a2d = array([[1, 2], [0, 4]], float)
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
+++ /dev/null
-"""Version number
-
-"""
-from __future__ import division, absolute_import, print_function
-
-version = '1.00'
-release = False
-
-if not release:
- from . import core
- from . import extras
- revision = [core.__revision__.split(':')[-1][:-1].strip(),
- extras.__revision__.split(':')[-1][:-1].strip(),]
- version += '.dev%04i' % max([int(rev) for rev in revision])
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
-# need * as we're copying the numpy namespace
+# need * as we're copying the numpy namespace (FIXME: this makes little sense)
from numpy import *
__version__ = np.__version__
See Also
--------
- randn, numpy.random.rand
+ randn, numpy.random.RandomState.rand
Examples
--------
See Also
--------
- rand, random.randn
+ rand, numpy.random.RandomState.randn
Notes
-----
referenced by name.
ldict : dict, optional
A dictionary that replaces local operands in current frame.
- Ignored if `obj` is not a string or `gdict` is `None`.
+ Ignored if `obj` is not a string or `gdict` is None.
gdict : dict, optional
A dictionary that replaces global operands in current frame.
Ignored if `obj` is not a string.
array([12., 96.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt = pu._deprecate_as_int(m, "the order of derivation")
array([-1., 1., -1., -1.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
--------
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
.. versionadded:: 1.7.0
"""
- return pu._vander2d(chebvander, x, y, deg)
+ return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg)
def chebvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(chebvander, x, y, z, deg)
+ return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg)
def chebfit(x, y, deg, rcond=None, full=False, w=None):
array([1., 2., 3.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt = pu._deprecate_as_int(m, "the order of derivation")
array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
[115., 203.]])
"""
- c = np.array(c, ndmin=1, copy=0)
+ c = np.array(c, ndmin=1, copy=False)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
.. versionadded:: 1.7.0
"""
- return pu._vander2d(hermvander, x, y, deg)
+ return pu._vander_nd_flat((hermvander, hermvander), (x, y), deg)
def hermvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(hermvander, x, y, z, deg)
+ return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg)
def hermfit(x, y, deg, rcond=None, full=False, w=None):
array([1., 2., 3.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt = pu._deprecate_as_int(m, "the order of derivation")
array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
[31., 54.]])
"""
- c = np.array(c, ndmin=1, copy=0)
+ c = np.array(c, ndmin=1, copy=False)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
.. versionadded:: 1.7.0
"""
- return pu._vander2d(hermevander, x, y, deg)
+ return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg)
def hermevander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(hermevander, x, y, z, deg)
+ return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg)
def hermefit(x, y, deg, rcond=None, full=False, w=None):
array([1., 2., 3.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
array([ 11.16666667, -5. , -3. , 2. ]) # may vary
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
[-4.5, -2. ]])
"""
- c = np.array(c, ndmin=1, copy=0)
+ c = np.array(c, ndmin=1, copy=False)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
.. versionadded:: 1.7.0
"""
- return pu._vander2d(lagvander, x, y, deg)
+ return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg)
def lagvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(lagvander, x, y, z, deg)
+ return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg)
def lagfit(x, y, deg, rcond=None, full=False, w=None):
array([ 9., 60.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt = pu._deprecate_as_int(m, "the order of derivation")
array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) # may vary
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
--------
"""
- c = np.array(c, ndmin=1, copy=0)
+ c = np.array(c, ndmin=1, copy=False)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
.. versionadded:: 1.7.0
"""
- return pu._vander2d(legvander, x, y, deg)
+ return pu._vander_nd_flat((legvander, legvander), (x, y), deg)
def legvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(legvander, x, y, z, deg)
+ return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg)
def legfit(x, y, deg, rcond=None, full=False, w=None):
array([ 6., 24.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
array([ 0., -2., -2., -2.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype doesn't preserve mask attribute.
c = c + 0.0
array([2., 7.])
"""
- c = np.array(c, ndmin=1, copy=0)
+ c = np.array(c, ndmin=1, copy=False)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
>>> polyvalfromroots(b, r, tensor=False)
array([-0., 0.])
"""
- r = np.array(r, ndmin=1, copy=0)
+ r = np.array(r, ndmin=1, copy=False)
if r.dtype.char in '?bBhHiIlLqQpP':
r = r.astype(np.double)
if isinstance(x, (tuple, list)):
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
polyvander, polyvander3d, polyval2d, polyval3d
"""
- return pu._vander2d(polyvander, x, y, deg)
+ return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg)
def polyvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- return pu._vander3d(polyvander, x, y, z, deg)
+ return pu._vander_nd_flat((polyvander, polyvander, polyvander), (x, y, z), deg)
def polyfit(x, y, deg, rcond=None, full=False, w=None):
from __future__ import division, absolute_import, print_function
import operator
+import functools
import warnings
import numpy as np
[array([2.]), array([1.1, 0. ])]
"""
- arrays = [np.array(a, ndmin=1, copy=0) for a in alist]
+ arrays = [np.array(a, ndmin=1, copy=False) for a in alist]
if min([a.size for a in arrays]) == 0:
raise ValueError("Coefficient array is empty")
if any([a.ndim != 1 for a in arrays]):
dtype = np.common_type(*arrays)
except Exception:
raise ValueError("Coefficient arrays have no common type")
- ret = [np.array(a, copy=1, dtype=dtype) for a in arrays]
+ ret = [np.array(a, copy=True, dtype=dtype) for a in arrays]
return ret
return off + scl*x
-def _vander2d(vander_f, x, y, deg):
- """
- Helper function used to implement the ``<type>vander2d`` functions.
+def _nth_slice(i, ndim):
+ sl = [np.newaxis] * ndim
+ sl[i] = slice(None)
+ return tuple(sl)
+
+
+def _vander_nd(vander_fs, points, degrees):
+ r"""
+ A generalization of the Vandermonde matrix for N dimensions
+
+ The result is built by combining the results of 1d Vandermonde matrices,
+
+ .. math::
+ W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{V_k(x_k)[i_0, \ldots, i_M, j_k]}
+
+ where
+
+ .. math::
+ N &= \texttt{len(points)} = \texttt{len(degrees)} = \texttt{len(vander\_fs)} \\
+ M &= \texttt{points[k].ndim} \\
+ V_k &= \texttt{vander\_fs[k]} \\
+ x_k &= \texttt{points[k]} \\
+ 0 \le j_k &\le \texttt{degrees[k]}
+
+ Expanding the one-dimensional :math:`V_k` functions gives:
+
+ .. math::
+ W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \ldots, i_M])}
+
+ where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along
+ dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`.
Parameters
----------
- vander_f : function(array_like, int) -> ndarray
- The 1d vander function, such as ``polyvander``
- x, y, deg :
- See the ``<type>vander2d`` functions for more detail
+ vander_fs : Sequence[function(array_like, int) -> ndarray]
+ The 1d vander function to use for each axis, such as ``polyvander``
+ points : Sequence[array_like]
+ Arrays of point coordinates, all of the same shape. The dtypes
+ will be converted to either float64 or complex128 depending on
+ whether any of the elements are complex. Scalars are converted to
+ 1-D arrays.
+ This must be the same length as `vander_fs`.
+ degrees : Sequence[int]
+ The maximum degree (inclusive) to use for each axis.
+ This must be the same length as `vander_fs`.
+
+ Returns
+ -------
+ vander_nd : ndarray
+ An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``.
"""
- degx, degy = [
- _deprecate_as_int(d, "degrees")
- for d in deg
- ]
- x, y = np.array((x, y), copy=0) + 0.0
+ n_dims = len(vander_fs)
+ if n_dims != len(points):
+ raise ValueError(
+ "Expected {} dimensions of sample points, got {}".format(n_dims, len(points)))
+ if n_dims != len(degrees):
+ raise ValueError(
+ "Expected {} dimensions of degrees, got {}".format(n_dims, len(degrees)))
+ if n_dims == 0:
+ raise ValueError("Unable to guess a dtype or shape when no points are given")
+
+ # convert to the same shape and type
+ points = tuple(np.array(tuple(points), copy=False) + 0.0)
- vx = vander_f(x, degx)
- vy = vander_f(y, degy)
- v = vx[..., None]*vy[..., None,:]
- return v.reshape(v.shape[:-2] + (-1,))
+ # produce the vandermonde matrix for each dimension, placing the last
+ # axis of each in an independent trailing axis of the output
+ vander_arrays = (
+ vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)]
+ for i in range(n_dims)
+ )
+ # we checked this wasn't empty already, so no `initial` needed
+ return functools.reduce(operator.mul, vander_arrays)
-def _vander3d(vander_f, x, y, z, deg):
+
+def _vander_nd_flat(vander_fs, points, degrees):
"""
- Helper function used to implement the ``<type>vander3d`` functions.
+ Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a single axis
- Parameters
- ----------
- vander_f : function(array_like, int) -> ndarray
- The 1d vander function, such as ``polyvander``
- x, y, z, deg :
- See the ``<type>vander3d`` functions for more detail
+ Used to implement the public ``<type>vander<n>d`` functions.
"""
- degx, degy, degz = [
- _deprecate_as_int(d, "degrees")
- for d in deg
- ]
- x, y, z = np.array((x, y, z), copy=0) + 0.0
-
- vx = vander_f(x, degx)
- vy = vander_f(y, degy)
- vz = vander_f(z, degz)
- v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
- return v.reshape(v.shape[:-3] + (-1,))
+ v = _vander_nd(vander_fs, points, degrees)
+ return v.reshape(v.shape[:-len(degrees)] + (-1,))
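As a quick sanity check (illustrative, using only the public `polyvander*` functions), the reduce-over-broadcast construction above reproduces the old explicit 2-D outer product:

import numpy as np
from numpy.polynomial import polynomial as P

x, y = np.random.rand(5), np.random.rand(5)
vx, vy = P.polyvander(x, 2), P.polyvander(y, 3)
# old _vander2d body: outer product over trailing axes, then flatten
old = (vx[..., None] * vy[..., None, :]).reshape(vx.shape[:-1] + (-1,))
new = P.polyvander2d(x, y, [2, 3])   # now routed through _vander_nd_flat
assert np.allclose(old, new)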
def _fromroots(line_f, mul_f, roots):
--- /dev/null
+# generated files
+_bounded_integers.pyx
+_bounded_integers.pxd
--- /dev/null
+cimport numpy as np
+from libc.stdint cimport uint32_t, uint64_t
+
+cdef extern from "numpy/random/bitgen.h":
+ struct bitgen:
+ void *state
+ uint64_t (*next_uint64)(void *st) nogil
+ uint32_t (*next_uint32)(void *st) nogil
+ double (*next_double)(void *st) nogil
+ uint64_t (*next_raw)(void *st) nogil
+
+ ctypedef bitgen bitgen_t
+
+from numpy.random._bit_generator cimport BitGenerator, SeedSequence
# add these for module-freeze analysis (like PyInstaller)
from . import _pickle
-from . import common
-from . import bounded_integers
-
+from . import _common
+from . import _bounded_integers
+
+from ._generator import Generator, default_rng
+from ._bit_generator import SeedSequence, BitGenerator
+from ._mt19937 import MT19937
+from ._pcg64 import PCG64
+from ._philox import Philox
+from ._sfc64 import SFC64
from .mtrand import *
-from .generator import Generator, default_rng
-from .bit_generator import SeedSequence
-from .mt19937 import MT19937
-from .pcg64 import PCG64
-from .philox import Philox
-from .sfc64 import SFC64
-from .mtrand import RandomState
__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
- 'Philox', 'PCG64', 'SFC64', 'default_rng']
+ 'Philox', 'PCG64', 'SFC64', 'default_rng', 'BitGenerator']
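Net effect of the module renames above: the same public names remain importable from `numpy.random`, now including `BitGenerator` (a small doctest-style sketch):

>>> import numpy.random as npr
>>> rng = npr.default_rng(12345)          # Generator backed by PCG64
>>> isinstance(rng.bit_generator, npr.BitGenerator)
True
>>> g = npr.Generator(npr.MT19937(42))    # any exported bit generator works
>>> 0.0 <= g.random() < 1.0
True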
def __RandomState_ctor():
--- /dev/null
+cimport numpy as np
+from libc.stdint cimport uint32_t, uint64_t
+
+cdef extern from "numpy/random/bitgen.h":
+ struct bitgen:
+ void *state
+ uint64_t (*next_uint64)(void *st) nogil
+ uint32_t (*next_uint32)(void *st) nogil
+ double (*next_double)(void *st) nogil
+ uint64_t (*next_raw)(void *st) nogil
+
+ ctypedef bitgen bitgen_t
+
+cdef class BitGenerator():
+ cdef readonly object _seed_seq
+ cdef readonly object lock
+ cdef bitgen_t _bitgen
+ cdef readonly object _ctypes
+ cdef readonly object _cffi
+ cdef readonly object capsule
+
+
+cdef class SeedSequence():
+ cdef readonly object entropy
+ cdef readonly tuple spawn_key
+ cdef readonly uint32_t pool_size
+ cdef readonly object pool
+ cdef readonly uint32_t n_children_spawned
+
+ cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
+ np.ndarray[np.npy_uint32, ndim=1] entropy_array)
+ cdef get_assembled_entropy(self)
+
+cdef class SeedlessSequence():
+ pass
--- /dev/null
+"""
+BitGenerator base class and SeedSequence used to seed the BitGenerators.
+
+SeedSequence is derived from Melissa E. O'Neill's C++11 `std::seed_seq`
+implementation, as it has a lot of nice properties that we want.
+
+https://gist.github.com/imneme/540829265469e673d045
+http://www.pcg-random.org/posts/developing-a-seed_seq-alternative.html
+
+The MIT License (MIT)
+
+Copyright (c) 2015 Melissa E. O'Neill
+Copyright (c) 2019 NumPy Developers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+import abc
+import sys
+from itertools import cycle
+import re
+
+try:
+ from secrets import randbits
+except ImportError:
+ # secrets unavailable on python 3.5 and before
+ from random import SystemRandom
+ randbits = SystemRandom().getrandbits
+
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
+
+from cpython.pycapsule cimport PyCapsule_New
+
+import numpy as np
+cimport numpy as np
+
+from ._common cimport (random_raw, benchmark, prepare_ctypes, prepare_cffi)
+
+__all__ = ['SeedSequence', 'BitGenerator']
+
+np.import_array()
+
+DECIMAL_RE = re.compile(r'[0-9]+')
+
+cdef uint32_t DEFAULT_POOL_SIZE = 4 # Appears also in docstring for pool_size
+cdef uint32_t INIT_A = 0x43b0d7e5
+cdef uint32_t MULT_A = 0x931e8875
+cdef uint32_t INIT_B = 0x8b51f9dd
+cdef uint32_t MULT_B = 0x58f38ded
+cdef uint32_t MIX_MULT_L = 0xca01f9dd
+cdef uint32_t MIX_MULT_R = 0x4973f715
+cdef uint32_t XSHIFT = np.dtype(np.uint32).itemsize * 8 // 2
+cdef uint32_t MASK32 = 0xFFFFFFFF
+
+def _int_to_uint32_array(n):
+ arr = []
+ if n < 0:
+ raise ValueError("expected non-negative integer")
+ if n == 0:
+ arr.append(np.uint32(n))
+ if isinstance(n, np.unsignedinteger):
+ # Cannot do n & MASK32, convert to python int
+ n = int(n)
+ while n > 0:
+ arr.append(np.uint32(n & MASK32))
+ n //= (2**32)
+ return np.array(arr, dtype=np.uint32)
+
+def _coerce_to_uint32_array(x):
+ """ Coerce an input to a uint32 array.
+
+ If a `uint32` array, pass it through directly.
+ If a non-negative integer, then break it up into `uint32` words, lowest
+ bits first.
+ If a string starting with "0x", then interpret as a hex integer, as above.
+ If a string of decimal digits, interpret as a decimal integer, as above.
+ If a sequence of ints or strings, interpret each element as above and
+ concatenate.
+
+ Note that the handling of `int64` or `uint64` arrays are not just
+ straightforward views as `uint32` arrays. If an element is small enough to
+ fit into a `uint32`, then it will only take up one `uint32` element in the
+ output. This is to make sure that the interpretation of a sequence of
+ integers is the same regardless of numpy's default integer type, which
+ differs on different platforms.
+
+ Parameters
+ ----------
+ x : int, str, sequence of int or str
+
+ Returns
+ -------
+ seed_array : uint32 array
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.random._bit_generator import _coerce_to_uint32_array
+ >>> _coerce_to_uint32_array(12345)
+ array([12345], dtype=uint32)
+ >>> _coerce_to_uint32_array('12345')
+ array([12345], dtype=uint32)
+ >>> _coerce_to_uint32_array('0x12345')
+ array([74565], dtype=uint32)
+ >>> _coerce_to_uint32_array([12345, '67890'])
+ array([12345, 67890], dtype=uint32)
+ >>> _coerce_to_uint32_array(np.array([12345, 67890], dtype=np.uint32))
+ array([12345, 67890], dtype=uint32)
+ >>> _coerce_to_uint32_array(np.array([12345, 67890], dtype=np.int64))
+ array([12345, 67890], dtype=uint32)
+ >>> _coerce_to_uint32_array([12345, 0x10deadbeef, 67890, 0xdeadbeef])
+ array([ 12345, 3735928559, 16, 67890, 3735928559],
+ dtype=uint32)
+ >>> _coerce_to_uint32_array(1234567890123456789012345678901234567890)
+ array([3460238034, 2898026390, 3235640248, 2697535605, 3],
+ dtype=uint32)
+ """
+ if isinstance(x, np.ndarray) and x.dtype == np.dtype(np.uint32):
+ return x.copy()
+ elif isinstance(x, str):
+ if x.startswith('0x'):
+ x = int(x, base=16)
+ elif DECIMAL_RE.match(x):
+ x = int(x)
+ else:
+ raise ValueError("unrecognized seed string")
+ if isinstance(x, (int, np.integer)):
+ return _int_to_uint32_array(x)
+ elif isinstance(x, (float, np.inexact)):
+ raise TypeError('seed must be integer')
+ else:
+ if len(x) == 0:
+ return np.array([], dtype=np.uint32)
+ # Should be a sequence of interpretable-as-ints. Convert each one to
+ # a uint32 array and concatenate.
+ subseqs = [_coerce_to_uint32_array(v) for v in x]
+ return np.concatenate(subseqs)
+
+
+cdef uint32_t hashmix(uint32_t value, uint32_t * hash_const):
+ # We are modifying the multiplier as we go along, so it is input-output
+ value ^= hash_const[0]
+ hash_const[0] *= MULT_A
+ value *= hash_const[0]
+ value ^= value >> XSHIFT
+ return value
+
+cdef uint32_t mix(uint32_t x, uint32_t y):
+ cdef uint32_t result = (MIX_MULT_L * x - MIX_MULT_R * y)
+ result ^= result >> XSHIFT
+ return result
+
+
+class ISeedSequence(abc.ABC):
+ """
+ Abstract base class for seed sequences.
+
+ ``BitGenerator`` implementations should treat any object that adheres to
+ this interface as a seed sequence.
+
+ See Also
+ --------
+ SeedSequence, SeedlessSeedSequence
+ """
+
+ @abc.abstractmethod
+ def generate_state(self, n_words, dtype=np.uint32):
+ """
+ generate_state(n_words, dtype=np.uint32)
+
+ Return the requested number of words for PRNG seeding.
+
+ A BitGenerator should call this method in its constructor with
+ an appropriate `n_words` parameter to properly seed itself.
+
+ Parameters
+ ----------
+ n_words : int
+ dtype : np.uint32 or np.uint64, optional
+ The size of each word. This should only be either `uint32` or
+ `uint64`. Strings (`'uint32'`, `'uint64'`) are fine. Note that
+ requesting `uint64` will draw twice as many bits as `uint32` for
+ the same `n_words`. This is a convenience for `BitGenerator`s that
+ express their states as `uint64` arrays.
+
+ Returns
+ -------
+ state : uint32 or uint64 array, shape=(n_words,)
+ """
+
+
+class ISpawnableSeedSequence(ISeedSequence):
+ """
+ Abstract base class for seed sequences that can spawn.
+ """
+
+ @abc.abstractmethod
+ def spawn(self, n_children):
+ """
+ spawn(n_children)
+
+ Spawn a number of child `SeedSequence` s by extending the
+ `spawn_key`.
+
+ Parameters
+ ----------
+ n_children : int
+
+ Returns
+ -------
+ seqs : list of `SeedSequence` s
+ """
+
+
+cdef class SeedlessSeedSequence():
+ """
+ A seed sequence for BitGenerators with no need for seed state.
+
+ See Also
+ --------
+ SeedSequence, ISeedSequence
+ """
+
+ def generate_state(self, n_words, dtype=np.uint32):
+ raise NotImplementedError('seedless SeedSequences cannot generate state')
+
+ def spawn(self, n_children):
+ return [self] * n_children
+
+
+# We cannot directly subclass a `cdef class` type from an `ABC` in Cython, so
+# we must register it after the fact.
+ISpawnableSeedSequence.register(SeedlessSeedSequence)
+
+
+cdef class SeedSequence():
+ """
+ SeedSequence(entropy=None, *, spawn_key=(), pool_size=4)
+
+ SeedSequence mixes sources of entropy in a reproducible way to set the
+ initial state for independent and very probably non-overlapping
+ BitGenerators.
+
+ Once the SeedSequence is instantiated, you can call the `generate_state`
+ method to get an appropriately sized seed. Calling `spawn(n) <spawn>` will
+ create ``n`` SeedSequences that can be used to seed independent
+ BitGenerators, i.e. for different threads.
+
+ Parameters
+ ----------
+ entropy : {None, int, sequence[int]}, optional
+ The entropy for creating a `SeedSequence`.
+ spawn_key : {(), sequence[int]}, optional
+ A third source of entropy, used internally when calling
+ `SeedSequence.spawn`
+ pool_size : {int}, optional
+ Size of the pooled entropy to store. Default is 4 to give a 128-bit
+ entropy pool. 8 (for 256 bits) is another reasonable choice if working
+ with larger PRNGs, but there is very little to be gained by selecting
+ another value.
+ n_children_spawned : {int}, optional
+ The number of children already spawned. Only pass this if
+ reconstructing a `SeedSequence` from a serialized form.
+
+ Notes
+ -----
+
+ Best practice for achieving reproducible bit streams is to use
+ the default ``None`` for the initial entropy, and then use
+ `SeedSequence.entropy` to log/pickle the `entropy` for reproducibility:
+
+ >>> sq1 = np.random.SeedSequence()
+ >>> sq1.entropy
+ 243799254704924441050048792905230269161 # random
+ >>> sq2 = np.random.SeedSequence(sq1.entropy)
+ >>> np.all(sq1.generate_state(10) == sq2.generate_state(10))
+ True
+ """
+
+ def __init__(self, entropy=None, *, spawn_key=(),
+ pool_size=DEFAULT_POOL_SIZE, n_children_spawned=0):
+ if pool_size < DEFAULT_POOL_SIZE:
+ raise ValueError("The size of the entropy pool should be at least "
+ f"{DEFAULT_POOL_SIZE}")
+ if entropy is None:
+ entropy = randbits(pool_size * 32)
+ elif not isinstance(entropy, (int, np.integer, list, tuple, range,
+ np.ndarray)):
+ raise TypeError('SeedSequence expects int or sequence of ints for '
+ 'entropy, not {}'.format(entropy))
+ self.entropy = entropy
+ self.spawn_key = tuple(spawn_key)
+ self.pool_size = pool_size
+ self.n_children_spawned = n_children_spawned
+
+ self.pool = np.zeros(pool_size, dtype=np.uint32)
+ self.mix_entropy(self.pool, self.get_assembled_entropy())
+
+ def __repr__(self):
+ lines = [
+ f'{type(self).__name__}(',
+ f' entropy={self.entropy!r},',
+ ]
+ # Omit some entries if they are left as the defaults in order to
+ # simplify things.
+ if self.spawn_key:
+ lines.append(f' spawn_key={self.spawn_key!r},')
+ if self.pool_size != DEFAULT_POOL_SIZE:
+ lines.append(f' pool_size={self.pool_size!r},')
+ if self.n_children_spawned != 0:
+ lines.append(f' n_children_spawned={self.n_children_spawned!r},')
+ lines.append(')')
+ text = '\n'.join(lines)
+ return text
+
+ @property
+ def state(self):
+ return {k:getattr(self, k) for k in
+ ['entropy', 'spawn_key', 'pool_size',
+ 'n_children_spawned']
+ if getattr(self, k) is not None}
+
+ cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
+ np.ndarray[np.npy_uint32, ndim=1] entropy_array):
+ """ Mix in the given entropy to mixer.
+
+ Parameters
+ ----------
+ mixer : 1D uint32 array, modified in-place
+ entropy_array : 1D uint32 array
+ """
+ cdef uint32_t hash_const[1]
+ hash_const[0] = INIT_A
+
+ # Add in the entropy up to the pool size.
+ for i in range(len(mixer)):
+ if i < len(entropy_array):
+ mixer[i] = hashmix(entropy_array[i], hash_const)
+ else:
+ # Our pool size is bigger than our entropy, so just keep
+ # running the hash out.
+ mixer[i] = hashmix(0, hash_const)
+
+ # Mix all bits together so late bits can affect earlier bits.
+ for i_src in range(len(mixer)):
+ for i_dst in range(len(mixer)):
+ if i_src != i_dst:
+ mixer[i_dst] = mix(mixer[i_dst],
+ hashmix(mixer[i_src], hash_const))
+
+ # Add any remaining entropy, mixing each new entropy word with each
+ # pool word.
+ for i_src in range(len(mixer), len(entropy_array)):
+ for i_dst in range(len(mixer)):
+ mixer[i_dst] = mix(mixer[i_dst],
+ hashmix(entropy_array[i_src], hash_const))
+
+ cdef get_assembled_entropy(self):
+ """ Convert and assemble all entropy sources into a uniform uint32
+ array.
+
+ Returns
+ -------
+ entropy_array : 1D uint32 array
+ """
+ # Convert run-entropy, program-entropy, and the spawn key into uint32
+ # arrays and concatenate them.
+
+ # We MUST have at least some run-entropy. The others are optional.
+ assert self.entropy is not None
+ run_entropy = _coerce_to_uint32_array(self.entropy)
+ spawn_entropy = _coerce_to_uint32_array(self.spawn_key)
+ entropy_array = np.concatenate([run_entropy, spawn_entropy])
+ return entropy_array
+
+ @np.errstate(over='ignore')
+ def generate_state(self, n_words, dtype=np.uint32):
+ """
+ generate_state(n_words, dtype=np.uint32)
+
+ Return the requested number of words for PRNG seeding.
+
+ A BitGenerator should call this method in its constructor with
+ an appropriate `n_words` parameter to properly seed itself.
+
+ Parameters
+ ----------
+ n_words : int
+ dtype : np.uint32 or np.uint64, optional
+ The size of each word. This should only be either `uint32` or
+ `uint64`. Strings (`'uint32'`, `'uint64'`) are fine. Note that
+ requesting `uint64` will draw twice as many bits as `uint32` for
+ the same `n_words`. This is a convenience for `BitGenerator`s that
+ express their states as `uint64` arrays.
+
+ Returns
+ -------
+ state : uint32 or uint64 array, shape=(n_words,)
+ """
+ cdef uint32_t hash_const = INIT_B
+ cdef uint32_t data_val
+
+ out_dtype = np.dtype(dtype)
+ if out_dtype == np.dtype(np.uint32):
+ pass
+ elif out_dtype == np.dtype(np.uint64):
+ n_words *= 2
+ else:
+ raise ValueError("only support uint32 or uint64")
+ state = np.zeros(n_words, dtype=np.uint32)
+ src_cycle = cycle(self.pool)
+ for i_dst in range(n_words):
+ data_val = next(src_cycle)
+ data_val ^= hash_const
+ hash_const *= MULT_B
+ data_val *= hash_const
+ data_val ^= data_val >> XSHIFT
+ state[i_dst] = data_val
+ if out_dtype == np.dtype(np.uint64):
+ # For consistency across different endiannesses, view first as
+ # little-endian then convert the values to the native endianness.
+ state = state.astype('<u4').view('<u8').astype(np.uint64)
+ return state
+
+ def spawn(self, n_children):
+ """
+ spawn(n_children)
+
+ Spawn a number of child `SeedSequence` s by extending the
+ `spawn_key`.
+
+ Parameters
+ ----------
+ n_children : int
+
+ Returns
+ -------
+ seqs : list of `SeedSequence` s
+ """
+ cdef uint32_t i
+
+ seqs = []
+ for i in range(self.n_children_spawned,
+ self.n_children_spawned + n_children):
+ seqs.append(type(self)(
+ self.entropy,
+ spawn_key=self.spawn_key + (i,),
+ pool_size=self.pool_size,
+ ))
+ self.n_children_spawned += n_children
+ return seqs
+
+
+ISpawnableSeedSequence.register(SeedSequence)
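A short usage sketch of the spawning workflow the docstring above describes (public API only; the entropy value is illustrative):

import numpy as np
from numpy.random import SeedSequence, default_rng

root = SeedSequence(entropy=8675309)   # log root.entropy to reproduce later
children = root.spawn(4)               # extends spawn_key; streams should not overlap
rngs = [default_rng(s) for s in children]

# generate_state sizes words to the requested dtype (uint64 draws twice the bits)
assert root.generate_state(2, dtype=np.uint64).dtype == np.uint64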
+
+
+cdef class BitGenerator():
+ """
+ BitGenerator(seed=None)
+
+ Base Class for generic BitGenerators, which provide a stream
+ of random bits based on different algorithms. Must be overridden.
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], SeedSequence}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `~numpy.random.SeedSequence` to derive the initial `BitGenerator` state.
+ One may also pass in a `SeedSequence` instance.
+
+ Attributes
+ ----------
+ lock : threading.Lock
+ Lock instance that is shared so that the same BitGenerator can
+ be used in multiple Generators without corrupting the state. Code that
+ generates values from a bit generator should hold the bit generator's
+ lock.
+
+ See Also
+ --------
+ SeedSequence
+ """
+
+ def __init__(self, seed=None):
+ self.lock = Lock()
+ self._bitgen.state = <void *>0
+ if type(self) is BitGenerator:
+ raise NotImplementedError('BitGenerator is a base class and cannot be instantiated')
+
+ self._ctypes = None
+ self._cffi = None
+
+ cdef const char *name = "BitGenerator"
+ self.capsule = PyCapsule_New(<void *>&self._bitgen, name, NULL)
+ if not isinstance(seed, ISeedSequence):
+ seed = SeedSequence(seed)
+ self._seed_seq = seed
+
+ # Pickling support:
+ def __getstate__(self):
+ return self.state
+
+ def __setstate__(self, state):
+ self.state = state
+
+ def __reduce__(self):
+ from ._pickle import __bit_generator_ctor
+ return __bit_generator_ctor, (self.state['bit_generator'],), self.state
+
+ @property
+ def state(self):
+ """
+ Get or set the PRNG state
+
+ The base BitGenerator.state must be overridden by a subclass
+
+ Returns
+ -------
+ state : dict
+ Dictionary containing the information required to describe the
+ state of the PRNG
+ """
+ raise NotImplementedError('Not implemented in base BitGenerator')
+
+ @state.setter
+ def state(self, value):
+ raise NotImplementedError('Not implemented in base BitGenerator')
+
+ def random_raw(self, size=None, output=True):
+ """
+ random_raw(self, size=None, output=True)
+
+ Return randoms as generated by the underlying BitGenerator
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Output values. Used for performance testing since the generated
+ values are not returned.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ return random_raw(&self._bitgen, self.lock, size, output)
+
+ def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
+ '''Used in tests'''
+ return benchmark(&self._bitgen, self.lock, cnt, method)
+
+ @property
+ def ctypes(self):
+ """
+ ctypes interface
+
+ Returns
+ -------
+ interface : namedtuple
+ Named tuple containing ctypes wrapper
+
+ * state_address - Memory address of the state struct
+ * state - pointer to the state struct
+ * next_uint64 - function pointer to produce 64 bit integers
+ * next_uint32 - function pointer to produce 32 bit integers
+ * next_double - function pointer to produce doubles
+ * bitgen - pointer to the bit generator struct
+ """
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(&self._bitgen)
+
+ return self._ctypes
+
+ @property
+ def cffi(self):
+ """
+ CFFI interface
+
+ Returns
+ -------
+ interface : namedtuple
+ Named tuple containing CFFI wrapper
+
+ * state_address - Memory address of the state struct
+ * state - pointer to the state struct
+ * next_uint64 - function pointer to produce 64 bit integers
+ * next_uint32 - function pointer to produce 32 bit integers
+ * next_double - function pointer to produce doubles
+ * bitgen - pointer to the bit generator struct
+ """
+ if self._cffi is None:
+ self._cffi = prepare_cffi(&self._bitgen)
+ return self._cffi
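A sketch of pulling raw draws through the `ctypes` property above (PCG64 chosen arbitrarily; production code should hold `bg.lock` while drawing):

from numpy.random import PCG64

bg = PCG64(1234)
ct = bg.ctypes                    # namedtuple built by prepare_ctypes in _common
raw = ct.next_uint64(ct.state)    # one raw 64-bit draw via the function pointer
assert 0 <= raw < 2**64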
--- /dev/null
+from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
+ int8_t, int16_t, int32_t, int64_t, intptr_t)
+import numpy as np
+cimport numpy as np
+ctypedef np.npy_bool bool_t
+
+from numpy.random cimport bitgen_t
+
+cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
+ """Mask generator for use in bounded random numbers"""
+ # Smallest bit mask >= max
+ cdef uint64_t mask = max_val
+ mask |= mask >> 1
+ mask |= mask >> 2
+ mask |= mask >> 4
+ mask |= mask >> 8
+ mask |= mask >> 16
+ mask |= mask >> 32
+ return mask
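The OR-shift cascade above smears the top set bit downward; an equivalent pure-Python sketch:

def gen_mask(max_val: int) -> int:
    # smallest all-ones mask >= max_val, e.g. 10 -> 0b1111 == 15
    mask = max_val
    for shift in (1, 2, 4, 8, 16, 32):
        mask |= mask >> shift
    return mask

assert gen_mask(10) == 15 and gen_mask(255) == 255 and gen_mask(256) == 511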
+{{
+py:
+inttypes = ('uint64','uint32','uint16','uint8','bool','int64','int32','int16','int8')
+}}
+{{for inttype in inttypes}}
+cdef object _rand_{{inttype}}(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
+{{endfor}}
--- /dev/null
+#!python
+#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True
+
+import numpy as np
+cimport numpy as np
+
+__all__ = []
+
+np.import_array()
+
+cdef extern from "numpy/random/distributions.h":
+ # Generate random numbers in closed interval [off, off + rng].
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
+ uint64_t off, uint64_t rng,
+ uint64_t mask, bint use_masked) nogil
+ uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
+ uint32_t off, uint32_t rng,
+ uint32_t mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+ uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
+ uint16_t off, uint16_t rng,
+ uint16_t mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+ uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state,
+ uint8_t off, uint8_t rng,
+ uint8_t mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+ np.npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state,
+ np.npy_bool off, np.npy_bool rng,
+ np.npy_bool mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+ void random_bounded_uint64_fill(bitgen_t *bitgen_state,
+ uint64_t off, uint64_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint64_t *out) nogil
+ void random_bounded_uint32_fill(bitgen_t *bitgen_state,
+ uint32_t off, uint32_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint32_t *out) nogil
+ void random_bounded_uint16_fill(bitgen_t *bitgen_state,
+ uint16_t off, uint16_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint16_t *out) nogil
+ void random_bounded_uint8_fill(bitgen_t *bitgen_state,
+ uint8_t off, uint8_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint8_t *out) nogil
+ void random_bounded_bool_fill(bitgen_t *bitgen_state,
+ np.npy_bool off, np.npy_bool rng, np.npy_intp cnt,
+ bint use_masked,
+ np.npy_bool *out) nogil
+
+
+
+_integers_types = {'bool': (0, 2),
+ 'int8': (-2**7, 2**7),
+ 'int16': (-2**15, 2**15),
+ 'int32': (-2**31, 2**31),
+ 'int64': (-2**63, 2**63),
+ 'uint8': (0, 2**8),
+ 'uint16': (0, 2**16),
+ 'uint32': (0, 2**32),
+ 'uint64': (0, 2**64)}
+{{
+py:
+type_info = (('uint32', 'uint32', 'uint64', 'NPY_UINT64', 0, 0, 0, '0X100000000ULL'),
+ ('uint16', 'uint16', 'uint32', 'NPY_UINT32', 1, 16, 0, '0X10000UL'),
+ ('uint8', 'uint8', 'uint16', 'NPY_UINT16', 3, 8, 0, '0X100UL'),
+ ('bool','bool', 'uint8', 'NPY_UINT8', 31, 1, 0, '0x2UL'),
+ ('int32', 'uint32', 'uint64', 'NPY_INT64', 0, 0, '-0x80000000LL', '0x80000000LL'),
+ ('int16', 'uint16', 'uint32', 'NPY_INT32', 1, 16, '-0x8000LL', '0x8000LL' ),
+ ('int8', 'uint8', 'uint16', 'NPY_INT16', 3, 8, '-0x80LL', '0x80LL' ),
+)}}
+{{for nptype, utype, nptype_up, npctype, remaining, bitshift, lb, ub in type_info}}
+{{ py: otype = nptype + '_' if nptype == 'bool' else nptype }}
+cdef object _rand_{{nptype}}_broadcast(np.ndarray low, np.ndarray high, object size,
+ bint use_masked, bint closed,
+ bitgen_t *state, object lock):
+ """
+ Array path for smaller integer types
+
+ This path is simpler since the high value in the open interval [low, high)
+ must be in-range for the next larger type, {{nptype_up}}. Here we cast to
+ this type for checking and then recast to {{nptype}} when producing the
+ random integers.
+ """
+ cdef {{utype}}_t rng, last_rng, off, val, mask, out_val, is_open
+ cdef uint32_t buf
+ cdef {{utype}}_t *out_data
+ cdef {{nptype_up}}_t low_v, high_v
+ cdef np.ndarray low_arr, high_arr, out_arr
+ cdef np.npy_intp i, cnt
+ cdef np.broadcast it
+ cdef int buf_rem = 0
+
+ # Array path
+ is_open = not closed
+ low_arr = <np.ndarray>low
+ high_arr = <np.ndarray>high
+ if np.any(np.less(low_arr, {{lb}})):
+ raise ValueError('low is out of bounds for {{nptype}}')
+ if closed:
+ high_comp = np.greater_equal
+ low_high_comp = np.greater
+ else:
+ high_comp = np.greater
+ low_high_comp = np.greater_equal
+
+ if np.any(high_comp(high_arr, {{ub}})):
+ raise ValueError('high is out of bounds for {{nptype}}')
+ if np.any(low_high_comp(low_arr, high_arr)):
+ comp = '>' if closed else '>='
+ raise ValueError('low {comp} high'.format(comp=comp))
+
+ low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
+ high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+ if size is not None:
+ out_arr = <np.ndarray>np.empty(size, np.{{otype}})
+ else:
+ it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+ out_arr = <np.ndarray>np.empty(it.shape, np.{{otype}})
+
+ it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+ out_data = <{{utype}}_t *>np.PyArray_DATA(out_arr)
+ cnt = np.PyArray_SIZE(out_arr)
+ mask = last_rng = 0
+ with lock, nogil:
+ for i in range(cnt):
+ low_v = (<{{nptype_up}}_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+ high_v = (<{{nptype_up}}_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ # Subtract 1 since generator produces values on the closed interval [off, off+rng]
+ rng = <{{utype}}_t>((high_v - is_open) - low_v)
+ off = <{{utype}}_t>(<{{nptype_up}}_t>low_v)
+
+ if rng != last_rng:
+ # Smallest bit mask >= max
+ mask = <{{utype}}_t>_gen_mask(rng)
+
+ out_data[i] = random_buffered_bounded_{{utype}}(state, off, rng, mask, use_masked, &buf_rem, &buf)
+
+ np.PyArray_MultiIter_NEXT(it)
+ return out_arr
+{{endfor}}
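For intuition, the masked rejection strategy behind the buffered helpers can be sketched in plain Python (a simplification; the Lemire path taken when `use_masked` is False is omitted):

import random

def bounded_masked(off: int, rng: int) -> int:
    # draw on the closed interval [off, off + rng] by mask-and-reject
    mask = rng
    for s in (1, 2, 4, 8, 16, 32):        # same smear as _gen_mask above
        mask |= mask >> s
    while True:
        val = random.getrandbits(64) & mask   # cheap masked draw
        if val <= rng:                        # reject out-of-range values
            return off + val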
+{{
+py:
+big_type_info = (('uint64', 'uint64', 'NPY_UINT64', '0x0ULL', '0xFFFFFFFFFFFFFFFFULL'),
+ ('int64', 'uint64', 'NPY_INT64', '-0x8000000000000000LL', '0x7FFFFFFFFFFFFFFFLL' )
+)}}
+{{for nptype, utype, npctype, lb, ub in big_type_info}}
+{{ py: otype = nptype}}
+cdef object _rand_{{nptype}}_broadcast(object low, object high, object size,
+ bint use_masked, bint closed,
+ bitgen_t *state, object lock):
+ """
+ Array path for 64-bit integer types
+
+ Requires special treatment since the high value can be out-of-range for
+ the largest (64-bit) integer type, as the generator is specified on the
+ interval [low, high).
+
+ The internal generator does not have this issue since it generates from
+ the closed interval [low, high-1], and high-1 is always in range for the
+ 64-bit integer type.
+ """
+
+ cdef np.ndarray low_arr, high_arr, out_arr, highm1_arr
+ cdef np.npy_intp i, cnt, n
+ cdef np.broadcast it
+ cdef object closed_upper
+ cdef uint64_t *out_data
+ cdef {{nptype}}_t *highm1_data
+ cdef {{nptype}}_t low_v, high_v
+ cdef uint64_t rng, last_rng, val, mask, off, out_val
+
+ low_arr = <np.ndarray>low
+ high_arr = <np.ndarray>high
+
+ if np.any(np.less(low_arr, {{lb}})):
+ raise ValueError('low is out of bounds for {{nptype}}')
+ dt = high_arr.dtype
+ if closed or np.issubdtype(dt, np.integer):
+ # Avoid object dtype path if already an integer
+ high_lower_comp = np.less if closed else np.less_equal
+ if np.any(high_lower_comp(high_arr, {{lb}})):
+ comp = '>' if closed else '>='
+ raise ValueError('low {comp} high'.format(comp=comp))
+ high_m1 = high_arr if closed else high_arr - dt.type(1)
+ if np.any(np.greater(high_m1, {{ub}})):
+ raise ValueError('high is out of bounds for {{nptype}}')
+ highm1_arr = <np.ndarray>np.PyArray_FROM_OTF(high_m1, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
+ else:
+ # If input is object or a floating type
+ highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.{{otype}})
+ highm1_data = <{{nptype}}_t *>np.PyArray_DATA(highm1_arr)
+ cnt = np.PyArray_SIZE(high_arr)
+ flat = high_arr.flat
+ for i in range(cnt):
+ # Subtract 1 since generator produces values on the closed interval [off, off+rng]
+ closed_upper = int(flat[i]) - 1
+ if closed_upper > {{ub}}:
+ raise ValueError('high is out of bounds for {{nptype}}')
+ if closed_upper < {{lb}}:
+ comp = '>' if closed else '>='
+ raise ValueError('low {comp} high'.format(comp=comp))
+ highm1_data[i] = <{{nptype}}_t>closed_upper
+
+ if np.any(np.greater(low_arr, highm1_arr)):
+ comp = '>' if closed else '>='
+ raise ValueError('low {comp} high'.format(comp=comp))
+
+ high_arr = highm1_arr
+ low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+ if size is not None:
+ out_arr = <np.ndarray>np.empty(size, np.{{otype}})
+ else:
+ it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+ out_arr = <np.ndarray>np.empty(it.shape, np.{{otype}})
+
+ it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+ out_data = <uint64_t *>np.PyArray_DATA(out_arr)
+ n = np.PyArray_SIZE(out_arr)
+ mask = last_rng = 0
+ with lock, nogil:
+ for i in range(n):
+ low_v = (<{{nptype}}_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+ high_v = (<{{nptype}}_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ # Generator produces values on the closed interval [off, off+rng], -1 subtracted above
+ rng = <{{utype}}_t>(high_v - low_v)
+ off = <{{utype}}_t>(<{{nptype}}_t>low_v)
+
+ if rng != last_rng:
+ mask = _gen_mask(rng)
+ out_data[i] = random_bounded_uint64(state, off, rng, mask, use_masked)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return out_arr
+{{endfor}}
+{{
+py:
+type_info = (('uint64', 'uint64', '0x0ULL', '0xFFFFFFFFFFFFFFFFULL'),
+ ('uint32', 'uint32', '0x0UL', '0XFFFFFFFFUL'),
+ ('uint16', 'uint16', '0x0UL', '0XFFFFUL'),
+ ('uint8', 'uint8', '0x0UL', '0XFFUL'),
+ ('bool', 'bool', '0x0UL', '0x1UL'),
+ ('int64', 'uint64', '-0x8000000000000000LL', '0x7FFFFFFFFFFFFFFFL'),
+ ('int32', 'uint32', '-0x80000000L', '0x7FFFFFFFL'),
+ ('int16', 'uint16', '-0x8000L', '0x7FFFL' ),
+ ('int8', 'uint8', '-0x80L', '0x7FL' )
+)}}
+{{for nptype, utype, lb, ub in type_info}}
+{{ py: otype = nptype + '_' if nptype == 'bool' else nptype }}
+cdef object _rand_{{nptype}}(object low, object high, object size,
+ bint use_masked, bint closed,
+ bitgen_t *state, object lock):
+ """
+ _rand_{{nptype}}(low, high, size, use_masked, *state, lock)
+
+ Return random `np.{{otype}}` integers from `low` (inclusive) to `high` (exclusive).
+
+ Return random integers from the "discrete uniform" distribution in the
+ interval [`low`, `high`). If `high` is None (the default),
+ then results are from [0, `low`). On entry the arguments are presumed
+ to have been validated for size and order for the `np.{{otype}}` type.
+
+ Parameters
+ ----------
+ low : int or array-like
+ Lowest (signed) integer to be drawn from the distribution (unless
+ ``high=None``, in which case this parameter is the *highest* such
+ integer).
+ high : int or array-like
+ If provided, one above the largest (signed) integer to be drawn from the
+ distribution (see above for behavior if ``high=None``).
+ size : int or tuple of ints
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ use_masked : bool
+ If True, rejection sampling with a range mask is used; otherwise Lemire's algorithm is used.
+ closed : bool
+ If True, sample from the closed interval [low, high]. If False, sample from [low, high).
+ state : bit generator
+ Bit generator state to use in the core random number generators
+ lock : threading.Lock
+ Lock to prevent multiple threads from using a single generator simultaneously
+
+ Returns
+ -------
+ out : python scalar or ndarray of np.{{otype}}
+ `size`-shaped array of random integers from the appropriate
+ distribution, or a single such random int if `size` not provided.
+
+ Notes
+ -----
+ The internal integer generator produces values from the closed
+ interval [low, high-(not closed)]. This requires some care since
+ high can be out-of-range for {{utype}}. The scalar path leaves
+ integers as Python integers until the 1 has been subtracted to
+ avoid needing to cast to a larger type.
+ """
+ cdef np.ndarray out_arr, low_arr, high_arr
+ cdef {{utype}}_t rng, off, out_val
+ cdef {{utype}}_t *out_data
+ cdef np.npy_intp i, n, cnt
+
+ if size is not None:
+ if (np.prod(size) == 0):
+ return np.empty(size, dtype=np.{{otype}})
+
+ low_arr = <np.ndarray>np.array(low, copy=False)
+ high_arr = <np.ndarray>np.array(high, copy=False)
+ low_ndim = np.PyArray_NDIM(low_arr)
+ high_ndim = np.PyArray_NDIM(high_arr)
+ if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
+ (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
+ low = int(low_arr)
+ high = int(high_arr)
+ # Subtract 1 since internal generator produces on closed interval [low, high]
+ if not closed:
+ high -= 1
+
+ if low < {{lb}}:
+ raise ValueError("low is out of bounds for {{nptype}}")
+ if high > {{ub}}:
+ raise ValueError("high is out of bounds for {{nptype}}")
+ if low > high: # -1 already subtracted, closed interval
+ comp = '>' if closed else '>='
+ raise ValueError('low {comp} high'.format(comp=comp))
+
+ rng = <{{utype}}_t>(high - low)
+ off = <{{utype}}_t>(<{{nptype}}_t>low)
+ if size is None:
+ with lock:
+ random_bounded_{{utype}}_fill(state, off, rng, 1, use_masked, &out_val)
+ return np.{{otype}}(<{{nptype}}_t>out_val)
+ else:
+ out_arr = <np.ndarray>np.empty(size, np.{{otype}})
+ cnt = np.PyArray_SIZE(out_arr)
+ out_data = <{{utype}}_t *>np.PyArray_DATA(out_arr)
+ with lock, nogil:
+ random_bounded_{{utype}}_fill(state, off, rng, cnt, use_masked, out_data)
+ return out_arr
+ return _rand_{{nptype}}_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
+{{endfor}}
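These templated paths back the public integer sampler; roughly, from the user's side (`endpoint=True` corresponds to `closed` above):

>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> a = rng.integers(0, 10, size=1000, dtype=np.uint8)                 # half-open
>>> b = rng.integers(0, 10, size=1000, dtype=np.uint8, endpoint=True)  # closed
>>> int(a.max()) <= 9 and int(b.max()) <= 10
True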
--- /dev/null
+#cython: language_level=3
+
+from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t
+
+import numpy as np
+cimport numpy as np
+
+from numpy.random cimport bitgen_t
+
+cdef double POISSON_LAM_MAX
+cdef double LEGACY_POISSON_LAM_MAX
+cdef uint64_t MAXSIZE
+
+cdef enum ConstraintType:
+ CONS_NONE
+ CONS_NON_NEGATIVE
+ CONS_POSITIVE
+ CONS_POSITIVE_NOT_NAN
+ CONS_BOUNDED_0_1
+ CONS_BOUNDED_0_1_NOTNAN
+ CONS_BOUNDED_GT_0_1
+ CONS_GT_1
+ CONS_GTE_1
+ CONS_POISSON
+ LEGACY_CONS_POISSON
+
+ctypedef ConstraintType constraint_type
+
+cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method)
+cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output)
+cdef object prepare_cffi(bitgen_t *bitgen)
+cdef object prepare_ctypes(bitgen_t *bitgen)
+cdef int check_constraint(double val, object name, constraint_type cons) except -1
+cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1
+
+cdef extern from "include/aligned_malloc.h":
+ cdef void *PyArray_realloc_aligned(void *p, size_t n)
+ cdef void *PyArray_malloc_aligned(size_t n)
+ cdef void *PyArray_calloc_aligned(size_t n, size_t s)
+ cdef void PyArray_free_aligned(void *p)
+
+ctypedef double (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil
+ctypedef double (*random_double_0)(void *state) nogil
+ctypedef double (*random_double_1)(void *state, double a) nogil
+ctypedef double (*random_double_2)(void *state, double a, double b) nogil
+ctypedef double (*random_double_3)(void *state, double a, double b, double c) nogil
+
+ctypedef double (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) nogil
+ctypedef float (*random_float_0)(bitgen_t *state) nogil
+ctypedef float (*random_float_1)(bitgen_t *state, float a) nogil
+
+ctypedef int64_t (*random_uint_0)(void *state) nogil
+ctypedef int64_t (*random_uint_d)(void *state, double a) nogil
+ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) nogil
+ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) nogil
+ctypedef int64_t (*random_uint_i)(void *state, int64_t a) nogil
+ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) nogil
+
+ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) nogil
+ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) nogil
+
+ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) nogil
+ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) nogil
+
+cdef double kahan_sum(double *darr, np.npy_intp n)
+
+cdef inline double uint64_to_double(uint64_t rnd) nogil:
+ return (rnd >> 11) * (1.0 / 9007199254740992.0)
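The inline helper above is the standard 53-bit conversion (discard 11 low bits, scale by 2**-53); a quick pure-Python check:

def uint64_to_double(rnd: int) -> float:
    return (rnd >> 11) * (1.0 / 9007199254740992.0)   # 9007199254740992 == 2**53

assert uint64_to_double(0) == 0.0
assert uint64_to_double(2**64 - 1) < 1.0              # stays strictly below 1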
+
+cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out)
+
+cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out)
+
+cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out)
+
+cdef object wrap_int(object val, object bits)
+
+cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size)
+
+cdef object cont(void *func, void *state, object size, object lock, int narg,
+ object a, object a_name, constraint_type a_constraint,
+ object b, object b_name, constraint_type b_constraint,
+ object c, object c_name, constraint_type c_constraint,
+ object out)
+
+cdef object disc(void *func, void *state, object size, object lock,
+ int narg_double, int narg_int64,
+ object a, object a_name, constraint_type a_constraint,
+ object b, object b_name, constraint_type b_constraint,
+ object c, object c_name, constraint_type c_constraint)
+
+cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
+ object a, object a_name, constraint_type a_constraint,
+ object out)
+
+cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
+
+cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
--- /dev/null
+#!python
+#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
+from collections import namedtuple
+from cpython cimport PyFloat_AsDouble
+import sys
+import numpy as np
+cimport numpy as np
+
+from libc.stdint cimport uintptr_t
+
+__all__ = ['interface']
+
+np.import_array()
+
+interface = namedtuple('interface', ['state_address', 'state', 'next_uint64',
+ 'next_uint32', 'next_double',
+ 'bit_generator'])
+
+cdef double LEGACY_POISSON_LAM_MAX = <double>np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10
+cdef double POISSON_LAM_MAX = <double>np.iinfo('int64').max - np.sqrt(np.iinfo('int64').max)*10
+
+cdef uint64_t MAXSIZE = <uint64_t>sys.maxsize
+
+
+cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method):
+ """Benchmark command used by BitGenerator"""
+ cdef Py_ssize_t i
+ if method==u'uint64':
+ with lock, nogil:
+ for i in range(cnt):
+ bitgen.next_uint64(bitgen.state)
+ elif method==u'double':
+ with lock, nogil:
+ for i in range(cnt):
+ bitgen.next_double(bitgen.state)
+ else:
+ raise ValueError('Unknown method')
+
+
+cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output):
+ """
+ random_raw(bitgen, lock, size=None, output=True)
+
+ Return randoms as generated by the underlying PRNG
+
+ Parameters
+ ----------
+ bitgen : BitGenerator
+ Address of the bit generator struct
+ lock : Threading.Lock
+ Lock provided by the bit generator
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Output values. Used for performance testing since the generated
+ values are not returned.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ cdef np.ndarray randoms
+ cdef uint64_t *randoms_data
+ cdef Py_ssize_t i, n
+
+ if not output:
+ if size is None:
+ with lock:
+ bitgen.next_raw(bitgen.state)
+ return None
+ n = np.asarray(size).sum()
+ with lock, nogil:
+ for i in range(n):
+ bitgen.next_raw(bitgen.state)
+ return None
+
+ if size is None:
+ with lock:
+ return bitgen.next_raw(bitgen.state)
+
+ randoms = <np.ndarray>np.empty(size, np.uint64)
+ randoms_data = <uint64_t*>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ with lock, nogil:
+ for i in range(n):
+ randoms_data[i] = bitgen.next_raw(bitgen.state)
+ return randoms
+
+cdef object prepare_cffi(bitgen_t *bitgen):
+ """
+ Bundles the interfaces to interact with a BitGenerator using cffi
+
+ Parameters
+ ----------
+ bitgen : pointer
+ A pointer to a BitGenerator instance
+
+ Returns
+ -------
+ interface : namedtuple
+ The functions required to interface with the BitGenerator using cffi
+
+ * state_address - Memory address of the state struct
+ * state - pointer to the state struct
+ * next_uint64 - function pointer to produce 64 bit integers
+ * next_uint32 - function pointer to produce 32 bit integers
+ * next_double - function pointer to produce doubles
+ * bit_generator - pointer to the BitGenerator struct
+ """
+ try:
+ import cffi
+ except ImportError:
+ raise ImportError('cffi cannot be imported.')
+
+ ffi = cffi.FFI()
+ _cffi = interface(<uintptr_t>bitgen.state,
+ ffi.cast('void *', <uintptr_t>bitgen.state),
+ ffi.cast('uint64_t (*)(void *)', <uintptr_t>bitgen.next_uint64),
+ ffi.cast('uint32_t (*)(void *)', <uintptr_t>bitgen.next_uint32),
+ ffi.cast('double (*)(void *)', <uintptr_t>bitgen.next_double),
+ ffi.cast('void *', <uintptr_t>bitgen))
+ return _cffi
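+
+# Minimal usage sketch (illustrative): this namedtuple is exposed as
+# ``BitGenerator.cffi``, and its cast function pointers are directly
+# callable through cffi:
+#
+#     pg = PCG64()
+#     iface = pg.cffi
+#     iface.next_double(iface.state)  # one double in [0, 1)  # random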
+
+cdef object prepare_ctypes(bitgen_t *bitgen):
+ """
+ Bundles the interfaces to interact with a BitGenerator using ctypes
+
+ Parameters
+ ----------
+ bitgen : pointer
+ A pointer to a BitGenerator instance
+
+ Returns
+ -------
+ interface : namedtuple
+ The functions required to interface with the BitGenerator using ctypes:
+
+ * state_address - Memory address of the state struct
+ * state - pointer to the state struct
+ * next_uint64 - function pointer to produce 64 bit integers
+ * next_uint32 - function pointer to produce 32 bit integers
+ * next_double - function pointer to produce doubles
+ * bit_generator - pointer to the BitGenerator struct
+ """
+ import ctypes
+
+ _ctypes = interface(<uintptr_t>bitgen.state,
+ ctypes.c_void_p(<uintptr_t>bitgen.state),
+ ctypes.cast(<uintptr_t>bitgen.next_uint64,
+ ctypes.CFUNCTYPE(ctypes.c_uint64,
+ ctypes.c_void_p)),
+ ctypes.cast(<uintptr_t>bitgen.next_uint32,
+ ctypes.CFUNCTYPE(ctypes.c_uint32,
+ ctypes.c_void_p)),
+ ctypes.cast(<uintptr_t>bitgen.next_double,
+ ctypes.CFUNCTYPE(ctypes.c_double,
+ ctypes.c_void_p)),
+ ctypes.c_void_p(<uintptr_t>bitgen))
+ return _ctypes
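+
+# The ctypes analogue (illustrative), exposed as ``BitGenerator.ctypes``:
+#
+#     pg = PCG64()
+#     iface = pg.ctypes
+#     iface.next_uint32(iface.state)  # one uint32 draw  # random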
+
+cdef double kahan_sum(double *darr, np.npy_intp n):
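+    """Kahan (compensated) summation of ``n`` doubles from ``darr``,
+    carrying a running error term to reduce floating-point round-off
+    relative to naive accumulation."""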
+ cdef double c, y, t, sum
+ cdef np.npy_intp i
+ sum = darr[0]
+ c = 0.0
+ for i in range(1, n):
+ y = darr[i] - c
+ t = sum + y
+ c = (t-sum) - y
+ sum = t
+ return sum
+
+
+cdef object wrap_int(object val, object bits):
+ """Wraparound to place an integer into the interval [0, 2**bits)"""
+ mask = ~(~int(0) << bits)
+ return val & mask
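+
+# Example (illustrative): wrap_int(-1, 8) == 255, since the mask is 2**8 - 1.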
+
+
+cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size):
+ """Convert a large integer to an array of unsigned integers"""
+ len = bits // uint_size
+ value = np.asarray(value)
+ if uint_size == 32:
+ dtype = np.uint32
+ elif uint_size == 64:
+ dtype = np.uint64
+ else:
+ raise ValueError('Unknown uint_size')
+ if value.shape == ():
+ value = int(value)
+ upper = int(2)**int(bits)
+ if value < 0 or value >= upper:
+            raise ValueError('{name} must be non-negative and '
+ 'less than 2**{bits}.'.format(name=name, bits=bits))
+
+ out = np.empty(len, dtype=dtype)
+ for i in range(len):
+ out[i] = value % 2**int(uint_size)
+ value >>= int(uint_size)
+ else:
+ out = value.astype(dtype)
+ if out.shape != (len,):
+ raise ValueError('{name} must have {len} elements when using '
+ 'array form'.format(name=name, len=len))
+ return out
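+
+# Example (illustrative): int_to_array(2**64 + 1, 'seed', 128, 64) returns
+# array([1, 1], dtype=uint64), the value's little-endian 64-bit words.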
+
+
+cdef check_output(object out, object dtype, object size):
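+    """Validate a user-supplied output array: it must be contiguous, aligned
+    and writeable, match ``dtype`` exactly, and agree with ``size``."""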
+ if out is None:
+ return
+ cdef np.ndarray out_array = <np.ndarray>out
+ if not (np.PyArray_CHKFLAGS(out_array, np.NPY_CARRAY) or
+ np.PyArray_CHKFLAGS(out_array, np.NPY_FARRAY)):
+ raise ValueError('Supplied output array is not contiguous, writable or aligned.')
+ if out_array.dtype != dtype:
+ raise TypeError('Supplied output array has the wrong type. '
+ 'Expected {0}, got {1}'.format(np.dtype(dtype), out_array.dtype))
+ if size is not None:
+ try:
+ tup_size = tuple(size)
+ except TypeError:
+ tup_size = tuple([size])
+ if tup_size != out.shape:
+ raise ValueError('size must match out.shape when used together')
+
+
+cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out):
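+    """Fill a float64 array (``out`` if supplied, else newly allocated with
+    shape ``size``) in one call to ``func``, a ``random_double_fill``;
+    returns a scalar when both ``size`` and ``out`` are None."""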
+ cdef random_double_fill random_func = (<random_double_fill>func)
+ cdef double out_val
+ cdef double *out_array_data
+ cdef np.ndarray out_array
+ cdef np.npy_intp i, n
+
+ if size is None and out is None:
+ with lock:
+ random_func(state, 1, &out_val)
+ return out_val
+
+ if out is not None:
+ check_output(out, np.float64, size)
+ out_array = <np.ndarray>out
+ else:
+ out_array = <np.ndarray>np.empty(size, np.double)
+
+ n = np.PyArray_SIZE(out_array)
+ out_array_data = <double *>np.PyArray_DATA(out_array)
+ with lock, nogil:
+ random_func(state, n, out_array_data)
+ return out_array
+
+cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out):
+ cdef random_float_fill random_func = (<random_float_fill>func)
+ cdef float out_val
+ cdef float *out_array_data
+ cdef np.ndarray out_array
+ cdef np.npy_intp i, n
+
+ if size is None and out is None:
+ with lock:
+ random_func(state, 1, &out_val)
+ return out_val
+
+ if out is not None:
+ check_output(out, np.float32, size)
+ out_array = <np.ndarray>out
+ else:
+ out_array = <np.ndarray>np.empty(size, np.float32)
+
+ n = np.PyArray_SIZE(out_array)
+ out_array_data = <float *>np.PyArray_DATA(out_array)
+ with lock, nogil:
+ random_func(state, n, out_array_data)
+ return out_array
+
+cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out):
+ cdef random_double_0 random_func = (<random_double_0>func)
+ cdef float *out_array_data
+ cdef np.ndarray out_array
+ cdef np.npy_intp i, n
+
+ if size is None and out is None:
+ with lock:
+ return <float>random_func(state)
+
+ if out is not None:
+ check_output(out, np.float32, size)
+ out_array = <np.ndarray>out
+ else:
+ out_array = <np.ndarray>np.empty(size, np.float32)
+
+ n = np.PyArray_SIZE(out_array)
+ out_array_data = <float *>np.PyArray_DATA(out_array)
+ with lock, nogil:
+ for i in range(n):
+ out_array_data[i] = <float>random_func(state)
+ return out_array
+
+
+cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1:
+ if cons == CONS_NON_NEGATIVE:
+ if np.any(np.logical_and(np.logical_not(np.isnan(val)), np.signbit(val))):
+ raise ValueError(name + " < 0")
+ elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN:
+ if cons == CONS_POSITIVE_NOT_NAN and np.any(np.isnan(val)):
+ raise ValueError(name + " must not be NaN")
+ elif np.any(np.less_equal(val, 0)):
+ raise ValueError(name + " <= 0")
+ elif cons == CONS_BOUNDED_0_1:
+ if not np.all(np.greater_equal(val, 0)) or \
+ not np.all(np.less_equal(val, 1)):
+ raise ValueError("{0} < 0, {0} > 1 or {0} contains NaNs".format(name))
+ elif cons == CONS_BOUNDED_GT_0_1:
+ if not np.all(np.greater(val, 0)) or not np.all(np.less_equal(val, 1)):
+ raise ValueError("{0} <= 0, {0} > 1 or {0} contains NaNs".format(name))
+ elif cons == CONS_GT_1:
+ if not np.all(np.greater(val, 1)):
+ raise ValueError("{0} <= 1 or {0} contains NaNs".format(name))
+ elif cons == CONS_GTE_1:
+ if not np.all(np.greater_equal(val, 1)):
+ raise ValueError("{0} < 1 or {0} contains NaNs".format(name))
+ elif cons == CONS_POISSON:
+ if not np.all(np.less_equal(val, POISSON_LAM_MAX)):
+ raise ValueError("{0} value too large".format(name))
+ elif not np.all(np.greater_equal(val, 0.0)):
+ raise ValueError("{0} < 0 or {0} contains NaNs".format(name))
+ elif cons == LEGACY_CONS_POISSON:
+ if not np.all(np.less_equal(val, LEGACY_POISSON_LAM_MAX)):
+ raise ValueError("{0} value too large".format(name))
+ elif not np.all(np.greater_equal(val, 0.0)):
+ raise ValueError("{0} < 0 or {0} contains NaNs".format(name))
+
+ return 0
+
+
+cdef int check_constraint(double val, object name, constraint_type cons) except -1:
+ cdef bint is_nan
+ if cons == CONS_NON_NEGATIVE:
+ if not np.isnan(val) and np.signbit(val):
+ raise ValueError(name + " < 0")
+ elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN:
+ if cons == CONS_POSITIVE_NOT_NAN and np.isnan(val):
+ raise ValueError(name + " must not be NaN")
+ elif val <= 0:
+ raise ValueError(name + " <= 0")
+ elif cons == CONS_BOUNDED_0_1:
+ if not (val >= 0) or not (val <= 1):
+ raise ValueError("{0} < 0, {0} > 1 or {0} is NaN".format(name))
+ elif cons == CONS_BOUNDED_GT_0_1:
+        if not (val > 0) or not (val <= 1):
+            raise ValueError("{0} <= 0, {0} > 1 or {0} is NaN".format(name))
+ elif cons == CONS_GT_1:
+ if not (val > 1):
+ raise ValueError("{0} <= 1 or {0} is NaN".format(name))
+ elif cons == CONS_GTE_1:
+ if not (val >= 1):
+ raise ValueError("{0} < 1 or {0} is NaN".format(name))
+ elif cons == CONS_POISSON:
+ if not (val >= 0):
+ raise ValueError("{0} < 0 or {0} is NaN".format(name))
+ elif not (val <= POISSON_LAM_MAX):
+ raise ValueError(name + " value too large")
+ elif cons == LEGACY_CONS_POISSON:
+ if not (val >= 0):
+ raise ValueError("{0} < 0 or {0} is NaN".format(name))
+ elif not (val <= LEGACY_POISSON_LAM_MAX):
+ raise ValueError(name + " value too large")
+
+ return 0
+
+cdef object cont_broadcast_1(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ object out):
+
+ cdef np.ndarray randoms
+ cdef double a_val
+ cdef double *randoms_data
+ cdef np.broadcast it
+ cdef random_double_1 f = (<random_double_1>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if size is not None and out is None:
+ randoms = <np.ndarray>np.empty(size, np.double)
+ elif out is None:
+ randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr), np.PyArray_DIMS(a_arr), np.NPY_DOUBLE)
+ else:
+ randoms = <np.ndarray>out
+
+ randoms_data = <double *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+ it = np.PyArray_MultiIterNew2(randoms, a_arr)
+
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ randoms_data[i] = f(state, a_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object cont_broadcast_2(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint):
+ cdef np.ndarray randoms
+ cdef double a_val, b_val
+ cdef double *randoms_data
+ cdef np.broadcast it
+ cdef random_double_2 f = (<random_double_2>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if b_constraint != CONS_NONE:
+ check_array_constraint(b_arr, b_name, b_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.double)
+ else:
+ it = np.PyArray_MultiIterNew2(a_arr, b_arr)
+ randoms = <np.ndarray>np.empty(it.shape, np.double)
+ # randoms = np.PyArray_SimpleNew(it.nd, np.PyArray_DIMS(it), np.NPY_DOUBLE)
+
+ randoms_data = <double *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ b_val = (<double*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ randoms_data[i] = f(state, a_val, b_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
+ np.ndarray c_arr, object c_name, constraint_type c_constraint):
+ cdef np.ndarray randoms
+ cdef double a_val, b_val, c_val
+ cdef double *randoms_data
+ cdef np.broadcast it
+ cdef random_double_3 f = (<random_double_3>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if b_constraint != CONS_NONE:
+ check_array_constraint(b_arr, b_name, b_constraint)
+
+ if c_constraint != CONS_NONE:
+ check_array_constraint(c_arr, c_name, c_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.double)
+ else:
+ it = np.PyArray_MultiIterNew3(a_arr, b_arr, c_arr)
+ # randoms = np.PyArray_SimpleNew(it.nd, np.PyArray_DIMS(it), np.NPY_DOUBLE)
+ randoms = <np.ndarray>np.empty(it.shape, np.double)
+
+ randoms_data = <double *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew4(randoms, a_arr, b_arr, c_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ b_val = (<double*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ c_val = (<double*>np.PyArray_MultiIter_DATA(it, 3))[0]
+ randoms_data[i] = f(state, a_val, b_val, c_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object cont(void *func, void *state, object size, object lock, int narg,
+ object a, object a_name, constraint_type a_constraint,
+ object b, object b_name, constraint_type b_constraint,
+ object c, object c_name, constraint_type c_constraint,
+ object out):
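+    """Dispatch a continuous distribution: validate parameter constraints,
+    take the fast scalar path when all parameters are 0-d, and otherwise
+    defer to the ``cont_broadcast_*`` helpers above."""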
+
+ cdef np.ndarray a_arr, b_arr, c_arr
+ cdef double _a = 0.0, _b = 0.0, _c = 0.0
+ cdef bint is_scalar = True
+ check_output(out, np.float64, size)
+ if narg > 0:
+ a_arr = <np.ndarray>np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0
+ if narg > 1:
+ b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
+ if narg == 3:
+ c_arr = <np.ndarray>np.PyArray_FROM_OTF(c, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(c_arr) == 0
+
+ if not is_scalar:
+ if narg == 1:
+ return cont_broadcast_1(func, state, size, lock,
+ a_arr, a_name, a_constraint,
+ out)
+ elif narg == 2:
+ return cont_broadcast_2(func, state, size, lock,
+ a_arr, a_name, a_constraint,
+ b_arr, b_name, b_constraint)
+ else:
+ return cont_broadcast_3(func, state, size, lock,
+ a_arr, a_name, a_constraint,
+ b_arr, b_name, b_constraint,
+ c_arr, c_name, c_constraint)
+
+ if narg > 0:
+ _a = PyFloat_AsDouble(a)
+ if a_constraint != CONS_NONE and is_scalar:
+ check_constraint(_a, a_name, a_constraint)
+ if narg > 1:
+ _b = PyFloat_AsDouble(b)
+ if b_constraint != CONS_NONE:
+ check_constraint(_b, b_name, b_constraint)
+ if narg == 3:
+ _c = PyFloat_AsDouble(c)
+ if c_constraint != CONS_NONE and is_scalar:
+ check_constraint(_c, c_name, c_constraint)
+
+ if size is None and out is None:
+ with lock:
+ if narg == 0:
+ return (<random_double_0>func)(state)
+ elif narg == 1:
+ return (<random_double_1>func)(state, _a)
+ elif narg == 2:
+ return (<random_double_2>func)(state, _a, _b)
+ elif narg == 3:
+ return (<random_double_3>func)(state, _a, _b, _c)
+
+ cdef np.npy_intp i, n
+ cdef np.ndarray randoms
+ if out is None:
+ randoms = <np.ndarray>np.empty(size)
+ else:
+ randoms = <np.ndarray>out
+ n = np.PyArray_SIZE(randoms)
+
+ cdef double *randoms_data = <double *>np.PyArray_DATA(randoms)
+ cdef random_double_0 f0
+ cdef random_double_1 f1
+ cdef random_double_2 f2
+ cdef random_double_3 f3
+
+ with lock, nogil:
+ if narg == 0:
+ f0 = (<random_double_0>func)
+ for i in range(n):
+ randoms_data[i] = f0(state)
+ elif narg == 1:
+ f1 = (<random_double_1>func)
+ for i in range(n):
+ randoms_data[i] = f1(state, _a)
+ elif narg == 2:
+ f2 = (<random_double_2>func)
+ for i in range(n):
+ randoms_data[i] = f2(state, _a, _b)
+ elif narg == 3:
+ f3 = (<random_double_3>func)
+ for i in range(n):
+ randoms_data[i] = f3(state, _a, _b, _c)
+
+ if out is None:
+ return randoms
+ else:
+ return out
+
+cdef object discrete_broadcast_d(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint):
+
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+ cdef np.broadcast it
+ cdef random_uint_d f = (<random_uint_d>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if size is not None:
+ randoms = np.empty(size, np.int64)
+ else:
+ # randoms = np.empty(np.shape(a_arr), np.double)
+ randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr), np.PyArray_DIMS(a_arr), np.NPY_INT64)
+
+ randoms_data = <int64_t *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew2(randoms, a_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ randoms_data[i] = f(state, a_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object discrete_broadcast_dd(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint):
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+ cdef np.broadcast it
+ cdef random_uint_dd f = (<random_uint_dd>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+ if b_constraint != CONS_NONE:
+ check_array_constraint(b_arr, b_name, b_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ else:
+ it = np.PyArray_MultiIterNew2(a_arr, b_arr)
+ randoms = <np.ndarray>np.empty(it.shape, np.int64)
+ # randoms = np.PyArray_SimpleNew(it.nd, np.PyArray_DIMS(it), np.NPY_INT64)
+
+ randoms_data = <int64_t *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ b_val = (<double*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ randoms_data[i] = f(state, a_val, b_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object discrete_broadcast_di(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint):
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+ cdef np.broadcast it
+ cdef random_uint_di f = (<random_uint_di>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if b_constraint != CONS_NONE:
+ check_array_constraint(b_arr, b_name, b_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ else:
+ it = np.PyArray_MultiIterNew2(a_arr, b_arr)
+ randoms = <np.ndarray>np.empty(it.shape, np.int64)
+
+ randoms_data = <int64_t *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ b_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0] = f(state, a_val, b_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
+ np.ndarray c_arr, object c_name, constraint_type c_constraint):
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+ cdef np.broadcast it
+ cdef random_uint_iii f = (<random_uint_iii>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if b_constraint != CONS_NONE:
+ check_array_constraint(b_arr, b_name, b_constraint)
+
+ if c_constraint != CONS_NONE:
+ check_array_constraint(c_arr, c_name, c_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ else:
+ it = np.PyArray_MultiIterNew3(a_arr, b_arr, c_arr)
+ randoms = <np.ndarray>np.empty(it.shape, np.int64)
+
+ randoms_data = <int64_t *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew4(randoms, a_arr, b_arr, c_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ b_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ c_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 3))[0]
+ randoms_data[i] = f(state, a_val, b_val, c_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object discrete_broadcast_i(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint):
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+ cdef np.broadcast it
+ cdef random_uint_i f = (<random_uint_i>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ else:
+ randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr), np.PyArray_DIMS(a_arr), np.NPY_INT64)
+
+ randoms_data = <int64_t *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew2(randoms, a_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ randoms_data[i] = f(state, a_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+# Needs double <vec>, double-double <vec>, double-int64_t <vec>, int64_t <vec>, int64_t-int64_t-int64_t <vec>
+cdef object disc(void *func, void *state, object size, object lock,
+ int narg_double, int narg_int64,
+ object a, object a_name, constraint_type a_constraint,
+ object b, object b_name, constraint_type b_constraint,
+ object c, object c_name, constraint_type c_constraint):
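+    """Dispatch a discrete distribution taking ``narg_double`` double and
+    ``narg_int64`` integer parameters: validate constraints, take the scalar
+    path when all parameters are 0-d, and otherwise defer to the
+    ``discrete_broadcast_*`` helpers above."""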
+
+ cdef double _da = 0, _db = 0
+ cdef int64_t _ia = 0, _ib = 0, _ic = 0
+ cdef bint is_scalar = True
+ if narg_double > 0:
+ a_arr = <np.ndarray>np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0
+ if narg_double > 1:
+ b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
+ elif narg_int64 == 1:
+ b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_INT64, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
+ else:
+ if narg_int64 > 0:
+ a_arr = <np.ndarray>np.PyArray_FROM_OTF(a, np.NPY_INT64, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0
+ if narg_int64 > 1:
+ b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_INT64, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
+ if narg_int64 > 2:
+ c_arr = <np.ndarray>np.PyArray_FROM_OTF(c, np.NPY_INT64, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(c_arr) == 0
+
+ if not is_scalar:
+ if narg_int64 == 0:
+ if narg_double == 1:
+ return discrete_broadcast_d(func, state, size, lock,
+ a_arr, a_name, a_constraint)
+ elif narg_double == 2:
+ return discrete_broadcast_dd(func, state, size, lock,
+ a_arr, a_name, a_constraint,
+ b_arr, b_name, b_constraint)
+ elif narg_int64 == 1:
+ if narg_double == 0:
+ return discrete_broadcast_i(func, state, size, lock,
+ a_arr, a_name, a_constraint)
+ elif narg_double == 1:
+ return discrete_broadcast_di(func, state, size, lock,
+ a_arr, a_name, a_constraint,
+ b_arr, b_name, b_constraint)
+ else:
+ raise NotImplementedError("No vector path available")
+
+ if narg_double > 0:
+ _da = PyFloat_AsDouble(a)
+ if a_constraint != CONS_NONE and is_scalar:
+ check_constraint(_da, a_name, a_constraint)
+
+ if narg_double > 1:
+ _db = PyFloat_AsDouble(b)
+ if b_constraint != CONS_NONE and is_scalar:
+ check_constraint(_db, b_name, b_constraint)
+ elif narg_int64 == 1:
+ _ib = <int64_t>b
+ if b_constraint != CONS_NONE and is_scalar:
+ check_constraint(<double>_ib, b_name, b_constraint)
+ else:
+ if narg_int64 > 0:
+ _ia = <int64_t>a
+ if a_constraint != CONS_NONE and is_scalar:
+ check_constraint(<double>_ia, a_name, a_constraint)
+ if narg_int64 > 1:
+ _ib = <int64_t>b
+ if b_constraint != CONS_NONE and is_scalar:
+ check_constraint(<double>_ib, b_name, b_constraint)
+ if narg_int64 > 2:
+ _ic = <int64_t>c
+ if c_constraint != CONS_NONE and is_scalar:
+ check_constraint(<double>_ic, c_name, c_constraint)
+
+ if size is None:
+ with lock:
+ if narg_int64 == 0:
+ if narg_double == 0:
+ return (<random_uint_0>func)(state)
+ elif narg_double == 1:
+ return (<random_uint_d>func)(state, _da)
+ elif narg_double == 2:
+ return (<random_uint_dd>func)(state, _da, _db)
+ elif narg_int64 == 1:
+ if narg_double == 0:
+ return (<random_uint_i>func)(state, _ia)
+ if narg_double == 1:
+ return (<random_uint_di>func)(state, _da, _ib)
+ else:
+ return (<random_uint_iii>func)(state, _ia, _ib, _ic)
+
+ cdef np.npy_intp i, n
+ cdef np.ndarray randoms = <np.ndarray>np.empty(size, np.int64)
+ cdef np.int64_t *randoms_data
+ cdef random_uint_0 f0
+ cdef random_uint_d fd
+ cdef random_uint_dd fdd
+ cdef random_uint_di fdi
+ cdef random_uint_i fi
+ cdef random_uint_iii fiii
+
+ n = np.PyArray_SIZE(randoms)
+ randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
+
+ with lock, nogil:
+ if narg_int64 == 0:
+ if narg_double == 0:
+ f0 = (<random_uint_0>func)
+ for i in range(n):
+ randoms_data[i] = f0(state)
+ elif narg_double == 1:
+ fd = (<random_uint_d>func)
+ for i in range(n):
+ randoms_data[i] = fd(state, _da)
+ elif narg_double == 2:
+ fdd = (<random_uint_dd>func)
+ for i in range(n):
+ randoms_data[i] = fdd(state, _da, _db)
+ elif narg_int64 == 1:
+ if narg_double == 0:
+ fi = (<random_uint_i>func)
+ for i in range(n):
+ randoms_data[i] = fi(state, _ia)
+ if narg_double == 1:
+ fdi = (<random_uint_di>func)
+ for i in range(n):
+ randoms_data[i] = fdi(state, _da, _ib)
+ else:
+ fiii = (<random_uint_iii>func)
+ for i in range(n):
+ randoms_data[i] = fiii(state, _ia, _ib, _ic)
+
+ return randoms
+
+
+cdef object cont_broadcast_1_f(void *func, bitgen_t *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ object out):
+
+ cdef np.ndarray randoms
+ cdef float a_val
+ cdef float *randoms_data
+ cdef np.broadcast it
+ cdef random_float_1 f = (<random_float_1>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if size is not None and out is None:
+ randoms = <np.ndarray>np.empty(size, np.float32)
+ elif out is None:
+ randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr),
+ np.PyArray_DIMS(a_arr),
+ np.NPY_FLOAT32)
+ else:
+ randoms = <np.ndarray>out
+
+ randoms_data = <float *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+ it = np.PyArray_MultiIterNew2(randoms, a_arr)
+
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<float*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ randoms_data[i] = f(state, a_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
+ object a, object a_name, constraint_type a_constraint,
+ object out):
+
+ cdef np.ndarray a_arr, b_arr, c_arr
+ cdef float _a
+ cdef bint is_scalar = True
+ cdef int requirements = np.NPY_ALIGNED | np.NPY_FORCECAST
+ check_output(out, np.float32, size)
+ a_arr = <np.ndarray>np.PyArray_FROMANY(a, np.NPY_FLOAT32, 0, 0, requirements)
+ is_scalar = np.PyArray_NDIM(a_arr) == 0
+
+ if not is_scalar:
+ return cont_broadcast_1_f(func, state, size, lock, a_arr, a_name, a_constraint, out)
+
+ _a = <float>PyFloat_AsDouble(a)
+ if a_constraint != CONS_NONE:
+ check_constraint(_a, a_name, a_constraint)
+
+ if size is None and out is None:
+ with lock:
+ return (<random_float_1>func)(state, _a)
+
+ cdef np.npy_intp i, n
+ cdef np.ndarray randoms
+ if out is None:
+ randoms = <np.ndarray>np.empty(size, np.float32)
+ else:
+ randoms = <np.ndarray>out
+ n = np.PyArray_SIZE(randoms)
+
+ cdef float *randoms_data = <float *>np.PyArray_DATA(randoms)
+ cdef random_float_1 f1 = <random_float_1>func
+
+ with lock, nogil:
+ for i in range(n):
+ randoms_data[i] = f1(state, _a)
+
+ if out is None:
+ return randoms
+ else:
+ return out
--- /dev/null
+"""
+Use cffi to access any of the underlying C functions from distributions.h
+"""
+import os
+import numpy as np
+import cffi
+from .parse import parse_distributions_h
+ffi = cffi.FFI()
+
+inc_dir = os.path.join(np.get_include(), 'numpy')
+
+# Basic numpy types
+ffi.cdef('''
+ typedef intptr_t npy_intp;
+ typedef unsigned char npy_bool;
+
+''')
+
+parse_distributions_h(ffi, inc_dir)
+
+lib = ffi.dlopen(np.random._generator.__file__)
+
+# Compare the distributions.h random_standard_normal_fill to
+# Generator.standard_normal
+bit_gen = np.random.PCG64()
+rng = np.random.Generator(bit_gen)
+state = bit_gen.state
+
+interface = rng.bit_generator.cffi
+n = 100
+vals_cffi = ffi.new('double[%d]' % n)
+lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi)
+
+# reset the state
+bit_gen.state = state
+
+vals = rng.standard_normal(n)
+
+for i in range(n):
+ assert vals[i] == vals_cffi[i]
--- /dev/null
+import os
+
+
+def parse_distributions_h(ffi, inc_dir):
+ """
+ Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef
+
+ Read the function declarations without the "#define ..." macros that will
+ be filled in when loading the library.
+ """
+
+ with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid:
+ s = []
+ for line in fid:
+ # massage the include file
+ if line.strip().startswith('#'):
+ continue
+ s.append(line)
+ ffi.cdef('\n'.join(s))
+
+ with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid:
+ s = []
+ in_skip = 0
+ for line in fid:
+ # massage the include file
+ if line.strip().startswith('#'):
+ continue
+
+ # skip any inlined function definition
+ # which starts with 'static NPY_INLINE xxx(...) {'
+ # and ends with a closing '}'
+ if line.strip().startswith('static NPY_INLINE'):
+ in_skip += line.count('{')
+ continue
+ elif in_skip > 0:
+ in_skip += line.count('{')
+ in_skip -= line.count('}')
+ continue
+
+ # replace defines with their value or remove them
+ line = line.replace('DECLDIR', '')
+ line = line.replace('NPY_INLINE', '')
+ line = line.replace('RAND_INT_TYPE', 'int64_t')
+ s.append(line)
+ ffi.cdef('\n'.join(s))
+
--- /dev/null
+#!/usr/bin/env python
+#cython: language_level=3
+
+from libc.stdint cimport uint32_t
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+from numpy.random cimport bitgen_t
+from numpy.random import PCG64
+
+np.import_array()
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uniform_mean(Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef double[::1] random_values
+ cdef np.ndarray randoms
+
+ x = PCG64()
+ capsule = x.capsule
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n)
+ # Best practice is to acquire the lock whenever generating random values.
+ # This prevents other threads from modifying the state. Acquiring the lock
+    # is only necessary if the GIL is also released, as in this example.
+ with x.lock, nogil:
+ for i in range(n):
+ random_values[i] = rng.next_double(rng.state)
+ randoms = np.asarray(random_values)
+ return randoms.mean()
+
+
+# This function is declared nogil so it can be used without the GIL below
+cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil:
+ cdef uint32_t mask, delta, val
+ mask = delta = ub - lb
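+    # Smear the highest set bit rightward so that mask becomes 2**k - 1, the
+    # smallest all-ones mask covering delta; masked draws are then rejected
+    # only when they exceed delta.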
+ mask |= mask >> 1
+ mask |= mask >> 2
+ mask |= mask >> 4
+ mask |= mask >> 8
+ mask |= mask >> 16
+
+ val = rng.next_uint32(rng.state) & mask
+ while val > delta:
+ val = rng.next_uint32(rng.state) & mask
+
+ return lb + val
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef uint32_t[::1] out
+ cdef const char *capsule_name = "BitGenerator"
+
+ x = PCG64()
+ out = np.empty(n, dtype=np.uint32)
+ capsule = x.capsule
+
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <bitgen_t *>PyCapsule_GetPointer(capsule, capsule_name)
+
+ with x.lock, nogil:
+ for i in range(n):
+ out[i] = bounded_uint(lb, ub, rng)
+ return np.asarray(out)
--- /dev/null
+#!/usr/bin/env python
+#cython: language_level=3
+"""
+This file shows how to use a BitGenerator to create a distribution.
+"""
+import numpy as np
+cimport numpy as np
+cimport cython
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+from libc.stdint cimport uint16_t, uint64_t
+from numpy.random cimport bitgen_t
+from numpy.random import PCG64
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uniforms(Py_ssize_t n):
+ """
+ Create an array of `n` uniformly distributed doubles.
+    A 'real' distribution would process these values into
+    some non-uniform distribution.
+ """
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef double[::1] random_values
+
+ x = PCG64()
+ capsule = x.capsule
+ # Optional check that the capsule if from a BitGenerator
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ # Cast the pointer
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n, dtype='float64')
+ with x.lock, nogil:
+ for i in range(n):
+ # Call the function
+ random_values[i] = rng.next_double(rng.state)
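+            # A non-uniform distribution could transform each draw here,
+            # e.g. an inverse-CDF step such as -log(1 - u) for exponentials
+            # (illustrative).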
+ randoms = np.asarray(random_values)
+
+ return randoms
+
+# cython example 2
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uint10_uniforms(Py_ssize_t n):
+    """Uniform 10-bit integers stored as 16-bit unsigned integers"""
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef uint16_t[::1] random_values
+ cdef int bits_remaining
+ cdef int width = 10
+ cdef uint64_t buff, mask = 0x3FF
+
+ x = PCG64()
+ capsule = x.capsule
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n, dtype='uint16')
+ # Best practice is to release GIL and acquire the lock
+ bits_remaining = 0
+ with x.lock, nogil:
+ for i in range(n):
+            if bits_remaining < width:
+                # Refill the 64-bit buffer once fewer than `width` bits remain
+                buff = rng.next_uint64(rng.state)
+                bits_remaining = 64
+            random_values[i] = buff & mask
+            buff >>= width
+            bits_remaining -= width
+
+ randoms = np.asarray(random_values)
+ return randoms
+
--- /dev/null
+#!/usr/bin/env python3
+"""
+Build the Cython demonstrations of low-level access to NumPy random
+
+Usage: python setup.py build_ext -i
+"""
+
+import numpy as np
+from distutils.core import setup
+from Cython.Build import cythonize
+from setuptools.extension import Extension
+from os.path import join, abspath, dirname
+
+path = abspath(dirname(__file__))
+
+extending = Extension("extending",
+ sources=[join(path, 'extending.pyx')],
+ include_dirs=[
+ np.get_include(),
+ join(path, '..', '..')
+ ],
+ )
+distributions = Extension("extending_distributions",
+ sources=[join(path, 'extending_distributions.pyx')],
+ include_dirs=[np.get_include()])
+
+extensions = [extending, distributions]
+
+setup(
+ ext_modules=cythonize(extensions)
+)
--- /dev/null
+import numpy as np
+import numba as nb
+
+from numpy.random import PCG64
+from timeit import timeit
+
+bit_gen = PCG64()
+next_d = bit_gen.cffi.next_double
+state_addr = bit_gen.cffi.state_address
+
+def normals(n, state):
+ out = np.empty(n)
+ for i in range((n + 1) // 2):
+ x1 = 2.0 * next_d(state) - 1.0
+ x2 = 2.0 * next_d(state) - 1.0
+ r2 = x1 * x1 + x2 * x2
+ while r2 >= 1.0 or r2 == 0.0:
+ x1 = 2.0 * next_d(state) - 1.0
+ x2 = 2.0 * next_d(state) - 1.0
+ r2 = x1 * x1 + x2 * x2
+ f = np.sqrt(-2.0 * np.log(r2) / r2)
+ out[2 * i] = f * x1
+ if 2 * i + 1 < n:
+ out[2 * i + 1] = f * x2
+ return out
+
+# Compile using Numba
+normalsj = nb.jit(normals, nopython=True)
+# Numba requires the raw state address (an integer), not the cffi state pointer
+n = 10000
+
+def numbacall():
+ return normalsj(n, state_addr)
+
+rg = np.random.Generator(PCG64())
+
+def numpycall():
+ return rg.normal(size=n)
+
+# Check that the functions work
+r1 = numbacall()
+r2 = numpycall()
+assert r1.shape == (n,)
+assert r1.shape == r2.shape
+
+t1 = timeit(numbacall, number=1000)
+print('{:.2f} secs for {} PCG64 (Numba/PCG64) gaussian randoms'.format(t1, n))
+t2 = timeit(numpycall, number=1000)
+print('{:.2f} secs for {} PCG64 (NumPy/PCG64) gaussian randoms'.format(t2, n))
+
+# example 2
+
+next_u32 = bit_gen.ctypes.next_uint32
+ctypes_state = bit_gen.ctypes.state
+
+@nb.jit(nopython=True)
+def bounded_uint(lb, ub, state):
+ mask = delta = ub - lb
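+    # Same masked-rejection idea as the Cython example above: build the
+    # smallest all-ones mask covering delta, then reject draws that exceed it.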
+ mask |= mask >> 1
+ mask |= mask >> 2
+ mask |= mask >> 4
+ mask |= mask >> 8
+ mask |= mask >> 16
+
+ val = next_u32(state) & mask
+ while val > delta:
+ val = next_u32(state) & mask
+
+ return lb + val
+
+
+print(bounded_uint(323, 2394691, ctypes_state.value))
+
+
+@nb.jit(nopython=True)
+def bounded_uints(lb, ub, n, state):
+ out = np.empty(n, dtype=np.uint32)
+ for i in range(n):
+ out[i] = bounded_uint(lb, ub, state)
+    return out
+
+
+bounded_uints(323, 2394691, 10000000, ctypes_state.value)
+
+
--- /dev/null
+r"""
+Building the required library in this example requires a source distribution
+of NumPy or a clone of the NumPy git repository, since distributions.c is not
+included in binary distributions.
+
+On *nix, execute in numpy/random/src/distributions
+
+export PYTHON_VERSION=3.8  # Python version
+export PYTHON_INCLUDE=#path to Python's include folder, usually \
+ ${PYTHON_HOME}/include/python${PYTHON_VERSION}m
+export NUMPY_INCLUDE=#path to numpy's include folder, usually \
+ ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include
+gcc -shared -o libdistributions.so -fPIC distributions.c \
+ -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
+mv libdistributions.so ../../_examples/numba/
+
+On Windows
+
+rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example
+set PYTHON_HOME=c:\Anaconda
+set PYTHON_VERSION=38
+cl.exe /LD .\distributions.c -DDLL_EXPORT ^
+    -I%PYTHON_HOME%\lib\site-packages\numpy\core\include ^
+ -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib
+move distributions.dll ../../_examples/numba/
+"""
+import os
+
+import numba as nb
+import numpy as np
+from cffi import FFI
+
+from numpy.random import PCG64
+
+ffi = FFI()
+if os.path.exists('./distributions.dll'):
+ lib = ffi.dlopen('./distributions.dll')
+elif os.path.exists('./libdistributions.so'):
+ lib = ffi.dlopen('./libdistributions.so')
+else:
+ raise RuntimeError('Required DLL/so file was not found.')
+
+ffi.cdef("""
+double random_standard_normal(void *bitgen_state);
+""")
+x = PCG64()
+xffi = x.cffi
+bit_generator = xffi.bit_generator
+
+random_standard_normal = lib.random_standard_normal
+
+
+def normals(n, bit_generator):
+ out = np.empty(n)
+ for i in range(n):
+ out[i] = random_standard_normal(bit_generator)
+ return out
+
+
+normalsj = nb.jit(normals, nopython=True)
+
+# Numba requires a memory address for void *
+# Can also get address from x.ctypes.bit_generator.value
+bit_generator_address = int(ffi.cast('uintptr_t', bit_generator))
+
+norm = normalsj(1000, bit_generator_address)
+print(norm[:12])
--- /dev/null
+#!python
+#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
+import operator
+import warnings
+
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+from cpython cimport (Py_INCREF, PyFloat_AsDouble)
+
+cimport cython
+import numpy as np
+cimport numpy as np
+from numpy.core.multiarray import normalize_axis_index
+
+from libc cimport string
+from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
+ int32_t, int64_t, INT64_MAX, SIZE_MAX)
+from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64,
+ _rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16,
+ _rand_uint8, _gen_mask)
+from ._bounded_integers import _integers_types
+from ._pcg64 import PCG64
+from numpy.random cimport bitgen_t
+from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
+ CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1,
+ CONS_GT_1, CONS_POSITIVE_NOT_NAN, CONS_POISSON,
+ double_fill, cont, kahan_sum, cont_broadcast_3, float_fill, cont_f,
+ check_array_constraint, check_constraint, disc, discrete_broadcast_iii,
+ )
+
+
+cdef extern from "numpy/random/distributions.h":
+
+ struct s_binomial_t:
+ int has_binomial
+ double psave
+ int64_t nsave
+ double r
+ double q
+ double fm
+ int64_t m
+ double p1
+ double xm
+ double xl
+ double xr
+ double c
+ double laml
+ double lamr
+ double p2
+ double p3
+ double p4
+
+ ctypedef s_binomial_t binomial_t
+
+ double random_standard_uniform(bitgen_t *bitgen_state) nogil
+ void random_standard_uniform_fill(bitgen_t* bitgen_state, np.npy_intp cnt, double *out) nogil
+ double random_standard_exponential(bitgen_t *bitgen_state) nogil
+ double random_standard_exponential_f(bitgen_t *bitgen_state) nogil
+ void random_standard_exponential_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil
+ void random_standard_exponential_fill_f(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil
+ void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil
+ void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil
+ double random_standard_normal(bitgen_t* bitgen_state) nogil
+ void random_standard_normal_fill(bitgen_t *bitgen_state, np.npy_intp count, double *out) nogil
+ void random_standard_normal_fill_f(bitgen_t *bitgen_state, np.npy_intp count, float *out) nogil
+ double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil
+
+ float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
+ void random_standard_uniform_fill_f(bitgen_t* bitgen_state, np.npy_intp cnt, float *out) nogil
+ float random_standard_normal_f(bitgen_t* bitgen_state) nogil
+ float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
+
+ int64_t random_positive_int64(bitgen_t *bitgen_state) nogil
+ int32_t random_positive_int32(bitgen_t *bitgen_state) nogil
+ int64_t random_positive_int(bitgen_t *bitgen_state) nogil
+ uint64_t random_uint(bitgen_t *bitgen_state) nogil
+
+ double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil
+
+ double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil
+ float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil
+
+ double random_exponential(bitgen_t *bitgen_state, double scale) nogil
+ double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
+ double random_beta(bitgen_t *bitgen_state, double a, double b) nogil
+ double random_chisquare(bitgen_t *bitgen_state, double df) nogil
+ double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil
+ double random_standard_cauchy(bitgen_t *bitgen_state) nogil
+ double random_pareto(bitgen_t *bitgen_state, double a) nogil
+ double random_weibull(bitgen_t *bitgen_state, double a) nogil
+ double random_power(bitgen_t *bitgen_state, double a) nogil
+ double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil
+ double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
+ double random_standard_t(bitgen_t *bitgen_state, double df) nogil
+ double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+ double nonc) nogil
+ double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
+ double dfden, double nonc) nogil
+ double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil
+ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
+ double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+ double right) nogil
+
+ int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil
+ int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil
+ int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil
+ int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil
+ int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad,
+ int64_t sample) nogil
+
+ uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
+
+ # Generate random uint64 numbers in closed interval [off, off + rng].
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
+ uint64_t off, uint64_t rng,
+ uint64_t mask, bint use_masked) nogil
+
+ void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix,
+ double *pix, np.npy_intp d, binomial_t *binomial) nogil
+
+ int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates) nogil
+ void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates) nogil
+
+np.import_array()
+
+
+cdef int64_t _safe_sum_nonneg_int64(size_t num_colors, int64_t *colors):
+ """
+ Sum the values in the array `colors`.
+
+ Return -1 if an overflow occurs.
+ The values in *colors are assumed to be nonnegative.
+ """
+ cdef size_t i
+ cdef int64_t sum
+
+ sum = 0
+ for i in range(num_colors):
+ if colors[i] > INT64_MAX - sum:
+ return -1
+ sum += colors[i]
+ return sum
+
+
+cdef bint _check_bit_generator(object bitgen):
+ """Check if an object satisfies the BitGenerator interface.
+ """
+ if not hasattr(bitgen, "capsule"):
+ return False
+ cdef const char *name = "BitGenerator"
+ return PyCapsule_IsValid(bitgen.capsule, name)
+
+
+cdef class Generator:
+ """
+ Generator(bit_generator)
+
+ Container for the BitGenerators.
+
+ ``Generator`` exposes a number of methods for generating random
+ numbers drawn from a variety of probability distributions. In addition to
+ the distribution-specific arguments, each method takes a keyword argument
+ `size` that defaults to ``None``. If `size` is ``None``, then a single
+ value is generated and returned. If `size` is an integer, then a 1-D
+ array filled with generated values is returned. If `size` is a tuple,
+ then an array with that shape is filled and returned.
+
+ The function :func:`numpy.random.default_rng` will instantiate
+ a `Generator` with numpy's default `BitGenerator`.
+
+ **No Compatibility Guarantee**
+
+ ``Generator`` does not provide a version compatibility guarantee. In
+ particular, as better algorithms evolve the bit stream may change.
+
+ Parameters
+ ----------
+ bit_generator : BitGenerator
+ BitGenerator to use as the core generator.
+
+ Notes
+ -----
+    The Python stdlib module `random` contains a pseudo-random number
+    generator with a number of methods similar to the ones available in
+    ``Generator``. It uses Mersenne Twister, and this bit generator can
+ be accessed using ``MT19937``. ``Generator``, besides being
+ NumPy-aware, has the advantage that it provides a much larger number
+ of probability distributions to choose from.
+
+ Examples
+ --------
+ >>> from numpy.random import Generator, PCG64
+ >>> rg = Generator(PCG64())
+ >>> rg.standard_normal()
+ -0.203 # random
+
+ See Also
+ --------
+ default_rng : Recommended constructor for `Generator`.
+ """
+ cdef public object _bit_generator
+ cdef bitgen_t _bitgen
+ cdef binomial_t _binomial
+ cdef object lock
+ _poisson_lam_max = POISSON_LAM_MAX
+
+ def __init__(self, bit_generator):
+ self._bit_generator = bit_generator
+
+ capsule = bit_generator.capsule
+ cdef const char *name = "BitGenerator"
+ if not PyCapsule_IsValid(capsule, name):
+            raise ValueError("Invalid bit generator. The bit generator must "
+ "be instantiated.")
+ self._bitgen = (<bitgen_t *> PyCapsule_GetPointer(capsule, name))[0]
+ self.lock = bit_generator.lock
+
+ def __repr__(self):
+ return self.__str__() + ' at 0x{:X}'.format(id(self))
+
+ def __str__(self):
+ _str = self.__class__.__name__
+ _str += '(' + self.bit_generator.__class__.__name__ + ')'
+ return _str
+
+ # Pickling support:
+ def __getstate__(self):
+ return self.bit_generator.state
+
+ def __setstate__(self, state):
+ self.bit_generator.state = state
+
+ def __reduce__(self):
+ from ._pickle import __generator_ctor
+ return __generator_ctor, (self.bit_generator.state['bit_generator'],), self.bit_generator.state
+
+ @property
+ def bit_generator(self):
+ """
+ Gets the bit generator instance used by the generator
+
+ Returns
+ -------
+ bit_generator : BitGenerator
+ The bit generator instance used by the generator
+ """
+ return self._bit_generator
+
+ def random(self, size=None, dtype=np.float64, out=None):
+ """
+ random(size=None, dtype='d', out=None)
+
+ Return random floats in the half-open interval [0.0, 1.0).
+
+ Results are from the "continuous uniform" distribution over the
+ stated interval. To sample :math:`Unif[a, b), b > a` multiply
+ the output of `random` by `(b-a)` and add `a`::
+
+ (b - a) * random() + a
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ dtype : {str, dtype}, optional
+ Desired dtype of the result, either 'd' (or 'float64') or 'f'
+ (or 'float32'). All dtypes are determined by their name. The
+ default value is 'd'.
+ out : ndarray, optional
+ Alternative output array in which to place the result. If size is not None,
+ it must have the same shape as the provided size and must match the type of
+ the output values.
+
+ Returns
+ -------
+ out : float or ndarray of floats
+ Array of random floats of shape `size` (unless ``size=None``, in which
+ case a single float is returned).
+
+ Examples
+ --------
+ >>> rng = np.random.default_rng()
+ >>> rng.random()
+ 0.47108547995356098 # random
+ >>> type(rng.random())
+ <class 'float'>
+ >>> rng.random((5,))
+ array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428]) # random
+
+ Three-by-two array of random numbers from [-5, 0):
+
+ >>> 5 * rng.random((3, 2)) - 5
+ array([[-3.99149989, -0.52338984], # random
+ [-2.99091858, -0.79479508],
+ [-1.23204345, -1.75224494]])
+
+ """
+ cdef double temp
+ key = np.dtype(dtype).name
+ if key == 'float64':
+ return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, out)
+ elif key == 'float32':
+ return float_fill(&random_standard_uniform_fill_f, &self._bitgen, size, self.lock, out)
+ else:
+ raise TypeError('Unsupported dtype "%s" for random' % key)
+
+ def beta(self, a, b, size=None):
+ """
+ beta(a, b, size=None)
+
+ Draw samples from a Beta distribution.
+
+ The Beta distribution is a special case of the Dirichlet distribution,
+ and is related to the Gamma distribution. It has the probability
+ distribution function
+
+ .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1}
+ (1 - x)^{\\beta - 1},
+
+ where the normalization, B, is the beta function,
+
+ .. math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1}
+ (1 - t)^{\\beta - 1} dt.
+
+ It is often seen in Bayesian inference and order statistics.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Alpha, positive (>0).
+ b : float or array_like of floats
+ Beta, positive (>0).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` and ``b`` are both scalars.
+ Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized beta distribution.
+
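+        Examples
+        --------
+        Draw three samples from a Beta(2, 5) distribution (values shown are
+        illustrative):
+
+        >>> rng = np.random.default_rng()
+        >>> rng.beta(2.0, 5.0, size=3)
+        array([0.29, 0.23, 0.10])  # random
+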
+ """
+ return cont(&random_beta, &self._bitgen, size, self.lock, 2,
+ a, 'a', CONS_POSITIVE,
+ b, 'b', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def exponential(self, scale=1.0, size=None):
+ """
+ exponential(scale=1.0, size=None)
+
+ Draw samples from an exponential distribution.
+
+ Its probability density function is
+
+ .. math:: f(x; \\frac{1}{\\beta}) = \\frac{1}{\\beta} \\exp(-\\frac{x}{\\beta}),
+
+ for ``x > 0`` and 0 elsewhere. :math:`\\beta` is the scale parameter,
+ which is the inverse of the rate parameter :math:`\\lambda = 1/\\beta`.
+ The rate parameter is an alternative, widely used parameterization
+ of the exponential distribution [3]_.
+
+ The exponential distribution is a continuous analogue of the
+ geometric distribution. It describes many common situations, such as
+ the size of raindrops measured over many rainstorms [1]_, or the time
+ between page requests to Wikipedia [2]_.
+
+ Parameters
+ ----------
+ scale : float or array_like of floats
+ The scale parameter, :math:`\\beta = 1/\\lambda`. Must be
+ non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``scale`` is a scalar. Otherwise,
+ ``np.array(scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized exponential distribution.
+
+ References
+ ----------
+ .. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
+ Random Signal Principles", 4th ed, 2001, p. 57.
+ .. [2] Wikipedia, "Poisson process",
+ https://en.wikipedia.org/wiki/Poisson_process
+ .. [3] Wikipedia, "Exponential distribution",
+ https://en.wikipedia.org/wiki/Exponential_distribution
+
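+        Examples
+        --------
+        Draw three samples with a scale (mean) of 3.0 (values shown are
+        illustrative):
+
+        >>> rng = np.random.default_rng()
+        >>> rng.exponential(3.0, size=3)
+        array([1.1, 0.3, 7.8])  # random
+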
+ """
+ return cont(&random_exponential, &self._bitgen, size, self.lock, 1,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ def standard_exponential(self, size=None, dtype=np.float64, method=u'zig', out=None):
+ """
+ standard_exponential(size=None, dtype='d', method='zig', out=None)
+
+ Draw samples from the standard exponential distribution.
+
+ `standard_exponential` is identical to the exponential distribution
+ with a scale parameter of 1.
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ dtype : dtype, optional
+ Desired dtype of the result, either 'd' (or 'float64') or 'f'
+ (or 'float32'). All dtypes are determined by their name. The
+ default value is 'd'.
+ method : str, optional
+ Either 'inv' or 'zig'. 'inv' uses the default inverse CDF method.
+ 'zig' uses the much faster Ziggurat method of Marsaglia and Tsang.
+ out : ndarray, optional
+ Alternative output array in which to place the result. If size is not None,
+ it must have the same shape as the provided size and must match the type of
+ the output values.
+
+ Returns
+ -------
+ out : float or ndarray
+ Drawn samples.
+
+ Examples
+ --------
+ Output a 3x8000 array:
+
+ >>> n = np.random.default_rng().standard_exponential((3, 8000))
+
+ """
+ key = np.dtype(dtype).name
+ if key == 'float64':
+ if method == u'zig':
+ return double_fill(&random_standard_exponential_fill, &self._bitgen, size, self.lock, out)
+ else:
+ return double_fill(&random_standard_exponential_inv_fill, &self._bitgen, size, self.lock, out)
+ elif key == 'float32':
+ if method == u'zig':
+ return float_fill(&random_standard_exponential_fill_f, &self._bitgen, size, self.lock, out)
+ else:
+ return float_fill(&random_standard_exponential_inv_fill_f, &self._bitgen, size, self.lock, out)
+ else:
+ raise TypeError('Unsupported dtype "%s" for standard_exponential'
+ % key)
+
+ def integers(self, low, high=None, size=None, dtype=np.int64, endpoint=False):
+ """
+ integers(low, high=None, size=None, dtype='int64', endpoint=False)
+
+ Return random integers from `low` (inclusive) to `high` (exclusive), or
+ if endpoint=True, `low` (inclusive) to `high` (inclusive). Replaces
+ `RandomState.randint` (with endpoint=False) and
+        `RandomState.random_integers` (with endpoint=True).
+
+ Return random integers from the "discrete uniform" distribution of
+ the specified dtype. If `high` is None (the default), then results are
+ from 0 to `low`.
+
+ Parameters
+ ----------
+ low : int or array-like of ints
+ Lowest (signed) integers to be drawn from the distribution (unless
+ ``high=None``, in which case this parameter is 0 and this value is
+ used for `high`).
+ high : int or array-like of ints, optional
+ If provided, one above the largest (signed) integer to be drawn
+ from the distribution (see above for behavior if ``high=None``).
+ If array-like, must contain integer values
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ dtype : {str, dtype}, optional
+ Desired dtype of the result. All dtypes are determined by their
+            name, i.e., 'int64', 'int', etc., so byteorder is not available
+            and a specific precision may have different C types depending
+            on the platform. The default value is `np.int64`.
+ endpoint : bool, optional
+ If true, sample from the interval [low, high] instead of the
+            default [low, high). Defaults to False.
+
+ Returns
+ -------
+ out : int or ndarray of ints
+ `size`-shaped array of random integers from the appropriate
+ distribution, or a single such random int if `size` not provided.
+
+ Notes
+ -----
+ When using broadcasting with uint64 dtypes, the maximum value (2**64)
+ cannot be represented as a standard integer type. The high array (or
+ low if high is None) must have object dtype, e.g., array([2**64]).
+
+ Examples
+ --------
+ >>> rng = np.random.default_rng()
+ >>> rng.integers(2, size=10)
+ array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0]) # random
+ >>> rng.integers(1, size=10)
+ array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+
+ Generate a 2 x 4 array of ints between 0 and 4, inclusive:
+
+ >>> rng.integers(5, size=(2, 4))
+ array([[4, 0, 2, 1],
+ [3, 2, 2, 0]]) # random
+
+ Generate a 1 x 3 array with 3 different upper bounds:
+
+ >>> rng.integers(1, [3, 5, 10])
+ array([2, 2, 9]) # random
+
+ Generate a 1 x 3 array with 3 different lower bounds:
+
+ >>> rng.integers([1, 5, 7], 10)
+ array([9, 8, 7]) # random
+
+ Generate a 2 x 4 array using broadcasting with dtype of uint8:
+
+ >>> rng.integers([1, 3, 5, 7], [[10], [20]], dtype=np.uint8)
+ array([[ 8, 6, 9, 7],
+ [ 1, 16, 9, 12]], dtype=uint8) # random
+
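+ Per the uint64 note above, a bound of 2**64 must be passed as an
+ object array (an illustrative sketch; output is random):
+
+ >>> rng.integers(np.array([2**64], dtype=object), dtype=np.uint64) # doctest: +SKIP
+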
+ References
+ ----------
+ .. [1] Daniel Lemire, "Fast Random Integer Generation in an Interval",
+ ACM Transactions on Modeling and Computer Simulation 29 (1), 2019,
+ http://arxiv.org/abs/1805.10941.
+
+ """
+ if high is None:
+ high = low
+ low = 0
+
+ dt = np.dtype(dtype)
+ key = dt.name
+ if key not in _integers_types:
+ raise TypeError('Unsupported dtype "%s" for integers' % key)
+ if not dt.isnative:
+ raise ValueError('Providing a dtype with a non-native byteorder '
+ 'is not supported. If you require '
+ 'platform-independent byteorder, call byteswap '
+ 'when required.')
+
+ # Implementation detail: the old API used a masked method to generate
+ # bounded uniform integers. Lemire's method is preferable since it is
+ # faster. randomgen allows a choice; we always use the faster one.
+ cdef bint _masked = False
+
+ if key == 'int32':
+ ret = _rand_int32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'int64':
+ ret = _rand_int64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'int16':
+ ret = _rand_int16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'int8':
+ ret = _rand_int8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'uint64':
+ ret = _rand_uint64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'uint32':
+ ret = _rand_uint32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'uint16':
+ ret = _rand_uint16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'uint8':
+ ret = _rand_uint8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'bool':
+ ret = _rand_bool(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+
+ if size is None and dtype in (bool, int, np.compat.long):
+ if np.array(ret).shape == ():
+ return dtype(ret)
+ return ret
+
+ def bytes(self, np.npy_intp length):
+ """
+ bytes(length)
+
+ Return random bytes.
+
+ Parameters
+ ----------
+ length : int
+ Number of random bytes.
+
+ Returns
+ -------
+ out : bytes
+ String of random bytes of length `length`.
+
+ Examples
+ --------
+ >>> np.random.default_rng().bytes(10)
+ b' eh\\x85\\x022SZ\\xbf\\xa4' # random
+
+ """
+ cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
+ # Interpret the uint32s as little-endian to convert them to bytes
+ # consistently.
+ return self.integers(0, 4294967296, size=n_uint32,
+ dtype=np.uint32).astype('<u4').tobytes()[:length]
+
+ @cython.wraparound(True)
+ def choice(self, a, size=None, replace=True, p=None, axis=0, bint shuffle=True):
+ """
+ choice(a, size=None, replace=True, p=None, axis=0, shuffle=True)
+
+ Generates a random sample from a given array
+
+ Parameters
+ ----------
+ a : array_like or int
+ If an ndarray, a random sample is generated from its elements.
+ If an int, the random sample is generated as if it were
+ ``np.arange(a)``.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn from the 1-d `a`. If `a` has more
+ than one dimension, the `size` shape will be inserted into the
+ `axis` dimension, so the output ``ndim`` will be ``a.ndim - 1 +
+ len(size)``. Default is None, in which case a single value is
+ returned.
+ replace : boolean, optional
+ Whether the sample is with or without replacement.
+ p : 1-D array-like, optional
+ The probabilities associated with each entry in a.
+ If not given, the sample assumes a uniform distribution over all
+ entries in ``a``.
+ axis : int, optional
+ The axis along which the selection is performed. The default, 0,
+ selects by row.
+ shuffle : boolean, optional
+ Whether the sample is shuffled when sampling without replacement.
+ Default is True; setting it to False provides a speedup.
+
+ Returns
+ -------
+ samples : single item or ndarray
+ The generated random samples
+
+ Raises
+ ------
+ ValueError
+ If a is an int and less than zero, if p is not 1-dimensional, if
+ a is array-like with a size 0, if p is not a vector of
+ probabilities, if a and p have different lengths, or if
+ replace=False and the sample size is greater than the population
+ size.
+
+ See Also
+ --------
+ integers, shuffle, permutation
+
+ Examples
+ --------
+ Generate a uniform random sample from np.arange(5) of size 3:
+
+ >>> rng = np.random.default_rng()
+ >>> rng.choice(5, 3)
+ array([0, 3, 4]) # random
+ >>> # This is equivalent to rng.integers(0, 5, 3)
+
+ Generate a non-uniform random sample from np.arange(5) of size 3:
+
+ >>> rng.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])
+ array([3, 3, 0]) # random
+
+ Generate a uniform random sample from np.arange(5) of size 3 without
+ replacement:
+
+ >>> rng.choice(5, 3, replace=False)
+ array([3, 1, 0]) # random
+ >>> # This is equivalent to rng.permutation(np.arange(5))[:3]
+
+ Generate a non-uniform random sample from np.arange(5) of size
+ 3 without replacement:
+
+ >>> rng.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
+ array([2, 3, 0]) # random
+
+ Any of the above can be repeated with an arbitrary array-like
+ instead of just integers. For instance:
+
+ >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
+ >>> rng.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
+ array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'], # random
+ dtype='<U11')
+
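+ Sample along ``axis`` of a 2-D array, per the ``axis`` parameter
+ above (an illustrative sketch; output is random):
+
+ >>> rng.choice(np.arange(12).reshape(3, 4), 2, axis=1) # doctest: +SKIP
+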
+ """
+
+ cdef int64_t val, t, loc, size_i, pop_size_i
+ cdef int64_t *idx_data
+ cdef np.npy_intp j
+ cdef uint64_t set_size, mask
+ cdef uint64_t[::1] hash_set
+ # Format and Verify input
+ a = np.array(a, copy=False)
+ if a.ndim == 0:
+ try:
+ # __index__ must return an integer by python rules.
+ pop_size = operator.index(a.item())
+ except TypeError:
+ raise ValueError("a must be 1-dimensional or an integer")
+ if pop_size <= 0 and np.prod(size) != 0:
+ raise ValueError("a must be greater than 0 unless no samples are taken")
+ else:
+ pop_size = a.shape[axis]
+ if pop_size == 0 and np.prod(size) != 0:
+ raise ValueError("'a' cannot be empty unless no samples are taken")
+
+ if p is not None:
+ d = len(p)
+
+ atol = np.sqrt(np.finfo(np.float64).eps)
+ if isinstance(p, np.ndarray):
+ if np.issubdtype(p.dtype, np.floating):
+ atol = max(atol, np.sqrt(np.finfo(p.dtype).eps))
+
+ p = <np.ndarray>np.PyArray_FROM_OTF(
+ p, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ pix = <double*>np.PyArray_DATA(p)
+
+ if p.ndim != 1:
+ raise ValueError("'p' must be 1-dimensional")
+ if p.size != pop_size:
+ raise ValueError("'a' and 'p' must have same size")
+ p_sum = kahan_sum(pix, d)
+ if np.isnan(p_sum):
+ raise ValueError("probabilities contain NaN")
+ if np.logical_or.reduce(p < 0):
+ raise ValueError("probabilities are not non-negative")
+ if abs(p_sum - 1.) > atol:
+ raise ValueError("probabilities do not sum to 1")
+
+ shape = size
+ if shape is not None:
+ size = np.prod(shape, dtype=np.intp)
+ else:
+ size = 1
+
+ # Actual sampling
+ if replace:
+ if p is not None:
+ cdf = p.cumsum()
+ cdf /= cdf[-1]
+ uniform_samples = self.random(shape)
+ idx = cdf.searchsorted(uniform_samples, side='right')
+ idx = np.array(idx, copy=False, dtype=np.int64) # searchsorted returns a scalar
+ else:
+ idx = self.integers(0, pop_size, size=shape, dtype=np.int64)
+ else:
+ if size > pop_size:
+ raise ValueError("Cannot take a larger sample than "
+ "population when 'replace=False'")
+ elif size < 0:
+ raise ValueError("negative dimensions are not allowed")
+
+ if p is not None:
+ if np.count_nonzero(p > 0) < size:
+ raise ValueError("Fewer non-zero entries in p than size")
+ n_uniq = 0
+ p = p.copy()
+ found = np.zeros(shape, dtype=np.int64)
+ flat_found = found.ravel()
+ while n_uniq < size:
+ x = self.random((size - n_uniq,))
+ if n_uniq > 0:
+ p[flat_found[0:n_uniq]] = 0
+ cdf = np.cumsum(p)
+ cdf /= cdf[-1]
+ new = cdf.searchsorted(x, side='right')
+ _, unique_indices = np.unique(new, return_index=True)
+ unique_indices.sort()
+ new = new.take(unique_indices)
+ flat_found[n_uniq:n_uniq + new.size] = new
+ n_uniq += new.size
+ idx = found
+ else:
+ size_i = size
+ pop_size_i = pop_size
+ # These cutoffs are heuristic tunings and should be improvable.
+ if shuffle:
+ cutoff = 50
+ else:
+ cutoff = 20
+ if pop_size_i > 10000 and (size_i > (pop_size_i // cutoff)):
+ # Tail shuffle size elements
+ idx = np.PyArray_Arange(0, pop_size_i, 1, np.NPY_INT64)
+ idx_data = <int64_t*>(<np.ndarray>idx).data
+ with self.lock, nogil:
+ self._shuffle_int(pop_size_i, max(pop_size_i - size_i, 1),
+ idx_data)
+ # Copy to allow potentially large array backing idx to be gc
+ idx = idx[(pop_size - size):].copy()
+ else:
+ # Floyd's algorithm
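+ # Floyd's sampling: for each j in [pop_size - size, pop_size), draw
+ # val uniformly from [0, j]; if val was already selected, take j
+ # instead, which cannot have been selected before. Membership is
+ # tracked in the open-addressing hash table hash_set below.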
+ idx = np.empty(size, dtype=np.int64)
+ idx_data = <int64_t*>np.PyArray_DATA(<np.ndarray>idx)
+ # smallest power of 2 larger than 1.2 * size
+ set_size = <uint64_t>(1.2 * size_i)
+ mask = _gen_mask(set_size)
+ set_size = 1 + mask
+ hash_set = np.full(set_size, <uint64_t>-1, np.uint64)
+ with self.lock, cython.wraparound(False), nogil:
+ for j in range(pop_size_i - size_i, pop_size_i):
+ val = random_bounded_uint64(&self._bitgen, 0, j, 0, 0)
+ loc = val & mask
+ while hash_set[loc] != <uint64_t>-1 and hash_set[loc] != <uint64_t>val:
+ loc = (loc + 1) & mask
+ if hash_set[loc] == <uint64_t>-1: # then val not in hash_set
+ hash_set[loc] = val
+ idx_data[j - pop_size_i + size_i] = val
+ else: # we need to insert j instead
+ loc = j & mask
+ while hash_set[loc] != <uint64_t>-1:
+ loc = (loc + 1) & mask
+ hash_set[loc] = j
+ idx_data[j - pop_size_i + size_i] = j
+ if shuffle:
+ self._shuffle_int(size_i, 1, idx_data)
+ if shape is not None:
+ idx.shape = shape
+
+ if shape is None and isinstance(idx, np.ndarray):
+ # In most cases a scalar will have been made an array
+ idx = idx.item(0)
+
+ # Use samples as indices for a if a is array-like
+ if a.ndim == 0:
+ return idx
+
+ if shape is not None and idx.ndim == 0:
+ # If size == () then the user requested a 0-d array as opposed to
+ # a scalar object when size is None. However a[idx] is always a
+ # scalar and not an array. So this makes sure the result is an
+ # array, taking into account that np.array(item) may not work
+ # for object arrays.
+ res = np.empty((), dtype=a.dtype)
+ res[()] = a[idx]
+ return res
+
+ # asarray downcasts on 32-bit platforms, always safe
+ # no-op on 64-bit platforms
+ return a.take(np.asarray(idx, dtype=np.intp), axis=axis)
+
+ def uniform(self, low=0.0, high=1.0, size=None):
+ """
+ uniform(low=0.0, high=1.0, size=None)
+
+ Draw samples from a uniform distribution.
+
+ Samples are uniformly distributed over the half-open interval
+ ``[low, high)`` (includes low, but excludes high). In other words,
+ any value within the given interval is equally likely to be drawn
+ by `uniform`.
+
+ Parameters
+ ----------
+ low : float or array_like of floats, optional
+ Lower boundary of the output interval. All values generated will be
+ greater than or equal to low. The default value is 0.
+ high : float or array_like of floats, optional
+ Upper boundary of the output interval. All values generated will be
+ less than high. The default value is 1.0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``low`` and ``high`` are both scalars.
+ Otherwise, ``np.broadcast(low, high).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized uniform distribution.
+
+ See Also
+ --------
+ integers : Discrete uniform distribution, yielding integers.
+ random : Floats uniformly distributed over ``[0, 1)``.
+
+ Notes
+ -----
+ The probability density function of the uniform distribution is
+
+ .. math:: p(x) = \\frac{1}{b - a}
+
+ anywhere within the interval ``[a, b)``, and zero elsewhere.
+
+ When ``high`` == ``low``, values of ``low`` will be returned.
+ If ``high`` < ``low``, the results are officially undefined
+ and may eventually raise an error; do not rely on this
+ function's behavior when passed arguments satisfying that
+ inequality.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> s = np.random.default_rng().uniform(-1,0,1000)
+
+ All values are within the given interval:
+
+ >>> np.all(s >= -1)
+ True
+ >>> np.all(s < 0)
+ True
+
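+ Array-like bounds broadcast, drawing one sample per (low, high) pair
+ (an illustrative sketch; output is random):
+
+ >>> np.random.default_rng().uniform([0, 5], [1, 6]) # doctest: +SKIP
+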
+ Display the histogram of the samples, along with the
+ probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 15, density=True)
+ >>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
+ >>> plt.show()
+
+ """
+ cdef bint is_scalar = True
+ cdef np.ndarray alow, ahigh, arange
+ cdef double _low, _high, range
+ cdef object temp
+
+ alow = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ ahigh = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_DOUBLE, np.NPY_ALIGNED)
+
+ if np.PyArray_NDIM(alow) == np.PyArray_NDIM(ahigh) == 0:
+ _low = PyFloat_AsDouble(low)
+ _high = PyFloat_AsDouble(high)
+ range = _high - _low
+ if not np.isfinite(range):
+ raise OverflowError('Range exceeds valid bounds')
+
+ return cont(&random_uniform, &self._bitgen, size, self.lock, 2,
+ _low, '', CONS_NONE,
+ range, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ temp = np.subtract(ahigh, alow)
+ # needed to get around Pyrex's automatic reference-counting
+ # rules because EnsureArray steals a reference
+ Py_INCREF(temp)
+
+ arange = <np.ndarray>np.PyArray_EnsureArray(temp)
+ if not np.all(np.isfinite(arange)):
+ raise OverflowError('Range exceeds valid bounds')
+ return cont(&random_uniform, &self._bitgen, size, self.lock, 2,
+ alow, '', CONS_NONE,
+ arange, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ # Complicated, continuous distributions:
+ def standard_normal(self, size=None, dtype=np.float64, out=None):
+ """
+ standard_normal(size=None, dtype='d', out=None)
+
+ Draw samples from a standard Normal distribution (mean=0, stdev=1).
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ dtype : {str, dtype}, optional
+ Desired dtype of the result, either 'd' (or 'float64') or 'f'
+ (or 'float32'). All dtypes are determined by their name. The
+ default value is 'd'.
+ out : ndarray, optional
+ Alternative output array in which to place the result. If size is not None,
+ it must have the same shape as the provided size and must match the type of
+ the output values.
+
+ Returns
+ -------
+ out : float or ndarray
+ A floating-point array of shape ``size`` of drawn samples, or a
+ single sample if ``size`` was not specified.
+
+ See Also
+ --------
+ normal :
+ Equivalent function with additional ``loc`` and ``scale`` arguments
+ for setting the mean and standard deviation.
+
+ Notes
+ -----
+ For random samples from :math:`N(\\mu, \\sigma^2)`, use one of::
+
+ mu + sigma * gen.standard_normal(size=...)
+ gen.normal(mu, sigma, size=...)
+
+ Examples
+ --------
+ >>> rng = np.random.default_rng()
+ >>> rng.standard_normal()
+ 2.1923875335537315 # random
+
+ >>> s = rng.standard_normal(8000)
+ >>> s
+ array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, # random
+ -0.38672696, -0.4685006 ]) # random
+ >>> s.shape
+ (8000,)
+ >>> s = rng.standard_normal(size=(3, 4, 2))
+ >>> s.shape
+ (3, 4, 2)
+
+ Two-by-four array of samples from :math:`N(3, 6.25)`:
+
+ >>> 3 + 2.5 * rng.standard_normal(size=(2, 4))
+ array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
+ [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
+
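+ A single-precision draw into a preallocated array, combining the
+ ``dtype`` and ``out`` parameters documented above (an illustrative
+ sketch; ``buf`` is a hypothetical name):
+
+ >>> buf = np.empty(4, dtype=np.float32) # hypothetical buffer
+ >>> rng.standard_normal(dtype=np.float32, out=buf) # doctest: +SKIP
+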
+ """
+ key = np.dtype(dtype).name
+ if key == 'float64':
+ return double_fill(&random_standard_normal_fill, &self._bitgen, size, self.lock, out)
+ elif key == 'float32':
+ return float_fill(&random_standard_normal_fill_f, &self._bitgen, size, self.lock, out)
+
+ else:
+ raise TypeError('Unsupported dtype "%s" for standard_normal' % key)
+
+ def normal(self, loc=0.0, scale=1.0, size=None):
+ """
+ normal(loc=0.0, scale=1.0, size=None)
+
+ Draw random samples from a normal (Gaussian) distribution.
+
+ The probability density function of the normal distribution, first
+ derived by De Moivre and 200 years later by both Gauss and Laplace
+ independently [2]_, is often called the bell curve because of
+ its characteristic shape (see the example below).
+
+ The normal distribution occurs often in nature. For example, it
+ describes the commonly occurring distribution of samples influenced
+ by a large number of tiny, random disturbances, each with its own
+ unique distribution [2]_.
+
+ Parameters
+ ----------
+ loc : float or array_like of floats
+ Mean ("centre") of the distribution.
+ scale : float or array_like of floats
+ Standard deviation (spread or "width") of the distribution. Must be
+ non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized normal distribution.
+
+ See Also
+ --------
+ scipy.stats.norm : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Gaussian distribution is
+
+ .. math:: p(x) = \\frac{1}{\\sqrt{ 2 \\pi \\sigma^2 }}
+ e^{ - \\frac{ (x - \\mu)^2 } {2 \\sigma^2} },
+
+ where :math:`\\mu` is the mean and :math:`\\sigma` the standard
+ deviation. The square of the standard deviation, :math:`\\sigma^2`,
+ is called the variance.
+
+ The function has its peak at the mean, and its "spread" increases with
+ the standard deviation (the function reaches 0.607 times its maximum at
+ :math:`\\mu + \\sigma` and :math:`\\mu - \\sigma` [2]_). This implies that
+ :meth:`normal` is more likely to return samples lying close to the
+ mean, rather than those far away.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Normal distribution",
+ https://en.wikipedia.org/wiki/Normal_distribution
+ .. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability,
+ Random Variables and Random Signal Principles", 4th ed., 2001,
+ pp. 51, 125.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> mu, sigma = 0, 0.1 # mean and standard deviation
+ >>> s = np.random.default_rng().normal(mu, sigma, 1000)
+
+ Verify the mean and the variance:
+
+ >>> abs(mu - np.mean(s))
+ 0.0 # may vary
+
+ >>> abs(sigma - np.std(s, ddof=1))
+ 0.1 # may vary
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
+ >>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
+ ... np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
+ ... linewidth=2, color='r')
+ >>> plt.show()
+
+ Two-by-four array of samples from N(3, 6.25):
+
+ >>> np.random.default_rng().normal(3, 2.5, size=(2, 4))
+ array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
+ [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
+
+ """
+ return cont(&random_normal, &self._bitgen, size, self.lock, 2,
+ loc, '', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ def standard_gamma(self, shape, size=None, dtype=np.float64, out=None):
+ """
+ standard_gamma(shape, size=None, dtype='d', out=None)
+
+ Draw samples from a standard Gamma distribution.
+
+ Samples are drawn from a Gamma distribution with specified parameters,
+ shape (sometimes designated "k") and scale=1.
+
+ Parameters
+ ----------
+ shape : float or array_like of floats
+ Parameter, must be non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``shape`` is a scalar. Otherwise,
+ ``np.array(shape).size`` samples are drawn.
+ dtype : {str, dtype}, optional
+ Desired dtype of the result, either 'd' (or 'float64') or 'f'
+ (or 'float32'). All dtypes are determined by their name. The
+ default value is 'd'.
+ out : ndarray, optional
+ Alternative output array in which to place the result. If size is
+ not None, it must have the same shape as the provided size and
+ must match the type of the output values.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized standard gamma distribution.
+
+ See Also
+ --------
+ scipy.stats.gamma : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Gamma distribution is
+
+ .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},
+
+ where :math:`k` is the shape and :math:`\\theta` the scale,
+ and :math:`\\Gamma` is the Gamma function.
+
+ The Gamma distribution is often used to model the times to failure of
+ electronic components, and arises naturally in processes for which the
+ waiting times between Poisson distributed events are relevant.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/GammaDistribution.html
+ .. [2] Wikipedia, "Gamma distribution",
+ https://en.wikipedia.org/wiki/Gamma_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> shape, scale = 2., 1. # mean and width
+ >>> s = np.random.default_rng().standard_gamma(shape, 1000000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> import scipy.special as sps # doctest: +SKIP
+ >>> count, bins, ignored = plt.hist(s, 50, density=True)
+ >>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ # doctest: +SKIP
+ ... (sps.gamma(shape) * scale**shape))
+ >>> plt.plot(bins, y, linewidth=2, color='r') # doctest: +SKIP
+ >>> plt.show()
+
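+ An array-valued ``shape`` draws one sample per entry, per the `size`
+ description above (an illustrative sketch; output is random):
+
+ >>> np.random.default_rng().standard_gamma([1., 2., 4.]) # doctest: +SKIP
+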
+ """
+ cdef void *func
+ key = np.dtype(dtype).name
+ if key == 'float64':
+ return cont(&random_standard_gamma, &self._bitgen, size, self.lock, 1,
+ shape, 'shape', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ out)
+ elif key == 'float32':
+ return cont_f(&random_standard_gamma_f, &self._bitgen, size, self.lock,
+ shape, 'shape', CONS_NON_NEGATIVE,
+ out)
+ else:
+ raise TypeError('Unsupported dtype "%s" for standard_gamma' % key)
+
+ def gamma(self, shape, scale=1.0, size=None):
+ """
+ gamma(shape, scale=1.0, size=None)
+
+ Draw samples from a Gamma distribution.
+
+ Samples are drawn from a Gamma distribution with specified parameters,
+ `shape` (sometimes designated "k") and `scale` (sometimes designated
+ "theta"), where both parameters are > 0.
+
+ Parameters
+ ----------
+ shape : float or array_like of floats
+ The shape of the gamma distribution. Must be non-negative.
+ scale : float or array_like of floats, optional
+ The scale of the gamma distribution. Must be non-negative.
+ Default is equal to 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``shape`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(shape, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized gamma distribution.
+
+ See Also
+ --------
+ scipy.stats.gamma : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Gamma distribution is
+
+ .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},
+
+ where :math:`k` is the shape and :math:`\\theta` the scale,
+ and :math:`\\Gamma` is the Gamma function.
+
+ The Gamma distribution is often used to model the times to failure of
+ electronic components, and arises naturally in processes for which the
+ waiting times between Poisson distributed events are relevant.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/GammaDistribution.html
+ .. [2] Wikipedia, "Gamma distribution",
+ https://en.wikipedia.org/wiki/Gamma_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> shape, scale = 2., 2. # mean=4, std=2*sqrt(2)
+ >>> s = np.random.default_rng().gamma(shape, scale, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> import scipy.special as sps # doctest: +SKIP
+ >>> count, bins, ignored = plt.hist(s, 50, density=True)
+ >>> y = bins**(shape-1)*(np.exp(-bins/scale) / # doctest: +SKIP
+ ... (sps.gamma(shape)*scale**shape))
+ >>> plt.plot(bins, y, linewidth=2, color='r') # doctest: +SKIP
+ >>> plt.show()
+
+ """
+ return cont(&random_gamma, &self._bitgen, size, self.lock, 2,
+ shape, 'shape', CONS_NON_NEGATIVE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def f(self, dfnum, dfden, size=None):
+ """
+ f(dfnum, dfden, size=None)
+
+ Draw samples from an F distribution.
+
+ Samples are drawn from an F distribution with specified parameters,
+ `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
+ freedom in denominator), where both parameters must be greater than
+ zero.
+
+ The F distribution (also known as the Fisher distribution) is a
+ continuous probability distribution that arises in ANOVA tests and
+ is the distribution of the ratio of two chi-square variates.
+
+ Parameters
+ ----------
+ dfnum : float or array_like of floats
+ Degrees of freedom in numerator, must be > 0.
+ dfden : float or array_like of floats
+ Degrees of freedom in denominator, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``dfnum`` and ``dfden`` are both scalars.
+ Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Fisher distribution.
+
+ See Also
+ --------
+ scipy.stats.f : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The F statistic is used to compare in-group variances to between-group
+ variances. Calculating the distribution depends on the sampling, and
+ so it is a function of the respective degrees of freedom in the
+ problem. The variable `dfnum` is the number of samples minus one, the
+ between-groups degrees of freedom, while `dfden` is the within-groups
+ degrees of freedom, the sum of the number of samples in each group
+ minus the number of groups.
+
+ References
+ ----------
+ .. [1] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
+ Fifth Edition, 2002.
+ .. [2] Wikipedia, "F-distribution",
+ https://en.wikipedia.org/wiki/F-distribution
+
+ Examples
+ --------
+ An example from Glantz [1]_, pp 47-40:
+
+ Two groups, children of diabetics (25 people) and children from people
+ without diabetes (25 controls). Fasting blood glucose was measured,
+ case group had a mean value of 86.1, controls had a mean value of
+ 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these
+ data consistent with the null hypothesis that the parents' diabetic
+ status does not affect their children's blood glucose levels?
+ Calculating the F statistic from the data gives a value of 36.01.
+
+ Draw samples from the distribution:
+
+ >>> dfnum = 1. # between group degrees of freedom
+ >>> dfden = 48. # within groups degrees of freedom
+ >>> s = np.random.default_rng().f(dfnum, dfden, 1000)
+
+ The lower bound for the top 1% of the samples is:
+
+ >>> np.sort(s)[-10]
+ 7.61988120985 # random
+
+ So there is about a 1% chance that the F statistic will exceed 7.62;
+ the measured value is 36, so the null hypothesis is rejected at the
+ 1% level.
+
+ """
+ return cont(&random_f, &self._bitgen, size, self.lock, 2,
+ dfnum, 'dfnum', CONS_POSITIVE,
+ dfden, 'dfden', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def noncentral_f(self, dfnum, dfden, nonc, size=None):
+ """
+ noncentral_f(dfnum, dfden, nonc, size=None)
+
+ Draw samples from the noncentral F distribution.
+
+ Samples are drawn from an F distribution with specified parameters,
+ `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
+ freedom in denominator), where both parameters must be greater than
+ zero. `nonc` is the non-centrality parameter.
+
+ Parameters
+ ----------
+ dfnum : float or array_like of floats
+ Numerator degrees of freedom, must be > 0.
+
+ .. versionchanged:: 1.14.0
+ Earlier NumPy versions required dfnum > 1.
+ dfden : float or array_like of floats
+ Denominator degrees of freedom, must be > 0.
+ nonc : float or array_like of floats
+ Non-centrality parameter, the sum of the squares of the numerator
+ means, must be >= 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``dfnum``, ``dfden``, and ``nonc``
+ are all scalars. Otherwise, ``np.broadcast(dfnum, dfden, nonc).size``
+ samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized noncentral Fisher distribution.
+
+ Notes
+ -----
+ When calculating the power of an experiment (power = probability of
+ rejecting the null hypothesis when a specific alternative is true) the
+ non-central F statistic becomes important. When the null hypothesis is
+ true, the F statistic follows a central F distribution. When the null
+ hypothesis is not true, then it follows a non-central F distribution.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Noncentral F-Distribution."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/NoncentralF-Distribution.html
+ .. [2] Wikipedia, "Noncentral F-distribution",
+ https://en.wikipedia.org/wiki/Noncentral_F-distribution
+
+ Examples
+ --------
+ In a study, testing for a specific alternative to the null hypothesis
+ requires use of the Noncentral F distribution. We need to calculate the
+ area in the tail of the distribution that exceeds the value of the F
+ distribution for the null hypothesis. We'll plot the two probability
+ distributions for comparison.
+
+ >>> rng = np.random.default_rng()
+ >>> dfnum = 3 # between group deg of freedom
+ >>> dfden = 20 # within groups degrees of freedom
+ >>> nonc = 3.0
+ >>> nc_vals = rng.noncentral_f(dfnum, dfden, nonc, 1000000)
+ >>> NF = np.histogram(nc_vals, bins=50, density=True)
+ >>> c_vals = rng.f(dfnum, dfden, 1000000)
+ >>> F = np.histogram(c_vals, bins=50, density=True)
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot(F[1][1:], F[0])
+ >>> plt.plot(NF[1][1:], NF[0])
+ >>> plt.show()
+
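+ The power mentioned in the Notes can be estimated as the fraction of
+ noncentral draws beyond the central distribution's 95th percentile
+ (an illustrative calculation, not part of the original example):
+
+ >>> cutoff = np.percentile(c_vals, 95) # hypothetical 5% cutoff
+ >>> np.mean(nc_vals > cutoff) # doctest: +SKIP
+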
+ """
+ return cont(&random_noncentral_f, &self._bitgen, size, self.lock, 3,
+ dfnum, 'dfnum', CONS_POSITIVE,
+ dfden, 'dfden', CONS_POSITIVE,
+ nonc, 'nonc', CONS_NON_NEGATIVE, None)
+
+ def chisquare(self, df, size=None):
+ """
+ chisquare(df, size=None)
+
+ Draw samples from a chi-square distribution.
+
+ When `df` independent random variables, each with standard normal
+ distributions (mean 0, variance 1), are squared and summed, the
+ resulting distribution is chi-square (see Notes). This distribution
+ is often used in hypothesis testing.
+
+ Parameters
+ ----------
+ df : float or array_like of floats
+ Number of degrees of freedom, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``df`` is a scalar. Otherwise,
+ ``np.array(df).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized chi-square distribution.
+
+ Raises
+ ------
+ ValueError
+ When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)
+ is given.
+
+ Notes
+ -----
+ The variable obtained by summing the squares of `df` independent,
+ standard normally distributed random variables:
+
+ .. math:: Q = \\sum_{i=1}^{\\mathtt{df}} X^2_i
+
+ is chi-square distributed, denoted
+
+ .. math:: Q \\sim \\chi^2_k.
+
+ The probability density function of the chi-squared distribution is
+
+ .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}
+ x^{k/2 - 1} e^{-x/2},
+
+ where :math:`\\Gamma` is the gamma function,
+
+ .. math:: \\Gamma(x) = \\int_0^{\\infty} t^{x - 1} e^{-t} dt.
+
+ References
+ ----------
+ .. [1] NIST "Engineering Statistics Handbook"
+ https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
+
+ Examples
+ --------
+ >>> np.random.default_rng().chisquare(2,4)
+ array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
+
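+ The sum-of-squares characterization in the Notes can be checked
+ numerically (an illustrative sketch; the sample mean should be close
+ to ``df``):
+
+ >>> rng = np.random.default_rng()
+ >>> df = 3
+ >>> q = (rng.standard_normal((10000, df))**2).sum(axis=1)
+ >>> q.mean() # doctest: +SKIP
+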
+ """
+ return cont(&random_chisquare, &self._bitgen, size, self.lock, 1,
+ df, 'df', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def noncentral_chisquare(self, df, nonc, size=None):
+ """
+ noncentral_chisquare(df, nonc, size=None)
+
+ Draw samples from a noncentral chi-square distribution.
+
+ The noncentral :math:`\\chi^2` distribution is a generalization of
+ the :math:`\\chi^2` distribution.
+
+ Parameters
+ ----------
+ df : float or array_like of floats
+ Degrees of freedom, must be > 0.
+
+ .. versionchanged:: 1.10.0
+ Earlier NumPy versions required df > 1.
+ nonc : float or array_like of floats
+ Non-centrality, must be non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``df`` and ``nonc`` are both scalars.
+ Otherwise, ``np.broadcast(df, nonc).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized noncentral chi-square distribution.
+
+ Notes
+ -----
+ The probability density function for the noncentral Chi-square
+ distribution is
+
+ .. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}
+ \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}
+ P_{Y_{df+2i}}(x),
+
+ where :math:`Y_{q}` is the Chi-square with q degrees of freedom.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Noncentral chi-squared distribution"
+ https://en.wikipedia.org/wiki/Noncentral_chi-squared_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram
+
+ >>> rng = np.random.default_rng()
+ >>> import matplotlib.pyplot as plt
+ >>> values = plt.hist(rng.noncentral_chisquare(3, 20, 100000),
+ ... bins=200, density=True)
+ >>> plt.show()
+
+ Draw values from a noncentral chi-square with very small noncentrality,
+ and compare to a chi-square.
+
+ >>> plt.figure()
+ >>> values = plt.hist(rng.noncentral_chisquare(3, .0000001, 100000),
+ ... bins=np.arange(0., 25, .1), density=True)
+ >>> values2 = plt.hist(rng.chisquare(3, 100000),
+ ... bins=np.arange(0., 25, .1), density=True)
+ >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')
+ >>> plt.show()
+
+ Demonstrate how large values of non-centrality lead to a more symmetric
+ distribution.
+
+ >>> plt.figure()
+ >>> values = plt.hist(rng.noncentral_chisquare(3, 20, 100000),
+ ... bins=200, density=True)
+ >>> plt.show()
+
+ """
+ return cont(&random_noncentral_chisquare, &self._bitgen, size, self.lock, 2,
+ df, 'df', CONS_POSITIVE,
+ nonc, 'nonc', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def standard_cauchy(self, size=None):
+ """
+ standard_cauchy(size=None)
+
+ Draw samples from a standard Cauchy distribution with mode = 0.
+
+ Also known as the Lorentz distribution.
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ samples : ndarray or scalar
+ The drawn samples.
+
+ Notes
+ -----
+ The probability density function for the full Cauchy distribution is
+
+ .. math:: P(x; x_0, \\gamma) = \\frac{1}{\\pi \\gamma \\bigl[ 1+
+ (\\frac{x-x_0}{\\gamma})^2 \\bigr] }
+
+ and the Standard Cauchy distribution just sets :math:`x_0=0` and
+ :math:`\\gamma=1`.
+
+ The Cauchy distribution arises in the solution to the driven harmonic
+ oscillator problem, and also describes spectral line broadening. It
+ also describes the distribution of values at which a line tilted at
+ a random angle will cut the x axis.
+
+ When studying hypothesis tests that assume normality, seeing how the
+ tests perform on data from a Cauchy distribution is a good indicator of
+ their sensitivity to a heavy-tailed distribution, since the Cauchy looks
+ very much like a Gaussian distribution, but with heavier tails.
+
+ References
+ ----------
+ .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "Cauchy
+ Distribution",
+ https://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm
+ .. [2] Weisstein, Eric W. "Cauchy Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/CauchyDistribution.html
+ .. [3] Wikipedia, "Cauchy distribution"
+ https://en.wikipedia.org/wiki/Cauchy_distribution
+
+ Examples
+ --------
+ Draw samples and plot the distribution:
+
+ >>> import matplotlib.pyplot as plt
+ >>> s = np.random.default_rng().standard_cauchy(1000000)
+ >>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well
+ >>> plt.hist(s, bins=100)
+ >>> plt.show()
+
+ """
+ return cont(&random_standard_cauchy, &self._bitgen, size, self.lock, 0,
+ 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, None)
+
+ def standard_t(self, df, size=None):
+ """
+ standard_t(df, size=None)
+
+ Draw samples from a standard Student's t distribution with `df` degrees
+ of freedom.
+
+ A special case of the hyperbolic distribution. As `df` gets
+ large, the result resembles that of the standard normal
+ distribution (`standard_normal`).
+
+ Parameters
+ ----------
+ df : float or array_like of floats
+ Degrees of freedom, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``df`` is a scalar. Otherwise,
+ ``np.array(df).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized standard Student's t distribution.
+
+ Notes
+ -----
+ The probability density function for the t distribution is
+
+ .. math:: P(x, df) = \\frac{\\Gamma(\\frac{df+1}{2})}{\\sqrt{\\pi df}
+ \\Gamma(\\frac{df}{2})}\\Bigl( 1+\\frac{x^2}{df} \\Bigr)^{-(df+1)/2}
+
+ The t test is based on an assumption that the data come from a
+ Normal distribution. The t test provides a way to test whether
+ the sample mean (that is the mean calculated from the data) is
+ a good estimate of the true mean.
+
+ The derivation of the t-distribution was first published in
+ 1908 by William Gosset while working for the Guinness Brewery
+ in Dublin. Due to proprietary issues, he had to publish under
+ a pseudonym, and so he used the name Student.
+
+ References
+ ----------
+ .. [1] Dalgaard, Peter, "Introductory Statistics With R",
+ Springer, 2002.
+ .. [2] Wikipedia, "Student's t-distribution"
+ https://en.wikipedia.org/wiki/Student's_t-distribution
+
+ Examples
+ --------
+ From Dalgaard page 83 [1]_, suppose the daily energy intake for 11
+ women in kilojoules (kJ) is:
+
+ >>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\
+ ... 7515, 8230, 8770])
+
+ Does their energy intake deviate systematically from the recommended
+ value of 7725 kJ?
+
+ We have 10 degrees of freedom, so is the sample mean within 95% of the
+ recommended value?
+
+ >>> s = np.random.default_rng().standard_t(10, size=100000)
+ >>> np.mean(intake)
+ 6753.636363636364
+ >>> intake.std(ddof=1)
+ 1142.1232221373727
+
+ Calculate the t statistic, setting the ddof parameter to the unbiased
+ value so that the divisor in the standard deviation is the degrees of
+ freedom, N-1.
+
+ >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))
+ >>> import matplotlib.pyplot as plt
+ >>> h = plt.hist(s, bins=100, density=True)
+
+ For a one-sided t-test, how far out in the distribution does the t
+ statistic appear?
+
+ >>> np.sum(s < t) / float(len(s))
+ 0.0090699999999999999 # random
+
+ So the p-value is about 0.009: if the null hypothesis were true, a t
+ statistic at least this extreme would arise only about 1% of the
+ time, so the null hypothesis is rejected at the 1% level.
+
+ """
+ return cont(&random_standard_t, &self._bitgen, size, self.lock, 1,
+ df, 'df', CONS_POSITIVE,
+ 0, '', CONS_NONE,
+ 0, '', CONS_NONE,
+ None)
+
+ def vonmises(self, mu, kappa, size=None):
+ """
+ vonmises(mu, kappa, size=None)
+
+ Draw samples from a von Mises distribution.
+
+ Samples are drawn from a von Mises distribution with specified mode
+ (mu) and dispersion (kappa), on the interval [-pi, pi].
+
+ The von Mises distribution (also known as the circular normal
+ distribution) is a continuous probability distribution on the unit
+ circle. It may be thought of as the circular analogue of the normal
+ distribution.
+
+ Parameters
+ ----------
+ mu : float or array_like of floats
+ Mode ("center") of the distribution.
+ kappa : float or array_like of floats
+ Dispersion of the distribution, has to be >=0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``mu`` and ``kappa`` are both scalars.
+ Otherwise, ``np.broadcast(mu, kappa).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized von Mises distribution.
+
+ See Also
+ --------
+ scipy.stats.vonmises : probability density function, distribution, or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the von Mises distribution is
+
+ .. math:: p(x) = \\frac{e^{\\kappa \\cos(x-\\mu)}}{2\\pi I_0(\\kappa)},
+
+ where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion,
+ and :math:`I_0(\\kappa)` is the modified Bessel function of order 0.
+
+ The von Mises is named for Richard Edler von Mises, who was born in
+ Austria-Hungary, in what is now Ukraine. He fled to the United
+ States in 1939 and became a professor at Harvard. He worked in
+ probability theory, aerodynamics, fluid mechanics, and philosophy of
+ science.
+
+ References
+ ----------
+ .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
+ Mathematical Functions with Formulas, Graphs, and Mathematical
+ Tables, 9th printing," New York: Dover, 1972.
+ .. [2] von Mises, R., "Mathematical Theory of Probability
+ and Statistics", New York: Academic Press, 1964.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> mu, kappa = 0.0, 4.0 # mean and dispersion
+ >>> s = np.random.default_rng().vonmises(mu, kappa, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.special import i0 # doctest: +SKIP
+ >>> plt.hist(s, 50, density=True)
+ >>> x = np.linspace(-np.pi, np.pi, num=51)
+ >>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*i0(kappa)) # doctest: +SKIP
+ >>> plt.plot(x, y, linewidth=2, color='r') # doctest: +SKIP
+ >>> plt.show()
+
+ """
+ return cont(&random_vonmises, &self._bitgen, size, self.lock, 2,
+ mu, 'mu', CONS_NONE,
+ kappa, 'kappa', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def pareto(self, a, size=None):
+ """
+ pareto(a, size=None)
+
+ Draw samples from a Pareto II or Lomax distribution with
+ specified shape.
+
+ The Lomax or Pareto II distribution is a shifted Pareto
+ distribution. The classical Pareto distribution can be
+ obtained from the Lomax distribution by adding 1 and
+ multiplying by the scale parameter ``m`` (see Notes). The
+ smallest value of the Lomax distribution is zero while for the
+ classical Pareto distribution it is ``mu``, where the standard
+ Pareto distribution has location ``mu = 1``. Lomax can also
+ be considered as a simplified version of the Generalized
+ Pareto distribution (available in SciPy), with the scale set
+ to one and the location set to zero.
+
+ The Pareto distribution takes only values greater than zero and is
+ unbounded above. It is also known as the "80-20 rule". In
+ this distribution, 80 percent of the weights are in the lowest
+ 20 percent of the range, while the other 20 percent fill the
+ remaining 80 percent of the range.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Shape of the distribution. Must be positive.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Pareto distribution.
+
+ See Also
+ --------
+ scipy.stats.lomax : probability density function, distribution or
+ cumulative density function, etc.
+ scipy.stats.genpareto : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Pareto distribution is
+
+ .. math:: p(x) = \\frac{am^a}{x^{a+1}}
+
+ where :math:`a` is the shape and :math:`m` the scale.
+
+ The Pareto distribution, named after the Italian economist
+ Vilfredo Pareto, is a power law probability distribution
+ useful in many real world problems. Outside the field of
+ economics it is generally referred to as the Bradford
+ distribution. Pareto developed the distribution to describe
+ the distribution of wealth in an economy. It has also found
+ use in insurance, web page access statistics, oil field sizes,
+ and many other problems, including the download frequency for
+ projects in Sourceforge [1]_. It is one of the so-called
+ "fat-tailed" distributions.
+
+
+ References
+ ----------
+ .. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of
+ Sourceforge projects.
+ .. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.
+ .. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme
+ Values, Birkhauser Verlag, Basel, pp 23-30.
+ .. [4] Wikipedia, "Pareto distribution",
+ https://en.wikipedia.org/wiki/Pareto_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> a, m = 3., 2. # shape and mode
+ >>> s = (np.random.default_rng().pareto(a, 1000) + 1) * m
+
+ Display the histogram of the samples, along with the probability
+ density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, _ = plt.hist(s, 100, density=True)
+ >>> fit = a*m**a / bins**(a+1)
+ >>> plt.plot(bins, max(count)*fit/max(fit), linewidth=2, color='r')
+ >>> plt.show()
+
+ """
+ return cont(&random_pareto, &self._bitgen, size, self.lock, 1,
+ a, 'a', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def weibull(self, a, size=None):
+ """
+ weibull(a, size=None)
+
+ Draw samples from a Weibull distribution.
+
+ Draw samples from a 1-parameter Weibull distribution with the given
+ shape parameter `a`.
+
+ .. math:: X = (-\\ln(U))^{1/a}
+
+ Here, U is drawn from the uniform distribution over (0,1].
+
+ The more common 2-parameter Weibull, including a scale parameter
+ :math:`\\lambda`, is just :math:`X = \\lambda(-\\ln(U))^{1/a}`.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Shape parameter of the distribution. Must be nonnegative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Weibull distribution.
+
+ See Also
+ --------
+ scipy.stats.weibull_max
+ scipy.stats.weibull_min
+ scipy.stats.genextreme
+ gumbel
+
+ Notes
+ -----
+ The Weibull (or Type III asymptotic extreme value distribution
+ for smallest values, SEV Type III, or Rosin-Rammler
+ distribution) is one of a class of Generalized Extreme Value
+ (GEV) distributions used in modeling extreme value problems.
+ This class includes the Gumbel and Frechet distributions.
+
+ The probability density for the Weibull distribution is
+
+ .. math:: p(x) = \\frac{a}
+ {\\lambda}(\\frac{x}{\\lambda})^{a-1}e^{-(x/\\lambda)^a},
+
+ where :math:`a` is the shape and :math:`\\lambda` the scale.
+
+ The function has its peak (the mode) at
+ :math:`\\lambda(\\frac{a-1}{a})^{1/a}`.
+
+ When ``a = 1``, the Weibull distribution reduces to the exponential
+ distribution.
+
+ References
+ ----------
+ .. [1] Waloddi Weibull, Royal Technical University, Stockholm,
+ 1939 "A Statistical Theory Of The Strength Of Materials",
+ Ingeniorsvetenskapsakademiens Handlingar Nr 151, 1939,
+ Generalstabens Litografiska Anstalts Forlag, Stockholm.
+ .. [2] Waloddi Weibull, "A Statistical Distribution Function of
+ Wide Applicability", Journal Of Applied Mechanics ASME Paper
+ 1951.
+ .. [3] Wikipedia, "Weibull distribution",
+ https://en.wikipedia.org/wiki/Weibull_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> a = 5. # shape
+ >>> s = rng.weibull(a, 1000)
+
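+ The 2-parameter form from the Notes is obtained by scaling the draw
+ (``lam`` is an illustrative name for :math:`\\lambda`):
+
+ >>> lam = 2.
+ >>> s2 = lam * rng.weibull(a, 1000)
+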
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> x = np.arange(1,100.)/50.
+ >>> def weib(x,n,a):
+ ... return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a)
+
+ >>> count, bins, ignored = plt.hist(rng.weibull(5.,1000))
+ >>> scale = count.max()/weib(x, 1., 5.).max()
+ >>> plt.plot(x, weib(x, 1., 5.)*scale)
+ >>> plt.show()
+
+ """
+ return cont(&random_weibull, &self._bitgen, size, self.lock, 1,
+ a, 'a', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def power(self, a, size=None):
+ """
+ power(a, size=None)
+
+ Draws samples in [0, 1] from a power distribution with positive
+ exponent a - 1.
+
+ Also known as the power function distribution.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Parameter of the distribution. Must be positive.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized power distribution.
+
+ Raises
+ ------
+ ValueError
+ If a <= 0.
+
+ Notes
+ -----
+ The probability density function is
+
+ .. math:: P(x; a) = ax^{a-1}, 0 \\le x \\le 1, a>0.
+
+ The power function distribution is just the inverse of the Pareto
+ distribution. It may also be seen as a special case of the Beta
+ distribution.
+
+ It is used, for example, in modeling the over-reporting of insurance
+ claims.
+
+ References
+ ----------
+ .. [1] Christian Kleiber, Samuel Kotz, "Statistical size distributions
+ in economics and actuarial sciences", Wiley, 2003.
+ .. [2] Heckert, N. A. and Filliben, James J. "NIST Handbook 148:
+ Dataplot Reference Manual, Volume 2: Let Subcommands and Library
+ Functions", National Institute of Standards and Technology
+ Handbook Series, June 2003.
+ https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> a = 5. # shape
+ >>> samples = 1000
+ >>> s = rng.power(a, samples)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, bins=30)
+ >>> x = np.linspace(0, 1, 100)
+ >>> y = a*x**(a-1.)
+ >>> normed_y = samples*np.diff(bins)[0]*y
+ >>> plt.plot(x, normed_y)
+ >>> plt.show()
+
+ Compare the power function distribution to the inverse of the Pareto.
+
+ >>> from scipy import stats # doctest: +SKIP
+ >>> rvs = rng.power(5, 1000000)
+ >>> rvsp = rng.pareto(5, 1000000)
+ >>> xx = np.linspace(0,1,100)
+ >>> powpdf = stats.powerlaw.pdf(xx,5) # doctest: +SKIP
+
+ >>> plt.figure()
+ >>> plt.hist(rvs, bins=50, density=True)
+ >>> plt.plot(xx,powpdf,'r-') # doctest: +SKIP
+ >>> plt.title('power(5)')
+
+ >>> plt.figure()
+ >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
+ >>> plt.plot(xx,powpdf,'r-') # doctest: +SKIP
+ >>> plt.title('inverse of 1 + Generator.pareto(5)')
+
+ >>> plt.figure()
+ >>> rvsp_sp = stats.pareto.rvs(5, size=1000000) # doctest: +SKIP
+ >>> plt.hist(1./rvsp_sp, bins=50, density=True) # doctest: +SKIP
+ >>> plt.plot(xx,powpdf,'r-') # doctest: +SKIP
+ >>> plt.title('inverse of stats.pareto(5)')
+
+ """
+ return cont(&random_power, &self._bitgen, size, self.lock, 1,
+ a, 'a', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def laplace(self, loc=0.0, scale=1.0, size=None):
+ """
+ laplace(loc=0.0, scale=1.0, size=None)
+
+ Draw samples from the Laplace or double exponential distribution with
+ specified location (or mean) and scale (decay).
+
+ The Laplace distribution is similar to the Gaussian/normal distribution,
+ but is sharper at the peak and has fatter tails. It represents the
+ difference between two independent, identically distributed exponential
+ random variables.
+
+ Parameters
+ ----------
+ loc : float or array_like of floats, optional
+ The position, :math:`\\mu`, of the distribution peak. Default is 0.
+ scale : float or array_like of floats, optional
+ :math:`\\lambda`, the exponential decay. Default is 1. Must be
+ non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Laplace distribution.
+
+ Notes
+ -----
+ It has the probability density function
+
+ .. math:: f(x; \\mu, \\lambda) = \\frac{1}{2\\lambda}
+ \\exp\\left(-\\frac{|x - \\mu|}{\\lambda}\\right).
+
+ The first law of Laplace, from 1774, states that the frequency
+ of an error can be expressed as an exponential function of the
+ absolute magnitude of the error, which leads to the Laplace
+ distribution. For many problems in economics and health
+ sciences, this distribution seems to model the data better
+ than the standard Gaussian distribution.
+
+ References
+ ----------
+ .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
+ Mathematical Functions with Formulas, Graphs, and Mathematical
+ Tables, 9th printing," New York: Dover, 1972.
+ .. [2] Kotz, Samuel, et al. "The Laplace Distribution and
+ Generalizations," Birkhauser, 2001.
+ .. [3] Weisstein, Eric W. "Laplace Distribution."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/LaplaceDistribution.html
+ .. [4] Wikipedia, "Laplace distribution",
+ https://en.wikipedia.org/wiki/Laplace_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> loc, scale = 0., 1.
+ >>> s = np.random.default_rng().laplace(loc, scale, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
+ >>> x = np.arange(-8., 8., .01)
+ >>> pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)
+ >>> plt.plot(x, pdf)
+
+ Plot Gaussian for comparison:
+
+ >>> g = (1/(scale * np.sqrt(2 * np.pi)) *
+ ... np.exp(-(x - loc)**2 / (2 * scale**2)))
+ >>> plt.plot(x,g)
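+
+ As noted above, the same distribution arises as the difference of two
+ independent, identically distributed exponential variates (an
+ illustrative construction, not the sampler used internally):
+
+ >>> rng = np.random.default_rng()
+ >>> d = rng.exponential(scale, 1000) - rng.exponential(scale, 1000)
+ >>> count, bins, ignored = plt.hist(d, 30, density=True)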
+
+ """
+ return cont(&random_laplace, &self._bitgen, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def gumbel(self, loc=0.0, scale=1.0, size=None):
+ """
+ gumbel(loc=0.0, scale=1.0, size=None)
+
+ Draw samples from a Gumbel distribution.
+
+ Draw samples from a Gumbel distribution with specified location and
+ scale. For more information on the Gumbel distribution, see
+ Notes and References below.
+
+ Parameters
+ ----------
+ loc : float or array_like of floats, optional
+ The location of the mode of the distribution. Default is 0.
+ scale : float or array_like of floats, optional
+ The scale parameter of the distribution. Default is 1. Must be non-
+ negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Gumbel distribution.
+
+ See Also
+ --------
+ scipy.stats.gumbel_l
+ scipy.stats.gumbel_r
+ scipy.stats.genextreme
+ weibull
+
+ Notes
+ -----
+ The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme
+ Value Type I) distribution is one of a class of Generalized Extreme
+ Value (GEV) distributions used in modeling extreme value problems.
+ The Gumbel is a special case of the Extreme Value Type I distribution
+ for maximums from distributions with "exponential-like" tails.
+
+ The probability density for the Gumbel distribution is
+
+ .. math:: p(x) = \\frac{e^{-(x - \\mu)/ \\beta}}{\\beta} e^{ -e^{-(x - \\mu)/
+ \\beta}},
+
+ where :math:`\\mu` is the mode, a location parameter, and
+ :math:`\\beta` is the scale parameter.
+
+ The Gumbel (named for German mathematician Emil Julius Gumbel) was used
+ very early in the hydrology literature, for modeling the occurrence of
+ flood events. It is also used for modeling maximum wind speed and
+ rainfall rates. It is a "fat-tailed" distribution - the probability of
+ an event in the tail of the distribution is larger than if one used a
+ Gaussian, hence the surprisingly frequent occurrence of 100-year
+ floods. Floods were initially modeled as a Gaussian process, which
+ underestimated the frequency of extreme events.
+
+ It is one of a class of extreme value distributions, the Generalized
+ Extreme Value (GEV) distributions, which also includes the Weibull and
+ Frechet.
+
+ The function has a mean of :math:`\\mu + 0.57721\\beta` and a variance
+ of :math:`\\frac{\\pi^2}{6}\\beta^2`.
+
+ References
+ ----------
+ .. [1] Gumbel, E. J., "Statistics of Extremes,"
+ New York: Columbia University Press, 1958.
+ .. [2] Reiss, R.-D. and Thomas, M., "Statistical Analysis of Extreme
+ Values from Insurance, Finance, Hydrology and Other Fields,"
+ Basel: Birkhauser Verlag, 2001.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> mu, beta = 0, 0.1 # location and scale
+ >>> s = rng.gumbel(mu, beta, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
+ >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
+ ... * np.exp( -np.exp( -(bins - mu) /beta) ),
+ ... linewidth=2, color='r')
+ >>> plt.show()
+
+ Show how an extreme value distribution can arise from a Gaussian process
+ and compare to a Gaussian:
+
+ >>> means = []
+ >>> maxima = []
+ >>> for i in range(0,1000) :
+ ... a = rng.normal(mu, beta, 1000)
+ ... means.append(a.mean())
+ ... maxima.append(a.max())
+ >>> count, bins, ignored = plt.hist(maxima, 30, density=True)
+ >>> beta = np.std(maxima) * np.sqrt(6) / np.pi
+ >>> mu = np.mean(maxima) - 0.57721*beta
+ >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
+ ... * np.exp(-np.exp(-(bins - mu)/beta)),
+ ... linewidth=2, color='r')
+ >>> plt.plot(bins, 1/(beta * np.sqrt(2 * np.pi))
+ ... * np.exp(-(bins - mu)**2 / (2 * beta**2)),
+ ... linewidth=2, color='g')
+ >>> plt.show()
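+
+ As a rough numerical check of the moment formulas given in the Notes
+ (values vary from run to run):
+
+ >>> s = rng.gumbel(0.5, 2.0, 100000)
+ >>> s.mean(), 0.5 + 0.57721*2.0 # doctest: +SKIP
+ >>> s.var(), (np.pi**2 / 6) * 2.0**2 # doctest: +SKIP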
+
+ """
+ return cont(&random_gumbel, &self._bitgen, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def logistic(self, loc=0.0, scale=1.0, size=None):
+ """
+ logistic(loc=0.0, scale=1.0, size=None)
+
+ Draw samples from a logistic distribution.
+
+ Samples are drawn from a logistic distribution with specified
+ parameters, loc (location or mean, also median), and scale (>= 0).
+
+ Parameters
+ ----------
+ loc : float or array_like of floats, optional
+ Parameter of the distribution. Default is 0.
+ scale : float or array_like of floats, optional
+ Parameter of the distribution. Must be non-negative.
+ Default is 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized logistic distribution.
+
+ See Also
+ --------
+ scipy.stats.logistic : probability density function, distribution or
+ cumulative distribution function, etc.
+
+ Notes
+ -----
+ The probability density for the Logistic distribution is
+
+ .. math:: P(x) = \\frac{e^{-(x-\\mu)/s}}{s(1+e^{-(x-\\mu)/s})^2},
+
+ where :math:`\\mu` = location and :math:`s` = scale.
+
+ The Logistic distribution is used in Extreme Value problems where it
+ can act as a mixture of Gumbel distributions, in Epidemiology, and by
+ the World Chess Federation (FIDE) where it is used in the Elo ranking
+ system, assuming the performance of each player is a logistically
+ distributed random variable.
+
+ References
+ ----------
+ .. [1] Reiss, R.-D. and Thomas M. (2001), "Statistical Analysis of
+ Extreme Values, from Insurance, Finance, Hydrology and Other
+ Fields," Birkhauser Verlag, Basel, pp 132-133.
+ .. [2] Weisstein, Eric W. "Logistic Distribution." From
+ MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/LogisticDistribution.html
+ .. [3] Wikipedia, "Logistic-distribution",
+ https://en.wikipedia.org/wiki/Logistic_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> loc, scale = 10, 1
+ >>> s = np.random.default_rng().logistic(loc, scale, 10000)
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, bins=50)
+
+ # plot against distribution
+
+ >>> def logist(x, loc, scale):
+ ... return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2)
+ >>> lgst_val = logist(bins, loc, scale)
+ >>> plt.plot(bins, lgst_val * count.max() / lgst_val.max())
+ >>> plt.show()
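+
+ The empirical CDF can also be compared against the closed form
+ ``1/(1 + np.exp((loc - x)/scale))`` (a quick sketch using the samples
+ drawn above):
+
+ >>> x = np.sort(s)
+ >>> plt.plot(x, np.arange(1, len(x) + 1)/len(x))
+ >>> plt.plot(x, 1/(1 + np.exp((loc - x)/scale)))
+ >>> plt.show()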
+
+ """
+ return cont(&random_logistic, &self._bitgen, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def lognormal(self, mean=0.0, sigma=1.0, size=None):
+ """
+ lognormal(mean=0.0, sigma=1.0, size=None)
+
+ Draw samples from a log-normal distribution.
+
+ Draw samples from a log-normal distribution with specified mean,
+ standard deviation, and array shape. Note that the mean and standard
+ deviation are not the values for the distribution itself, but of the
+ underlying normal distribution it is derived from.
+
+ Parameters
+ ----------
+ mean : float or array_like of floats, optional
+ Mean value of the underlying normal distribution. Default is 0.
+ sigma : float or array_like of floats, optional
+ Standard deviation of the underlying normal distribution. Must be
+ non-negative. Default is 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``mean`` and ``sigma`` are both scalars.
+ Otherwise, ``np.broadcast(mean, sigma).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized log-normal distribution.
+
+ See Also
+ --------
+ scipy.stats.lognorm : probability density function, distribution,
+ cumulative distribution function, etc.
+
+ Notes
+ -----
+ A variable `x` has a log-normal distribution if `log(x)` is normally
+ distributed. The probability density function for the log-normal
+ distribution is:
+
+ .. math:: p(x) = \\frac{1}{\\sigma x \\sqrt{2\\pi}}
+ e^{(-\\frac{(ln(x)-\\mu)^2}{2\\sigma^2})}
+
+ where :math:`\\mu` is the mean and :math:`\\sigma` is the standard
+ deviation of the normally distributed logarithm of the variable.
+ A log-normal distribution results if a random variable is the *product*
+ of a large number of independent, identically-distributed variables in
+ the same way that a normal distribution results if the variable is the
+ *sum* of a large number of independent, identically-distributed
+ variables.
+
+ References
+ ----------
+ .. [1] Limpert, E., Stahel, W. A., and Abbt, M., "Log-normal
+ Distributions across the Sciences: Keys and Clues,"
+ BioScience, Vol. 51, No. 5, May, 2001.
+ https://stat.ethz.ch/~stahel/lognormal/bioscience.pdf
+ .. [2] Reiss, R.D. and Thomas, M., "Statistical Analysis of Extreme
+ Values," Basel: Birkhauser Verlag, 2001, pp. 31-32.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> mu, sigma = 3., 1. # mean and standard deviation
+ >>> s = rng.lognormal(mu, sigma, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 100, density=True, align='mid')
+
+ >>> x = np.linspace(min(bins), max(bins), 10000)
+ >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
+ ... / (x * sigma * np.sqrt(2 * np.pi)))
+
+ >>> plt.plot(x, pdf, linewidth=2, color='r')
+ >>> plt.axis('tight')
+ >>> plt.show()
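+
+ Since ``np.log(s)`` is drawn from the underlying normal distribution,
+ its sample mean and standard deviation should be close to ``mu`` and
+ ``sigma`` (a quick check; values vary by run):
+
+ >>> np.log(s).mean(), np.log(s).std() # doctest: +SKIP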
+
+ Demonstrate that taking the products of random samples from a normal
+ distribution can be fit well by a log-normal probability density
+ function.
+
+ >>> # Generate a thousand samples: each is the product of 100 random
+ >>> # values, drawn from a normal distribution.
+ >>> b = []
+ >>> for i in range(1000):
+ ... a = 10. + rng.standard_normal(100)
+ ... b.append(np.prod(a))
+
+ >>> b = np.array(b) / np.min(b) # scale values to be positive
+ >>> count, bins, ignored = plt.hist(b, 100, density=True, align='mid')
+ >>> sigma = np.std(np.log(b))
+ >>> mu = np.mean(np.log(b))
+
+ >>> x = np.linspace(min(bins), max(bins), 10000)
+ >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
+ ... / (x * sigma * np.sqrt(2 * np.pi)))
+
+ >>> plt.plot(x, pdf, color='r', linewidth=2)
+ >>> plt.show()
+
+ """
+ return cont(&random_lognormal, &self._bitgen, size, self.lock, 2,
+ mean, 'mean', CONS_NONE,
+ sigma, 'sigma', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def rayleigh(self, scale=1.0, size=None):
+ """
+ rayleigh(scale=1.0, size=None)
+
+ Draw samples from a Rayleigh distribution.
+
+ The :math:`\\chi` and Weibull distributions are generalizations of the
+ Rayleigh.
+
+ Parameters
+ ----------
+ scale : float or array_like of floats, optional
+ Scale, also equals the mode. Must be non-negative. Default is 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``scale`` is a scalar. Otherwise,
+ ``np.array(scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Rayleigh distribution.
+
+ Notes
+ -----
+ The probability density function for the Rayleigh distribution is
+
+ .. math:: P(x;scale) = \\frac{x}{scale^2}e^{\\frac{-x^2}{2 \\cdotp scale^2}}
+
+ The Rayleigh distribution would arise, for example, if the East
+ and North components of the wind velocity had identical zero-mean
+ Gaussian distributions. Then the wind speed would have a Rayleigh
+ distribution.
+
+ References
+ ----------
+ .. [1] Brighton Webs Ltd., "Rayleigh Distribution,"
+ https://web.archive.org/web/20090514091424/http://brighton-webs.co.uk:80/distributions/rayleigh.asp
+ .. [2] Wikipedia, "Rayleigh distribution"
+ https://en.wikipedia.org/wiki/Rayleigh_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram
+
+ >>> from matplotlib.pyplot import hist
+ >>> rng = np.random.default_rng()
+ >>> values = hist(rng.rayleigh(3, 100000), bins=200, density=True)
+
+ Wave heights tend to follow a Rayleigh distribution. If the mean wave
+ height is 1 meter, what fraction of waves are likely to be larger than 3
+ meters?
+
+ >>> meanvalue = 1
+ >>> modevalue = np.sqrt(2 / np.pi) * meanvalue
+ >>> s = rng.rayleigh(modevalue, 1000000)
+
+ The percentage of waves larger than 3 meters is:
+
+ >>> 100.*sum(s>3)/1000000.
+ 0.087300000000000003 # random
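+
+ As described in the Notes, the same distribution arises from two
+ orthogonal zero-mean Gaussian components (an illustrative
+ construction, not the sampler used internally):
+
+ >>> speeds = np.hypot(rng.normal(0, modevalue, 1000000),
+ ... rng.normal(0, modevalue, 1000000))
+ >>> 100.*sum(speeds>3)/1000000. # doctest: +SKIP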
+
+ """
+ return cont(&random_rayleigh, &self._bitgen, size, self.lock, 1,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def wald(self, mean, scale, size=None):
+ """
+ wald(mean, scale, size=None)
+
+ Draw samples from a Wald, or inverse Gaussian, distribution.
+
+ As the scale approaches infinity, the distribution becomes more like a
+ Gaussian. Some references claim that the Wald is an inverse Gaussian
+ with mean equal to 1, but this is by no means universal.
+
+ The inverse Gaussian distribution was first studied in relationship to
+ Brownian motion. In 1956 M.C.K. Tweedie used the name inverse Gaussian
+ because there is an inverse relationship between the time to cover a
+ unit distance and distance covered in unit time.
+
+ Parameters
+ ----------
+ mean : float or array_like of floats
+ Distribution mean, must be > 0.
+ scale : float or array_like of floats
+ Scale parameter, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``mean`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(mean, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Wald distribution.
+
+ Notes
+ -----
+ The probability density function for the Wald distribution is
+
+ .. math:: P(x;mean,scale) = \\sqrt{\\frac{scale}{2\\pi x^3}}e^
+ \\frac{-scale(x-mean)^2}{2\\cdotp mean^2x}
+
+ As noted above, the inverse Gaussian distribution first arose from
+ attempts to model Brownian motion. It is also a competitor to the
+ Weibull for use in reliability modeling and modeling stock
+ returns and interest rate processes.
+
+ References
+ ----------
+ .. [1] Brighton Webs Ltd., Wald Distribution,
+ https://web.archive.org/web/20090423014010/http://www.brighton-webs.co.uk:80/distributions/wald.asp
+ .. [2] Chhikara, Raj S., and Folks, J. Leroy, "The Inverse Gaussian
+ Distribution: Theory, Methodology, and Applications", CRC Press,
+ 1988.
+ .. [3] Wikipedia, "Inverse Gaussian distribution"
+ https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram:
+
+ >>> import matplotlib.pyplot as plt
+ >>> h = plt.hist(np.random.default_rng().wald(3, 2, 100000), bins=200, density=True)
+ >>> plt.show()
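+
+ The density given in the Notes can be overlaid on the histogram (a
+ quick sketch using the same ``mean=3, scale=2``):
+
+ >>> mean, scale = 3., 2.
+ >>> x = np.linspace(0.01, 12, 500)
+ >>> pdf = (np.sqrt(scale/(2*np.pi*x**3))
+ ... * np.exp(-scale*(x - mean)**2/(2*mean**2*x)))
+ >>> plt.plot(x, pdf, 'r-')
+ >>> plt.show()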
+
+ """
+ return cont(&random_wald, &self._bitgen, size, self.lock, 2,
+ mean, 'mean', CONS_POSITIVE,
+ scale, 'scale', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def triangular(self, left, mode, right, size=None):
+ """
+ triangular(left, mode, right, size=None)
+
+ Draw samples from the triangular distribution over the
+ interval ``[left, right]``.
+
+ The triangular distribution is a continuous probability
+ distribution with lower limit left, peak at mode, and upper
+ limit right. Unlike the other distributions, these parameters
+ directly define the shape of the pdf.
+
+ Parameters
+ ----------
+ left : float or array_like of floats
+ Lower limit.
+ mode : float or array_like of floats
+ The value where the peak of the distribution occurs.
+ The value must fulfill the condition ``left <= mode <= right``.
+ right : float or array_like of floats
+ Upper limit, must be larger than `left`.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``left``, ``mode``, and ``right``
+ are all scalars. Otherwise, ``np.broadcast(left, mode, right).size``
+ samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized triangular distribution.
+
+ Notes
+ -----
+ The probability density function for the triangular distribution is
+
+ .. math:: P(x;l, m, r) = \\begin{cases}
+ \\frac{2(x-l)}{(r-l)(m-l)}& \\text{for $l \\leq x \\leq m$},\\\\
+ \\frac{2(r-x)}{(r-l)(r-m)}& \\text{for $m \\leq x \\leq r$},\\\\
+ 0& \\text{otherwise}.
+ \\end{cases}
+
+ The triangular distribution is often used in ill-defined
+ problems where the underlying distribution is not known, but
+ some knowledge of the limits and mode exists. Often it is used
+ in simulations.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Triangular distribution"
+ https://en.wikipedia.org/wiki/Triangular_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram:
+
+ >>> import matplotlib.pyplot as plt
+ >>> h = plt.hist(np.random.default_rng().triangular(-3, 0, 8, 100000), bins=200,
+ ... density=True)
+ >>> plt.show()
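+
+ The piecewise density from the Notes can be overlaid for comparison (a
+ quick sketch with the same ``left=-3, mode=0, right=8``):
+
+ >>> l, m, r = -3., 0., 8.
+ >>> x = np.linspace(l, r, 200)
+ >>> pdf = np.where(x < m, 2*(x - l)/((r - l)*(m - l)),
+ ... 2*(r - x)/((r - l)*(r - m)))
+ >>> plt.plot(x, pdf, 'r-')
+ >>> plt.show()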
+
+ """
+ cdef bint is_scalar = True
+ cdef double fleft, fmode, fright
+ cdef np.ndarray oleft, omode, oright
+
+ oleft = <np.ndarray>np.PyArray_FROM_OTF(left, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ omode = <np.ndarray>np.PyArray_FROM_OTF(mode, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ oright = <np.ndarray>np.PyArray_FROM_OTF(right, np.NPY_DOUBLE, np.NPY_ALIGNED)
+
+ if np.PyArray_NDIM(oleft) == np.PyArray_NDIM(omode) == np.PyArray_NDIM(oright) == 0:
+ fleft = PyFloat_AsDouble(left)
+ fright = PyFloat_AsDouble(right)
+ fmode = PyFloat_AsDouble(mode)
+
+ if fleft > fmode:
+ raise ValueError("left > mode")
+ if fmode > fright:
+ raise ValueError("mode > right")
+ if fleft == fright:
+ raise ValueError("left == right")
+ return cont(&random_triangular, &self._bitgen, size, self.lock, 3,
+ fleft, '', CONS_NONE,
+ fmode, '', CONS_NONE,
+ fright, '', CONS_NONE, None)
+
+ if np.any(np.greater(oleft, omode)):
+ raise ValueError("left > mode")
+ if np.any(np.greater(omode, oright)):
+ raise ValueError("mode > right")
+ if np.any(np.equal(oleft, oright)):
+ raise ValueError("left == right")
+
+ return cont_broadcast_3(&random_triangular, &self._bitgen, size, self.lock,
+ oleft, '', CONS_NONE,
+ omode, '', CONS_NONE,
+ oright, '', CONS_NONE)
+
+ # Complicated, discrete distributions:
+ def binomial(self, n, p, size=None):
+ """
+ binomial(n, p, size=None)
+
+ Draw samples from a binomial distribution.
+
+ Samples are drawn from a binomial distribution with specified
+ parameters, n trials and p probability of success where
+ n is an integer >= 0 and p is in the interval [0, 1]. (n may be
+ input as a float, but it is truncated to an integer in use)
+
+ Parameters
+ ----------
+ n : int or array_like of ints
+ Parameter of the distribution, >= 0. Floats are also accepted,
+ but they will be truncated to integers.
+ p : float or array_like of floats
+ Parameter of the distribution, >= 0 and <=1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``n`` and ``p`` are both scalars.
+ Otherwise, ``np.broadcast(n, p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized binomial distribution, where
+ each sample is equal to the number of successes over the n trials.
+
+ See Also
+ --------
+ scipy.stats.binom : probability density function, distribution or
+ cumulative distribution function, etc.
+
+ Notes
+ -----
+ The probability mass function for the binomial distribution is
+
+ .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N},
+
+ where :math:`n` is the number of trials, :math:`p` is the probability
+ of success, and :math:`N` is the number of successes.
+
+ When estimating the standard error of a proportion in a population by
+ using a random sample, the normal distribution works well unless the
+ product p*n <= 5, where p = population proportion estimate, and n =
+ number of samples, in which case the binomial distribution is used
+ instead. For example, a sample of 15 people shows 4 who are left
+ handed and 11 who are right handed. Then p = 4/15 = 27%, and
+ 0.27*15 = 4, which is less than 5, so the binomial distribution
+ should be used in this case.
+
+ References
+ ----------
+ .. [1] Dalgaard, Peter, "Introductory Statistics with R",
+ Springer-Verlag, 2002.
+ .. [2] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
+ Fifth Edition, 2002.
+ .. [3] Lentner, Marvin, "Elementary Applied Statistics", Bogden
+ and Quigley, 1972.
+ .. [4] Weisstein, Eric W. "Binomial Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/BinomialDistribution.html
+ .. [5] Wikipedia, "Binomial distribution",
+ https://en.wikipedia.org/wiki/Binomial_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> n, p = 10, .5 # number of trials, probability of each trial
+ >>> s = rng.binomial(n, p, 1000)
+ # result of flipping a coin 10 times, tested 1000 times.
+
+ A real world example. A company drills 9 wild-cat oil exploration
+ wells, each with an estimated probability of success of 0.1. All nine
+ wells fail. What is the probability of that happening?
+
+ Let's do 20,000 trials of the model, and count the number that
+ generate zero positive results.
+
+ >>> sum(rng.binomial(9, 0.1, 20000) == 0)/20000.
+ # answer = 0.38885, or 38%.
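+
+ The estimate is close to the exact probability, ``0.9**9``:
+
+ >>> round((1 - 0.1)**9, 5)
+ 0.38742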
+
+ """
+
+ # Uses a custom implementation since self._binomial is required
+ cdef double _dp = 0
+ cdef int64_t _in = 0
+ cdef bint is_scalar = True
+ cdef np.npy_intp i, cnt
+ cdef np.ndarray randoms
+ cdef np.int64_t *randoms_data
+ cdef np.broadcast it
+
+ p_arr = <np.ndarray>np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(p_arr) == 0
+ n_arr = <np.ndarray>np.PyArray_FROM_OTF(n, np.NPY_INT64, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(n_arr) == 0
+
+ if not is_scalar:
+ check_array_constraint(p_arr, 'p', CONS_BOUNDED_0_1)
+ check_array_constraint(n_arr, 'n', CONS_NON_NEGATIVE)
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ else:
+ it = np.PyArray_MultiIterNew2(p_arr, n_arr)
+ randoms = <np.ndarray>np.empty(it.shape, np.int64)
+
+ randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
+ cnt = np.PyArray_SIZE(randoms)
+
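+ # Broadcast (output, p, n) together and fill element-wise;
+ # random_binomial caches its setup in self._binomial and reuses it
+ # while the same (p, n) pair repeats between iterations.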
+ it = np.PyArray_MultiIterNew3(randoms, p_arr, n_arr)
+ with self.lock, nogil:
+ for i in range(cnt):
+ _dp = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ _in = (<int64_t*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0] = random_binomial(&self._bitgen, _dp, _in, &self._binomial)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+ _dp = PyFloat_AsDouble(p)
+ _in = <int64_t>n
+ check_constraint(_dp, 'p', CONS_BOUNDED_0_1)
+ check_constraint(<double>_in, 'n', CONS_NON_NEGATIVE)
+
+ if size is None:
+ with self.lock:
+ return random_binomial(&self._bitgen, _dp, _in, &self._binomial)
+
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ cnt = np.PyArray_SIZE(randoms)
+ randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
+
+ with self.lock, nogil:
+ for i in range(cnt):
+ randoms_data[i] = random_binomial(&self._bitgen, _dp, _in,
+ &self._binomial)
+
+ return randoms
+
+ def negative_binomial(self, n, p, size=None):
+ """
+ negative_binomial(n, p, size=None)
+
+ Draw samples from a negative binomial distribution.
+
+ Samples are drawn from a negative binomial distribution with specified
+ parameters, `n` successes and `p` probability of success where `n`
+ is > 0 and `p` is in the interval [0, 1].
+
+ Parameters
+ ----------
+ n : float or array_like of floats
+ Parameter of the distribution, > 0.
+ p : float or array_like of floats
+ Parameter of the distribution, >= 0 and <=1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``n`` and ``p`` are both scalars.
+ Otherwise, ``np.broadcast(n, p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized negative binomial distribution,
+ where each sample is equal to N, the number of failures that
+ occurred before a total of n successes was reached.
+
+ Notes
+ -----
+ The probability mass function of the negative binomial distribution is
+
+ .. math:: P(N;n,p) = \\frac{\\Gamma(N+n)}{N!\\Gamma(n)}p^{n}(1-p)^{N},
+
+ where :math:`n` is the number of successes, :math:`p` is the
+ probability of success, :math:`N+n` is the number of trials, and
+ :math:`\\Gamma` is the gamma function. When :math:`n` is an integer,
+ :math:`\\frac{\\Gamma(N+n)}{N!\\Gamma(n)} = \\binom{N+n-1}{N}`, which is
+ the more common form of this term in the pmf. The negative
+ binomial distribution gives the probability of N failures given n
+ successes, with a success on the last trial.
+
+ If one throws a die repeatedly until the third time a "1" appears,
+ then the probability distribution of the number of non-"1"s that
+ appear before the third "1" is a negative binomial distribution.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Negative Binomial Distribution." From
+ MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/NegativeBinomialDistribution.html
+ .. [2] Wikipedia, "Negative binomial distribution",
+ https://en.wikipedia.org/wiki/Negative_binomial_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ A real world example. A company drills wild-cat oil
+ exploration wells, each with an estimated probability of
+ success of 0.1. What is the probability of having one success
+ for each successive well, that is, what is the probability of a
+ single success after drilling 5 wells, after 6 wells, etc.?
+
+ >>> s = np.random.default_rng().negative_binomial(1, 0.1, 100000)
+ >>> for i in range(1, 11): # doctest: +SKIP
+ ... probability = sum(s<i) / 100000.
+ ... print(i, "wells drilled, probability of one success =", probability)
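+
+ Since ``n = 1`` makes this a geometric waiting time, the estimates
+ above can be checked against the closed form ``1 - 0.9**i``; for
+ example, for five wells:
+
+ >>> round(1 - 0.9**5, 5)
+ 0.40951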
+
+ """
+ return disc(&random_negative_binomial, &self._bitgen, size, self.lock, 2, 0,
+ n, 'n', CONS_POSITIVE_NOT_NAN,
+ p, 'p', CONS_BOUNDED_0_1,
+ 0.0, '', CONS_NONE)
+
+ def poisson(self, lam=1.0, size=None):
+ """
+ poisson(lam=1.0, size=None)
+
+ Draw samples from a Poisson distribution.
+
+ The Poisson distribution is the limit of the binomial distribution
+ for large N.
+
+ Parameters
+ ----------
+ lam : float or array_like of floats
+ Expectation of interval, must be >= 0. A sequence of expectation
+ intervals must be broadcastable over the requested size.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``lam`` is a scalar. Otherwise,
+ ``np.array(lam).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Poisson distribution.
+
+ Notes
+ -----
+ The Poisson distribution
+
+ .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!}
+
+ For events with an expected separation :math:`\\lambda` the Poisson
+ distribution :math:`f(k; \\lambda)` describes the probability of
+ :math:`k` events occurring within the observed
+ interval :math:`\\lambda`.
+
+ Because the output is limited to the range of the C int64 type, a
+ ValueError is raised when `lam` is within 10 sigma of the maximum
+ representable value.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Poisson Distribution."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/PoissonDistribution.html
+ .. [2] Wikipedia, "Poisson distribution",
+ https://en.wikipedia.org/wiki/Poisson_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> import numpy as np
+ >>> rng = np.random.default_rng()
+ >>> s = rng.poisson(5, 10000)
+
+ Display histogram of the sample:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 14, density=True)
+ >>> plt.show()
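+
+ The mean and variance of a Poisson distribution both equal ``lam``, so
+ the sample moments offer a quick sanity check (values vary by run):
+
+ >>> s.mean(), s.var() # doctest: +SKIP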
+
+ Draw 100 values each for lambda 100 and 500:
+
+ >>> s = rng.poisson(lam=(100., 500.), size=(100, 2))
+
+ """
+ return disc(&random_poisson, &self._bitgen, size, self.lock, 1, 0,
+ lam, 'lam', CONS_POISSON,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ def zipf(self, a, size=None):
+ """
+ zipf(a, size=None)
+
+ Draw samples from a Zipf distribution.
+
+ Samples are drawn from a Zipf distribution with specified parameter
+ `a` > 1.
+
+ The Zipf distribution (also known as the zeta distribution) is a
+ discrete probability distribution that satisfies Zipf's law: the
+ frequency of an item is inversely proportional to its rank in a
+ frequency table.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Distribution parameter. Must be greater than 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Zipf distribution.
+
+ See Also
+ --------
+ scipy.stats.zipf : probability density function, distribution, or
+ cumulative distribution function, etc.
+
+ Notes
+ -----
+ The probability mass function for the Zipf distribution is
+
+ .. math:: p(x) = \\frac{x^{-a}}{\\zeta(a)},
+
+ for integers :math:`x \\geq 1`, where :math:`\\zeta` is the Riemann
+ Zeta function.
+
+ It is named for the American linguist George Kingsley Zipf, who noted
+ that the frequency of any word in a sample of a language is inversely
+ proportional to its rank in the frequency table.
+
+ References
+ ----------
+ .. [1] Zipf, G. K., "Selected Studies of the Principle of Relative
+ Frequency in Language," Cambridge, MA: Harvard Univ. Press,
+ 1932.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> a = 2. # parameter
+ >>> s = np.random.default_rng().zipf(a, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy import special # doctest: +SKIP
+
+ Truncate s values at 50 so plot is interesting:
+
+ >>> count, bins, ignored = plt.hist(s[s<50],
+ ... 50, density=True)
+ >>> x = np.arange(1., 50.)
+ >>> y = x**(-a) / special.zetac(a) # doctest: +SKIP
+ >>> plt.plot(x, y/max(y), linewidth=2, color='r') # doctest: +SKIP
+ >>> plt.show()
+
+ """
+ return disc(&random_zipf, &self._bitgen, size, self.lock, 1, 0,
+ a, 'a', CONS_GT_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ def geometric(self, p, size=None):
+ """
+ geometric(p, size=None)
+
+ Draw samples from the geometric distribution.
+
+ Bernoulli trials are experiments with one of two outcomes:
+ success or failure (an example of such an experiment is flipping
+ a coin). The geometric distribution models the number of trials
+ that must be run in order to achieve success. It is therefore
+ supported on the positive integers, ``k = 1, 2, ...``.
+
+ The probability mass function of the geometric distribution is
+
+ .. math:: f(k) = (1 - p)^{k - 1} p
+
+ where `p` is the probability of success of an individual trial.
+
+ Parameters
+ ----------
+ p : float or array_like of floats
+ The probability of success of an individual trial.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``p`` is a scalar. Otherwise,
+ ``np.array(p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized geometric distribution.
+
+ Examples
+ --------
+ Draw ten thousand values from the geometric distribution,
+ with the probability of an individual success equal to 0.35:
+
+ >>> z = np.random.default_rng().geometric(p=0.35, size=10000)
+
+ What proportion of trials succeeded on the first try?
+
+ >>> (z == 1).sum() / 10000.
+ 0.34889999999999999 #random
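+
+ This is consistent with the pmf above, since ``f(1) = p = 0.35``. The
+ full empirical distribution can be compared against the pmf as well (a
+ quick sketch):
+
+ >>> import matplotlib.pyplot as plt
+ >>> k = np.arange(1, 15)
+ >>> plt.bar(k, np.bincount(z, minlength=15)[1:15]/10000.)
+ >>> plt.plot(k, (1 - 0.35)**(k - 1)*0.35, 'ro')
+ >>> plt.show()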
+
+ """
+ return disc(&random_geometric, &self._bitgen, size, self.lock, 1, 0,
+ p, 'p', CONS_BOUNDED_GT_0_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ def hypergeometric(self, ngood, nbad, nsample, size=None):
+ """
+ hypergeometric(ngood, nbad, nsample, size=None)
+
+ Draw samples from a Hypergeometric distribution.
+
+ Samples are drawn from a hypergeometric distribution with specified
+ parameters, `ngood` (ways to make a good selection), `nbad` (ways to make
+ a bad selection), and `nsample` (number of items sampled, which is less
+ than or equal to the sum ``ngood + nbad``).
+
+ Parameters
+ ----------
+ ngood : int or array_like of ints
+ Number of ways to make a good selection. Must be nonnegative and
+ less than 10**9.
+ nbad : int or array_like of ints
+ Number of ways to make a bad selection. Must be nonnegative and
+ less than 10**9.
+ nsample : int or array_like of ints
+ Number of items sampled. Must be nonnegative and less than or
+ equal to ``ngood + nbad``.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if `ngood`, `nbad`, and `nsample`
+ are all scalars. Otherwise, ``np.broadcast(ngood, nbad, nsample).size``
+ samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized hypergeometric distribution. Each
+ sample is the number of good items within a randomly selected subset of
+ size `nsample` taken from a set of `ngood` good items and `nbad` bad items.
+
+ See Also
+ --------
+ multivariate_hypergeometric : Draw samples from the multivariate
+ hypergeometric distribution.
+ scipy.stats.hypergeom : probability density function, distribution or
+ cumulative distribution function, etc.
+
+ Notes
+ -----
+ The probability mass function for the Hypergeometric distribution is
+
+ .. math:: P(x) = \\frac{\\binom{g}{x}\\binom{b}{n-x}}{\\binom{g+b}{n}},
+
+ where :math:`0 \\le x \\le n` and :math:`n-b \\le x \\le g`
+
+ for P(x) the probability of ``x`` good results in the drawn sample,
+ g = `ngood`, b = `nbad`, and n = `nsample`.
+
+ Consider an urn with black and white marbles in it, `ngood` of them
+ are black and `nbad` are white. If you draw `nsample` balls without
+ replacement, then the hypergeometric distribution describes the
+ distribution of black balls in the drawn sample.
+
+ Note that this distribution is very similar to the binomial
+ distribution, except that in this case, samples are drawn without
+ replacement, whereas in the Binomial case samples are drawn with
+ replacement (or the sample space is infinite). As the sample space
+ becomes large, this distribution approaches the binomial.
+
+ The arguments `ngood` and `nbad` each must be less than `10**9`. For
+ extremely large arguments, the algorithm that is used to compute the
+ samples [4]_ breaks down because of loss of precision in floating point
+ calculations. For such large values, if `nsample` is not also large,
+ the distribution can be approximated with the binomial distribution,
+ `binomial(n=nsample, p=ngood/(ngood + nbad))`.
+
+ References
+ ----------
+ .. [1] Lentner, Marvin, "Elementary Applied Statistics", Bogden
+ and Quigley, 1972.
+ .. [2] Weisstein, Eric W. "Hypergeometric Distribution." From
+ MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/HypergeometricDistribution.html
+ .. [3] Wikipedia, "Hypergeometric distribution",
+ https://en.wikipedia.org/wiki/Hypergeometric_distribution
+ .. [4] Stadlober, Ernst, "The ratio of uniforms approach for generating
+ discrete random variates", Journal of Computational and Applied
+ Mathematics, 31, pp. 181-189 (1990).
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> ngood, nbad, nsamp = 100, 2, 10
+ # number of good, number of bad, and number of samples
+ >>> s = rng.hypergeometric(ngood, nbad, nsamp, 1000)
+ >>> from matplotlib.pyplot import hist
+ >>> hist(s)
+ # note that it is very unlikely to grab both bad items
+
+ Suppose you have an urn with 15 white and 15 black marbles.
+ If you pull 15 marbles at random, how likely is it that
+ 12 or more of them are one color?
+
+ >>> s = rng.hypergeometric(15, 15, 15, 100000)
+ >>> sum(s>=12)/100000. + sum(s<=3)/100000.
+ # answer = 0.003 ... pretty unlikely!
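+
+ The exact value is available from `scipy.stats.hypergeom` for
+ comparison (an illustrative cross-check; by symmetry the two tails
+ are equal):
+
+ >>> from scipy import stats # doctest: +SKIP
+ >>> 2 * stats.hypergeom(30, 15, 15).cdf(3) # doctest: +SKIP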
+
+ """
+ DEF HYPERGEOM_MAX = 10**9
+ cdef bint is_scalar = True
+ cdef np.ndarray ongood, onbad, onsample
+ cdef int64_t lngood, lnbad, lnsample
+
+ ongood = <np.ndarray>np.PyArray_FROM_OTF(ngood, np.NPY_INT64, np.NPY_ALIGNED)
+ onbad = <np.ndarray>np.PyArray_FROM_OTF(nbad, np.NPY_INT64, np.NPY_ALIGNED)
+ onsample = <np.ndarray>np.PyArray_FROM_OTF(nsample, np.NPY_INT64, np.NPY_ALIGNED)
+
+ if np.PyArray_NDIM(ongood) == np.PyArray_NDIM(onbad) == np.PyArray_NDIM(onsample) == 0:
+
+ lngood = <int64_t>ngood
+ lnbad = <int64_t>nbad
+ lnsample = <int64_t>nsample
+
+ if lngood >= HYPERGEOM_MAX or lnbad >= HYPERGEOM_MAX:
+ raise ValueError("both ngood and nbad must be less than %d" %
+ HYPERGEOM_MAX)
+ if lngood + lnbad < lnsample:
+ raise ValueError("ngood + nbad < nsample")
+ return disc(&random_hypergeometric, &self._bitgen, size, self.lock, 0, 3,
+ lngood, 'ngood', CONS_NON_NEGATIVE,
+ lnbad, 'nbad', CONS_NON_NEGATIVE,
+ lnsample, 'nsample', CONS_NON_NEGATIVE)
+
+ if np.any(ongood >= HYPERGEOM_MAX) or np.any(onbad >= HYPERGEOM_MAX):
+ raise ValueError("both ngood and nbad must be less than %d" %
+ HYPERGEOM_MAX)
+
+ if np.any(np.less(np.add(ongood, onbad), onsample)):
+ raise ValueError("ngood + nbad < nsample")
+
+ return discrete_broadcast_iii(&random_hypergeometric, &self._bitgen, size, self.lock,
+ ongood, 'ngood', CONS_NON_NEGATIVE,
+ onbad, 'nbad', CONS_NON_NEGATIVE,
+ onsample, 'nsample', CONS_NON_NEGATIVE)
+
+ def logseries(self, p, size=None):
+ """
+ logseries(p, size=None)
+
+ Draw samples from a logarithmic series distribution.
+
+ Samples are drawn from a log series distribution with specified
+ shape parameter, 0 < ``p`` < 1.
+
+ Parameters
+ ----------
+ p : float or array_like of floats
+ Shape parameter for the distribution. Must be in the range (0, 1).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``p`` is a scalar. Otherwise,
+ ``np.array(p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized logarithmic series distribution.
+
+ See Also
+ --------
+ scipy.stats.logser : probability density function, distribution or
+ cumulative distribution function, etc.
+
+ Notes
+ -----
+ The probability mass function for the Log Series distribution is
+
+ .. math:: P(k) = \\frac{-p^k}{k \\ln(1-p)},
+
+ where p = probability.
+
+ The log series distribution is frequently used to represent species
+ richness and occurrence, first proposed by Fisher, Corbet, and
+ Williams in 1943 [2]_. It may also be used to model the numbers of
+ occupants seen in cars [3]_.
+
+ References
+ ----------
+ .. [1] Buzas, Martin A.; Culver, Stephen J., Understanding regional
+ species diversity through the log series distribution of
+ occurrences: BIODIVERSITY RESEARCH Diversity & Distributions,
+ Volume 5, Number 5, September 1999 , pp. 187-195(9).
+ .. [2] Fisher, R.A., A.S. Corbet, and C.B. Williams. 1943. The
+ relation between the number of species and the number of
+ individuals in a random sample of an animal population.
+ Journal of Animal Ecology, 12:42-58.
+ .. [3] D. J. Hand, F. Daly, D. Lunn, E. Ostrowski, A Handbook of Small
+ Data Sets, CRC Press, 1994.
+ .. [4] Wikipedia, "Logarithmic distribution",
+ https://en.wikipedia.org/wiki/Logarithmic_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> a = .6
+ >>> s = np.random.default_rng().logseries(a, 10000)
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s)
+
+ # plot against distribution
+
+ >>> def logseries(k, p):
+ ... return -p**k/(k*np.log(1-p))
+ >>> plt.plot(bins, logseries(bins, a) * count.max()/
+ ... logseries(bins, a).max(), 'r')
+ >>> plt.show()
+
+ """
+ return disc(&random_logseries, &self._bitgen, size, self.lock, 1, 0,
+ p, 'p', CONS_BOUNDED_0_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ # Multivariate distributions:
+ def multivariate_normal(self, mean, cov, size=None, check_valid='warn',
+ tol=1e-8, *, method='svd'):
+ """
+ multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8, *, method='svd')
+
+ Draw random samples from a multivariate normal distribution.
+
+ The multivariate normal, multinormal or Gaussian distribution is a
+ generalization of the one-dimensional normal distribution to higher
+ dimensions. Such a distribution is specified by its mean and
+ covariance matrix. These parameters are analogous to the mean
+ (average or "center") and variance (standard deviation, or "width,"
+ squared) of the one-dimensional normal distribution.
+
+ Parameters
+ ----------
+ mean : 1-D array_like, of length N
+ Mean of the N-dimensional distribution.
+ cov : 2-D array_like, of shape (N, N)
+ Covariance matrix of the distribution. It must be symmetric and
+ positive-semidefinite for proper sampling.
+ size : int or tuple of ints, optional
+ Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are
+ generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because
+ each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.
+ If no shape is specified, a single (`N`-D) sample is returned.
+ check_valid : { 'warn', 'raise', 'ignore' }, optional
+ Behavior when the covariance matrix is not positive semidefinite.
+ tol : float, optional
+ Tolerance when checking the singular values in covariance matrix.
+ cov is cast to double before the check.
+ method : {'svd', 'eigh', 'cholesky'}, optional
+ The cov input is used to compute a factor matrix A such that
+ ``A @ A.T = cov``. This argument selects the method used to
+ compute the factor matrix A. The default method 'svd' is the
+ slowest but the most robust; 'cholesky' is the fastest but the
+ least robust. The method 'eigh' uses eigendecomposition to
+ compute A and is faster than 'svd' but slower than 'cholesky'.
+
+ .. versionadded:: 1.18.0
+
+ Returns
+ -------
+ out : ndarray
+ The drawn samples, of shape *size*, if that was provided. If not,
+ the shape is ``(N,)``.
+
+ In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+ value drawn from the distribution.
+
+ Notes
+ -----
+ The mean is a coordinate in N-dimensional space, which represents the
+ location where samples are most likely to be generated. This is
+ analogous to the peak of the bell curve for the one-dimensional or
+ univariate normal distribution.
+
+ Covariance indicates the level to which two variables vary together.
+ From the multivariate normal distribution, we draw N-dimensional
+ samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix
+ element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`.
+ The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its
+ "spread").
+
+ Instead of specifying the full covariance matrix, popular
+ approximations include:
+
+ - Spherical covariance (`cov` is a multiple of the identity matrix)
+ - Diagonal covariance (`cov` has non-negative elements, and only on
+ the diagonal)
+
+ This geometrical property can be seen in two dimensions by plotting
+ generated data-points:
+
+ >>> mean = [0, 0]
+ >>> cov = [[1, 0], [0, 100]] # diagonal covariance
+
+ Diagonal covariance means that points are oriented along the x- or
+ y-axis:
+
+ >>> import matplotlib.pyplot as plt
+ >>> x, y = np.random.default_rng().multivariate_normal(mean, cov, 5000).T
+ >>> plt.plot(x, y, 'x')
+ >>> plt.axis('equal')
+ >>> plt.show()
+
+ Note that the covariance matrix must be positive semidefinite (a.k.a.
+ nonnegative-definite). Otherwise, the behavior of this method is
+ undefined and backwards compatibility is not guaranteed.
+
+ References
+ ----------
+ .. [1] Papoulis, A., "Probability, Random Variables, and Stochastic
+ Processes," 3rd ed., New York: McGraw-Hill, 1991.
+ .. [2] Duda, R. O., Hart, P. E., and Stork, D. G., "Pattern
+ Classification," 2nd ed., New York: Wiley, 2001.
+
+ Examples
+ --------
+ >>> mean = (1, 2)
+ >>> cov = [[1, 0], [0, 1]]
+ >>> rng = np.random.default_rng()
+ >>> x = rng.multivariate_normal(mean, cov, (3, 3))
+ >>> x.shape
+ (3, 3, 2)
+
+ We can use a method other than the default to factorize cov:
+
+ >>> y = rng.multivariate_normal(mean, cov, (3, 3), method='cholesky')
+ >>> y.shape
+ (3, 3, 2)
+
+ The following is more likely than not: each standard-normal component
+ falls below 0.6 with probability about 0.73, so both comparisons hold
+ with probability about 0.53:
+
+ >>> list((x[0,0,:] - mean) < 0.6)
+ [True, True] # random
+
+ """
+ if method not in {'eigh', 'svd', 'cholesky'}:
+ raise ValueError(
+ "method must be one of {'eigh', 'svd', 'cholesky'}")
+
+ # Check preconditions on arguments
+ mean = np.array(mean)
+ cov = np.array(cov)
+ if size is None:
+ shape = []
+ elif isinstance(size, (int, long, np.integer)):
+ shape = [size]
+ else:
+ shape = size
+
+ if len(mean.shape) != 1:
+ raise ValueError("mean must be 1 dimensional")
+ if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
+ raise ValueError("cov must be 2 dimensional and square")
+ if mean.shape[0] != cov.shape[0]:
+ raise ValueError("mean and cov must have same length")
+
+ # Compute shape of output and create a matrix of independent
+ # standard normally distributed random numbers. The matrix has rows
+ # with the same length as mean and as many rows as are necessary to
+ # form a matrix of shape final_shape.
+ final_shape = list(shape[:])
+ final_shape.append(mean.shape[0])
+ x = self.standard_normal(final_shape).reshape(-1, mean.shape[0])
+
+ # Transform matrix of standard normals into matrix where each row
+ # contains multivariate normals with the desired covariance.
+ # Compute A such that dot(transpose(A),A) == cov.
+ # Then the matrix products of the rows of x and A have the desired
+ # covariance. Note that sqrt(s)*v where (u,s,v) is the singular value
+ # decomposition of cov is such an A.
+ #
+ # Also check that cov is positive-semidefinite. If so, the u.T and v
+ # matrices should be equal up to roundoff error if cov is
+ # symmetric and the singular value of the corresponding row is
+ # not zero. We continue to use the SVD rather than Cholesky in
+ # order to preserve current outputs. Note that symmetry has not
+ # been checked.
+
+ # GH10839, ensure double to make tol meaningful
+ cov = cov.astype(np.double)
+ if method == 'svd':
+ from numpy.dual import svd
+ (u, s, vh) = svd(cov)
+ elif method == 'eigh':
+ from numpy.dual import eigh
+ # could call linalg.svd(hermitian=True), but that calculates a vh we don't need
+ (s, u) = eigh(cov)
+ else:
+ from numpy.dual import cholesky
+ l = cholesky(cov)
+
+ # make sure check_valid is ignored when method == 'cholesky'
+ # since the decomposition will have failed if cov is not valid.
+ if check_valid != 'ignore' and method != 'cholesky':
+ if check_valid != 'warn' and check_valid != 'raise':
+ raise ValueError(
+ "check_valid must equal 'warn', 'raise', or 'ignore'")
+ if method == 'svd':
+ psd = np.allclose(np.dot(vh.T * s, vh), cov, rtol=tol, atol=tol)
+ else:
+ psd = not np.any(s < -tol)
+ if not psd:
+ if check_valid == 'warn':
+ warnings.warn("covariance is not positive-semidefinite.",
+ RuntimeWarning)
+ else:
+ raise ValueError("covariance is not positive-semidefinite.")
+
+ if method == 'cholesky':
+ _factor = l
+ elif method == 'eigh':
+ # if check_valid == 'ignore' we need to ensure that np.sqrt does not
+ # return a NaN if s is a very small negative number that is
+ # approximately zero or when the covariance is not positive-semidefinite
+ _factor = u * np.sqrt(abs(s))
+ else:
+ _factor = np.sqrt(s)[:, None] * vh
+
+ x = np.dot(x, _factor)
+ x += mean
+ x.shape = tuple(final_shape)
+ return x
+
+ def multinomial(self, object n, object pvals, size=None):
+ """
+ multinomial(n, pvals, size=None)
+
+ Draw samples from a multinomial distribution.
+
+ The multinomial distribution is a multivariate generalization of the
+ binomial distribution. Take an experiment with one of ``p``
+ possible outcomes. An example of such an experiment is throwing a die,
+ where the outcome can be 1 through 6. Each sample drawn from the
+ distribution represents `n` such experiments. Its values,
+ ``X = [X_0, X_1, ..., X_p]``, represent the number of times the
+ outcome was ``i``.
+
+ Parameters
+ ----------
+ n : int or array-like of ints
+ Number of experiments.
+ pvals : sequence of floats, length p
+ Probabilities of each of the ``p`` different outcomes. These
+ must sum to 1 (however, the last element is always assumed to
+ account for the remaining probability, as long as
+ ``sum(pvals[:-1]) <= 1``).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ out : ndarray
+ The drawn samples, of shape *size*, if that was provided. If not,
+ the shape is ``(p,)``.
+
+ In other words, each entry ``out[i,j,...,:]`` is a ``p``-dimensional
+ value drawn from the distribution.
+
+ Examples
+ --------
+ Throw a die 20 times:
+
+ >>> rng = np.random.default_rng()
+ >>> rng.multinomial(20, [1/6.]*6, size=1)
+ array([[4, 1, 7, 5, 2, 1]]) # random
+
+ It landed 4 times on 1, once on 2, etc.
+
+ Now, throw the die 20 times, and 20 times again:
+
+ >>> rng.multinomial(20, [1/6.]*6, size=2)
+ array([[3, 4, 3, 3, 4, 3],
+ [2, 4, 3, 4, 0, 7]]) # random
+
+ For the first run, we threw 3 times 1, 4 times 2, etc. For the second,
+ we threw 2 times 1, 4 times 2, etc.
+
+ Now, do one experiment throwing the die 10 times, and 10 times again,
+ and another throwing the die 20 times, and 20 times again:
+
+ >>> rng.multinomial([[10], [20]], [1/6.]*6, size=2)
+ array([[[2, 4, 0, 1, 2, 1],
+ [1, 3, 0, 3, 1, 2]],
+ [[1, 4, 4, 4, 4, 3],
+ [3, 3, 2, 5, 5, 2]]]) # random
+
+ The first array shows the outcomes of throwing the die 10 times, and
+ the second shows the outcomes from throwing the die 20 times.
+
+ A loaded die is more likely to land on number 6:
+
+ >>> rng.multinomial(100, [1/7.]*5 + [2/7.])
+ array([11, 16, 14, 17, 16, 26]) # random
+
+ The probability inputs should be normalized. As an implementation
+ detail, the value of the last entry is ignored and assumed to take
+ up any leftover probability mass, but this should not be relied on.
+ A biased coin which has twice as much weight on one side as on the
+ other should be sampled like so:
+
+ >>> rng.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT
+ array([38, 62]) # random
+
+ not like:
+
+ >>> rng.multinomial(100, [1.0, 2.0]) # WRONG
+ Traceback (most recent call last):
+ ValueError: pvals < 0, pvals > 1 or pvals contains NaNs
+
+ """
+
+ cdef np.npy_intp d, i, sz, offset
+ cdef np.ndarray parr, mnarr, on, temp_arr
+ cdef double *pix
+ cdef int64_t *mnix
+ cdef int64_t ni
+ cdef np.broadcast it
+
+ d = len(pvals)
+ on = <np.ndarray>np.PyArray_FROM_OTF(n, np.NPY_INT64, np.NPY_ALIGNED)
+ parr = <np.ndarray>np.PyArray_FROM_OTF(
+ pvals, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ pix = <double*>np.PyArray_DATA(parr)
+ check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1)
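+ # pvals[:-1] is summed with compensated (Kahan) summation so that the
+ # tolerance check below is not swamped by rounding error when pvals
+ # is long.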
+ if kahan_sum(pix, d-1) > (1.0 + 1e-12):
+ raise ValueError("sum(pvals[:-1]) > 1.0")
+
+ if np.PyArray_NDIM(on) != 0: # vector
+ check_array_constraint(on, 'n', CONS_NON_NEGATIVE)
+ if size is None:
+ it = np.PyArray_MultiIterNew1(on)
+ else:
+ temp = np.empty(size, dtype=np.int8)
+ temp_arr = <np.ndarray>temp
+ it = np.PyArray_MultiIterNew2(on, temp_arr)
+ shape = it.shape + (d,)
+ multin = np.zeros(shape, dtype=np.int64)
+ mnarr = <np.ndarray>multin
+ mnix = <int64_t*>np.PyArray_DATA(mnarr)
+ offset = 0
+ sz = it.size
+ with self.lock, nogil:
+ for i in range(sz):
+ ni = (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+ random_multinomial(&self._bitgen, ni, &mnix[offset], pix, d, &self._binomial)
+ offset += d
+ np.PyArray_MultiIter_NEXT(it)
+ return multin
+
+ if size is None:
+ shape = (d,)
+ else:
+ try:
+ shape = (operator.index(size), d)
+ except:
+ shape = tuple(size) + (d,)
+
+ multin = np.zeros(shape, dtype=np.int64)
+ mnarr = <np.ndarray>multin
+ mnix = <int64_t*>np.PyArray_DATA(mnarr)
+ sz = np.PyArray_SIZE(mnarr)
+ ni = n
+ check_constraint(ni, 'n', CONS_NON_NEGATIVE)
+ offset = 0
+ with self.lock, nogil:
+ for i in range(sz // d):
+ random_multinomial(&self._bitgen, ni, &mnix[offset], pix, d, &self._binomial)
+ offset += d
+
+ return multin
+
+ def multivariate_hypergeometric(self, object colors, object nsample,
+ size=None, method='marginals'):
+ """
+ multivariate_hypergeometric(colors, nsample, size=None,
+ method='marginals')
+
+ Generate variates from a multivariate hypergeometric distribution.
+
+ The multivariate hypergeometric distribution is a generalization
+ of the hypergeometric distribution.
+
+ Choose ``nsample`` items at random without replacement from a
+ collection with ``N`` distinct types. ``N`` is the length of
+ ``colors``, and the values in ``colors`` are the number of occurrences
+ of that type in the collection. The total number of items in the
+ collection is ``sum(colors)``. Each random variate generated by this
+ function is a vector of length ``N`` holding the counts of the
+ different types that occurred in the ``nsample`` items.
+
+ The name ``colors`` comes from a common description of the
+ distribution: it is the probability distribution of the number of
+ marbles of each color selected without replacement from an urn
+ containing marbles of different colors; ``colors[i]`` is the number
+ of marbles in the urn with color ``i``.
+
+ Parameters
+ ----------
+ colors : sequence of integers
+ The number of each type of item in the collection from which
+ a sample is drawn. The values in ``colors`` must be nonnegative.
+ To avoid loss of precision in the algorithm, ``sum(colors)``
+ must be less than ``10**9`` when `method` is "marginals".
+ nsample : int
+ The number of items selected. ``nsample`` must not be greater
+ than ``sum(colors)``.
+ size : int or tuple of ints, optional
+ The number of variates to generate, either an integer or a tuple
+ holding the shape of the array of variates. If the given size is,
+ e.g., ``(k, m)``, then ``k * m`` variates are drawn, where one
+ variate is a vector of length ``len(colors)``, and the return value
+ has shape ``(k, m, len(colors))``. If `size` is an integer, the
+ output has shape ``(size, len(colors))``. Default is None, in
+ which case a single variate is returned as an array with shape
+ ``(len(colors),)``.
+ method : string, optional
+ Specify the algorithm that is used to generate the variates.
+ Must be 'count' or 'marginals' (the default). See the Notes
+ for a description of the methods.
+
+ Returns
+ -------
+ variates : ndarray
+ Array of variates drawn from the multivariate hypergeometric
+ distribution.
+
+ See Also
+ --------
+ hypergeometric : Draw samples from the (univariate) hypergeometric
+ distribution.
+
+ Notes
+ -----
+ The two methods do not return the same sequence of variates.
+
+ The "count" algorithm is roughly equivalent to the following numpy
+ code::
+
+ choices = np.repeat(np.arange(len(colors)), colors)
+ selection = np.random.choice(choices, nsample, replace=False)
+ variate = np.bincount(selection, minlength=len(colors))
+
+ The "count" algorithm uses a temporary array of integers with length
+ ``sum(colors)``.
+
+ The "marginals" algorithm generates a variate by using repeated
+ calls to the univariate hypergeometric sampler. It is roughly
+ equivalent to::
+
+ variate = np.zeros(len(colors), dtype=np.int64)
+ # `remaining` is the cumulative sum of `colors` from the last
+ # element to the first; e.g. if `colors` is [3, 1, 5], then
+ # `remaining` is [9, 6, 5].
+ remaining = np.cumsum(colors[::-1])[::-1]
+ for i in range(len(colors)-1):
+ if nsample < 1:
+ break
+ variate[i] = hypergeometric(colors[i], remaining[i+1],
+ nsample)
+ nsample -= variate[i]
+ variate[-1] = nsample
+
+ The default method is "marginals". For some cases (e.g. when
+ `colors` contains relatively small integers), the "count" method
+ can be significantly faster than the "marginals" method. If
+ performance of the algorithm is important, test the two methods
+ with typical inputs to decide which works best.
+
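+        For example, one way to compare the two methods on a given input
+        (a sketch; timings are machine-dependent, and ``colors`` and
+        ``nsample`` here are arbitrary)::
+
+            from timeit import timeit
+            gen = np.random.default_rng()
+            kwds = dict(colors=[1000, 2000, 5000], nsample=100)
+            t_count = timeit(lambda: gen.multivariate_hypergeometric(
+                method='count', **kwds), number=1000)
+            t_marginals = timeit(lambda: gen.multivariate_hypergeometric(
+                method='marginals', **kwds), number=1000)
+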
+ .. versionadded:: 1.18.0
+
+ Examples
+ --------
+ >>> colors = [16, 8, 4]
+ >>> seed = 4861946401452
+ >>> gen = np.random.Generator(np.random.PCG64(seed))
+ >>> gen.multivariate_hypergeometric(colors, 6)
+ array([5, 0, 1])
+ >>> gen.multivariate_hypergeometric(colors, 6, size=3)
+ array([[5, 0, 1],
+ [2, 2, 2],
+ [3, 3, 0]])
+ >>> gen.multivariate_hypergeometric(colors, 6, size=(2, 2))
+ array([[[3, 2, 1],
+ [3, 2, 1]],
+ [[4, 1, 1],
+ [3, 2, 1]]])
+ """
+ cdef int64_t nsamp
+ cdef size_t num_colors
+ cdef int64_t total
+ cdef int64_t *colors_ptr
+ cdef int64_t max_index
+ cdef size_t num_variates
+ cdef int64_t *variates_ptr
+ cdef int result
+
+ if method not in ['count', 'marginals']:
+ raise ValueError('method must be "count" or "marginals".')
+
+ try:
+ operator.index(nsample)
+ except TypeError:
+ raise ValueError('nsample must be an integer')
+
+ if nsample < 0:
+ raise ValueError("nsample must be nonnegative.")
+ if nsample > INT64_MAX:
+ raise ValueError("nsample must not exceed %d" % INT64_MAX)
+ nsamp = nsample
+
+ # Validation of colors, a 1-d sequence of nonnegative integers.
+ invalid_colors = False
+ try:
+ colors = np.asarray(colors)
+ if colors.ndim != 1:
+ invalid_colors = True
+ elif colors.size > 0 and not np.issubdtype(colors.dtype,
+ np.integer):
+ invalid_colors = True
+ elif np.any((colors < 0) | (colors > INT64_MAX)):
+ invalid_colors = True
+ except ValueError:
+ invalid_colors = True
+ if invalid_colors:
+ raise ValueError('colors must be a one-dimensional sequence '
+ 'of nonnegative integers not exceeding %d.' %
+ INT64_MAX)
+
+ colors = np.ascontiguousarray(colors, dtype=np.int64)
+ num_colors = colors.size
+
+ colors_ptr = <int64_t *> np.PyArray_DATA(colors)
+
+ total = _safe_sum_nonneg_int64(num_colors, colors_ptr)
+ if total == -1:
+ raise ValueError("sum(colors) must not exceed the maximum value "
+ "of a 64 bit signed integer (%d)" % INT64_MAX)
+
+ if method == 'marginals' and total >= 1000000000:
+ raise ValueError('When method is "marginals", sum(colors) must '
+ 'be less than 1000000000.')
+
+ # The C code that implements the 'count' method will malloc an
+ # array of size total*sizeof(size_t). Here we ensure that that
+ # product does not overflow.
+ if SIZE_MAX > <uint64_t>INT64_MAX:
+ max_index = INT64_MAX // sizeof(size_t)
+ else:
+ max_index = SIZE_MAX // sizeof(size_t)
+ if method == 'count' and total > max_index:
+ raise ValueError("When method is 'count', sum(colors) must not "
+ "exceed %d" % max_index)
+ if nsamp > total:
+ raise ValueError("nsample > sum(colors)")
+
+ # Figure out the shape of the return array.
+ if size is None:
+ shape = (num_colors,)
+ elif np.isscalar(size):
+ shape = (size, num_colors)
+ else:
+ shape = tuple(size) + (num_colors,)
+ variates = np.zeros(shape, dtype=np.int64)
+
+ if num_colors == 0:
+ return variates
+
+ # One variate is a vector of length num_colors.
+ num_variates = variates.size // num_colors
+ variates_ptr = <int64_t *> np.PyArray_DATA(variates)
+
+ if method == 'count':
+ with self.lock, nogil:
+ result = random_multivariate_hypergeometric_count(&self._bitgen,
+ total, num_colors, colors_ptr, nsamp,
+ num_variates, variates_ptr)
+ if result == -1:
+ raise MemoryError("Insufficent memory for multivariate_"
+ "hypergeometric with method='count' and "
+ "sum(colors)=%d" % total)
+ else:
+ with self.lock, nogil:
+ random_multivariate_hypergeometric_marginals(&self._bitgen,
+ total, num_colors, colors_ptr, nsamp,
+ num_variates, variates_ptr)
+ return variates
+
+ def dirichlet(self, object alpha, size=None):
+ """
+ dirichlet(alpha, size=None)
+
+ Draw samples from the Dirichlet distribution.
+
+ Draw `size` samples of dimension k from a Dirichlet distribution. A
+ Dirichlet-distributed random variable can be seen as a multivariate
+ generalization of a Beta distribution. The Dirichlet distribution
+ is a conjugate prior of a multinomial distribution in Bayesian
+ inference.
+
+ Parameters
+ ----------
+        alpha : array
+            Parameter of the distribution (length ``k`` for a sample of
+            dimension ``k``).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+        samples : ndarray
+            The drawn samples, of shape ``(size, k)``.
+
+ Raises
+        ------
+        ValueError
+            If any value in ``alpha`` is less than or equal to zero.
+
+ Notes
+ -----
+ The Dirichlet distribution is a distribution over vectors
+ :math:`x` that fulfil the conditions :math:`x_i>0` and
+ :math:`\\sum_{i=1}^k x_i = 1`.
+
+ The probability density function :math:`p` of a
+ Dirichlet-distributed random vector :math:`X` is
+ proportional to
+
+ .. math:: p(x) \\propto \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i},
+
+ where :math:`\\alpha` is a vector containing the positive
+ concentration parameters.
+
+ The method uses the following property for computation: let :math:`Y`
+ be a random vector which has components that follow a standard gamma
+ distribution, then :math:`X = \\frac{1}{\\sum_{i=1}^k{Y_i}} Y`
+        is Dirichlet-distributed.
+
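+        For example, this property can be checked directly against the
+        gamma sampler (a sketch; ``rng`` is any ``Generator`` instance)::
+
+            alpha = np.array([10., 5., 3.])
+            y = rng.standard_gamma(alpha)
+            x = y / y.sum()   # a single Dirichlet(alpha) draw
+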
+ References
+ ----------
+        .. [1] David MacKay, "Information Theory, Inference and Learning
+ Algorithms," chapter 23,
+ http://www.inference.org.uk/mackay/itila/
+ .. [2] Wikipedia, "Dirichlet distribution",
+ https://en.wikipedia.org/wiki/Dirichlet_distribution
+
+ Examples
+ --------
+        Taking an example cited in Wikipedia, this distribution can be used if
+        one wanted to cut strings (each of initial length 1.0) into K pieces
+        of different lengths, where each piece has a designated average
+        length, but some variation in the relative sizes of the pieces is
+        allowed.
+
+ >>> s = np.random.default_rng().dirichlet((10, 5, 3), 20).transpose()
+
+ >>> import matplotlib.pyplot as plt
+ >>> plt.barh(range(20), s[0])
+ >>> plt.barh(range(20), s[1], left=s[0], color='g')
+ >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')
+ >>> plt.title("Lengths of Strings")
+
+ """
+
+ # =================
+ # Pure python algo
+ # =================
+ # alpha = N.atleast_1d(alpha)
+ # k = alpha.size
+
+ # if n == 1:
+ # val = N.zeros(k)
+ # for i in range(k):
+ # val[i] = sgamma(alpha[i], n)
+ # val /= N.sum(val)
+ # else:
+ # val = N.zeros((k, n))
+ # for i in range(k):
+ # val[i] = sgamma(alpha[i], n)
+ # val /= N.sum(val, axis = 0)
+ # val = val.T
+ # return val
+
+ cdef np.npy_intp k, totsize, i, j
+ cdef np.ndarray alpha_arr, val_arr
+ cdef double *alpha_data
+ cdef double *val_data
+ cdef double acc, invacc
+
+ k = len(alpha)
+ alpha_arr = <np.ndarray>np.PyArray_FROM_OTF(
+            alpha, np.NPY_DOUBLE, np.NPY_ARRAY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ if np.any(np.less_equal(alpha_arr, 0)):
+ raise ValueError('alpha <= 0')
+ alpha_data = <double*>np.PyArray_DATA(alpha_arr)
+
+ if size is None:
+ shape = (k,)
+ else:
+ try:
+ shape = (operator.index(size), k)
+            except TypeError:
+ shape = tuple(size) + (k,)
+
+ diric = np.zeros(shape, np.float64)
+ val_arr = <np.ndarray>diric
+        val_data = <double*>np.PyArray_DATA(val_arr)
+
+ i = 0
+ totsize = np.PyArray_SIZE(val_arr)
+ with self.lock, nogil:
+ while i < totsize:
+ acc = 0.0
+ for j in range(k):
+ val_data[i+j] = random_standard_gamma(&self._bitgen,
+ alpha_data[j])
+ acc = acc + val_data[i + j]
+ invacc = 1/acc
+ for j in range(k):
+ val_data[i + j] = val_data[i + j] * invacc
+ i = i + k
+
+ return diric
+
+ # Shuffling and permutations:
+ def shuffle(self, object x, axis=0):
+ """
+ shuffle(x, axis=0)
+
+ Modify a sequence in-place by shuffling its contents.
+
+        The order of sub-arrays is changed but their contents remain the same.
+
+ Parameters
+ ----------
+ x : array_like
+ The array or list to be shuffled.
+ axis : int, optional
+            The axis along which `x` is shuffled. Default is 0.
+            It is only supported on `ndarray` objects.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ >>> rng = np.random.default_rng()
+ >>> arr = np.arange(10)
+ >>> rng.shuffle(arr)
+ >>> arr
+        array([1, 7, 5, 2, 9, 4, 3, 6, 0, 8]) # random
+
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> rng.shuffle(arr)
+ >>> arr
+ array([[3, 4, 5], # random
+ [6, 7, 8],
+ [0, 1, 2]])
+
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> rng.shuffle(arr, axis=1)
+ >>> arr
+ array([[2, 0, 1], # random
+ [5, 3, 4],
+ [8, 6, 7]])
+ """
+ cdef:
+ np.npy_intp i, j, n = len(x), stride, itemsize
+ char* x_ptr
+ char* buf_ptr
+
+ axis = normalize_axis_index(axis, np.ndim(x))
+
+ if type(x) is np.ndarray and x.ndim == 1 and x.size:
+ # Fast, statically typed path: shuffle the underlying buffer.
+ # Only for non-empty, 1d objects of class ndarray (subclasses such
+ # as MaskedArrays may not support this approach).
+ x_ptr = <char*><size_t>np.PyArray_DATA(x)
+ stride = x.strides[0]
+ itemsize = x.dtype.itemsize
+ # As the array x could contain python objects we use a buffer
+ # of bytes for the swaps to avoid leaving one of the objects
+            # within the buffer and erroneously decrementing its refcount
+ # when the function exits.
+ buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
+ buf_ptr = <char*><size_t>np.PyArray_DATA(buf)
+ with self.lock:
+ # We trick gcc into providing a specialized implementation for
+ # the most common case, yielding a ~33% performance improvement.
+ # Note that apparently, only one branch can ever be specialized.
+ if itemsize == sizeof(np.npy_intp):
+ self._shuffle_raw(n, 1, sizeof(np.npy_intp), stride, x_ptr, buf_ptr)
+ else:
+ self._shuffle_raw(n, 1, itemsize, stride, x_ptr, buf_ptr)
+ elif isinstance(x, np.ndarray) and x.ndim and x.size:
+ x = np.swapaxes(x, 0, axis)
+ buf = np.empty_like(x[0, ...])
+ with self.lock:
+ for i in reversed(range(1, len(x))):
+ j = random_interval(&self._bitgen, i)
+ if i == j:
+ # i == j is not needed and memcpy is undefined.
+ continue
+ buf[...] = x[j]
+ x[j] = x[i]
+ x[i] = buf
+ else:
+ # Untyped path.
+ if axis != 0:
+ raise NotImplementedError("Axis argument is only supported "
+ "on ndarray objects")
+ with self.lock:
+ for i in reversed(range(1, n)):
+ j = random_interval(&self._bitgen, i)
+ x[i], x[j] = x[j], x[i]
+
+ cdef inline _shuffle_raw(self, np.npy_intp n, np.npy_intp first,
+ np.npy_intp itemsize, np.npy_intp stride,
+ char* data, char* buf):
+ """
+ Parameters
+ ----------
+ n
+ Number of elements in data
+ first
+ First observation to shuffle. Shuffles n-1,
+ n-2, ..., first, so that when first=1 the entire
+ array is shuffled
+ itemsize
+ Size in bytes of item
+ stride
+ Array stride
+ data
+ Location of data
+ buf
+ Location of buffer (itemsize)
+ """
+ cdef np.npy_intp i, j
+ for i in reversed(range(first, n)):
+ j = random_interval(&self._bitgen, i)
+ string.memcpy(buf, data + j * stride, itemsize)
+ string.memcpy(data + j * stride, data + i * stride, itemsize)
+ string.memcpy(data + i * stride, buf, itemsize)
+
+ cdef inline void _shuffle_int(self, np.npy_intp n, np.npy_intp first,
+ int64_t* data) nogil:
+ """
+ Parameters
+ ----------
+ n
+ Number of elements in data
+ first
+ First observation to shuffle. Shuffles n-1,
+ n-2, ..., first, so that when first=1 the entire
+ array is shuffled
+ data
+ Location of data
+ """
+ cdef np.npy_intp i, j
+ cdef int64_t temp
+ for i in reversed(range(first, n)):
+ j = random_bounded_uint64(&self._bitgen, 0, i, 0, 0)
+ temp = data[j]
+ data[j] = data[i]
+ data[i] = temp
+
+ def permutation(self, object x, axis=0):
+ """
+ permutation(x, axis=0)
+
+ Randomly permute a sequence, or return a permuted range.
+
+ Parameters
+ ----------
+ x : int or array_like
+ If `x` is an integer, randomly permute ``np.arange(x)``.
+ If `x` is an array, make a copy and shuffle the elements
+ randomly.
+ axis : int, optional
+            The axis along which `x` is shuffled. Default is 0.
+
+ Returns
+ -------
+ out : ndarray
+ Permuted sequence or array range.
+
+ Examples
+ --------
+ >>> rng = np.random.default_rng()
+ >>> rng.permutation(10)
+ array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6]) # random
+
+ >>> rng.permutation([1, 4, 9, 12, 15])
+ array([15, 1, 9, 4, 12]) # random
+
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> rng.permutation(arr)
+ array([[6, 7, 8], # random
+ [0, 1, 2],
+ [3, 4, 5]])
+
+ >>> rng.permutation("abc")
+ Traceback (most recent call last):
+ ...
+ numpy.AxisError: x must be an integer or at least 1-dimensional
+
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> rng.permutation(arr, axis=1)
+ array([[0, 2, 1], # random
+ [3, 5, 4],
+ [6, 8, 7]])
+
+ """
+ if isinstance(x, (int, np.integer)):
+ arr = np.arange(x)
+ self.shuffle(arr)
+ return arr
+
+ arr = np.asarray(x)
+
+ axis = normalize_axis_index(axis, arr.ndim)
+
+ # shuffle has fast-path for 1-d
+ if arr.ndim == 1:
+ # Return a copy if same memory
+ if np.may_share_memory(arr, x):
+ arr = np.array(arr)
+ self.shuffle(arr)
+ return arr
+
+ # Shuffle index array, dtype to ensure fast path
+ idx = np.arange(arr.shape[axis], dtype=np.intp)
+ self.shuffle(idx)
+ slices = [slice(None)]*arr.ndim
+ slices[axis] = idx
+ return arr[tuple(slices)]
+
+
+def default_rng(seed=None):
+ """Construct a new Generator with the default BitGenerator (PCG64).
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `SeedSequence` to derive the initial `BitGenerator` state. One may also
+        pass in a `SeedSequence` instance.
+ Additionally, when passed a `BitGenerator`, it will be wrapped by
+ `Generator`. If passed a `Generator`, it will be returned unaltered.
+
+ Returns
+ -------
+ Generator
+ The initialized generator object.
+
+ Notes
+ -----
+ If ``seed`` is not a `BitGenerator` or a `Generator`, a new `BitGenerator`
+ is instantiated. This function does not manage a default global instance.
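+
+    Examples
+    --------
+    A minimal sketch of typical usage; the value shown is random:
+
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> rng.standard_normal()
+    0.123 # random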
+ """
+ if _check_bit_generator(seed):
+ # We were passed a BitGenerator, so just wrap it up.
+ return Generator(seed)
+ elif isinstance(seed, Generator):
+ # Pass through a Generator.
+ return seed
+ # Otherwise we need to instantiate a new BitGenerator and Generator as
+ # normal.
+ return Generator(PCG64(seed))
--- /dev/null
+import operator
+
+import numpy as np
+cimport numpy as np
+
+from libc.stdint cimport uint32_t, uint64_t
+from numpy.random cimport BitGenerator, SeedSequence
+
+__all__ = ['MT19937']
+
+np.import_array()
+
+cdef extern from "src/mt19937/mt19937.h":
+
+ struct s_mt19937_state:
+ uint32_t key[624]
+ int pos
+
+ ctypedef s_mt19937_state mt19937_state
+
+ uint64_t mt19937_next64(mt19937_state *state) nogil
+ uint32_t mt19937_next32(mt19937_state *state) nogil
+ double mt19937_next_double(mt19937_state *state) nogil
+ void mt19937_init_by_array(mt19937_state *state, uint32_t *init_key, int key_length)
+ void mt19937_seed(mt19937_state *state, uint32_t seed)
+ void mt19937_jump(mt19937_state *state)
+
+ enum:
+ RK_STATE_LEN
+
+cdef uint64_t mt19937_uint64(void *st) nogil:
+ return mt19937_next64(<mt19937_state *> st)
+
+cdef uint32_t mt19937_uint32(void *st) nogil:
+ return mt19937_next32(<mt19937_state *> st)
+
+cdef double mt19937_double(void *st) nogil:
+ return mt19937_next_double(<mt19937_state *> st)
+
+cdef uint64_t mt19937_raw(void *st) nogil:
+ return <uint64_t>mt19937_next32(<mt19937_state *> st)
+
+cdef class MT19937(BitGenerator):
+ """
+ MT19937(seed=None)
+
+ Container for the Mersenne Twister pseudo-random number generator.
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], SeedSequence}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `SeedSequence` to derive the initial `BitGenerator` state. One may also
+ pass in a `SeedSequence` instance.
+
+ Attributes
+ ----------
+    lock : threading.Lock
+        Lock instance that is shared so that the same bit generator can
+ be used in multiple Generators without corrupting the state. Code that
+ generates values from a bit generator should hold the bit generator's
+ lock.
+
+ Notes
+ -----
+ ``MT19937`` provides a capsule containing function pointers that produce
+    doubles, and unsigned 32- and 64-bit integers [1]_. These are not
+ directly consumable in Python and must be consumed by a ``Generator``
+ or similar object that supports low-level access.
+
+ The Python stdlib module "random" also contains a Mersenne Twister
+ pseudo-random number generator.
+
+ **State and Seeding**
+
+ The ``MT19937`` state vector consists of a 624-element array of
+ 32-bit unsigned integers plus a single integer value between 0 and 624
+ that indexes the current position within the main array.
+
+ The input seed is processed by `SeedSequence` to fill the whole state. The
+ first element is reset such that only its most significant bit is set.
+
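+    For example, the ``state`` property exposes this as a dict (a sketch;
+    the key values themselves depend on the seed)::
+
+        bg = MT19937(1234)
+        s = bg.state
+        # s['bit_generator'] == 'MT19937'
+        # s['state']['key'] is a 624-element uint32 array
+        # s['state']['pos'] is the position index
+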
+ **Parallel Features**
+
+ The preferred way to use a BitGenerator in parallel applications is to use
+ the `SeedSequence.spawn` method to obtain entropy values, and to use these
+ to generate new BitGenerators:
+
+ >>> from numpy.random import Generator, MT19937, SeedSequence
+ >>> sg = SeedSequence(1234)
+ >>> rg = [Generator(MT19937(s)) for s in sg.spawn(10)]
+
+    Another method is to use `MT19937.jumped`, which advances the state as-if
+ :math:`2^{128}` random numbers have been generated ([1]_, [2]_). This
+ allows the original sequence to be split so that distinct segments can be
+ used in each worker process. All generators should be chained to ensure
+ that the segments come from the same sequence.
+
+ >>> from numpy.random import Generator, MT19937, SeedSequence
+ >>> sg = SeedSequence(1234)
+ >>> bit_generator = MT19937(sg)
+ >>> rg = []
+ >>> for _ in range(10):
+ ... rg.append(Generator(bit_generator))
+ ... # Chain the BitGenerators
+ ... bit_generator = bit_generator.jumped()
+
+ **Compatibility Guarantee**
+
+    ``MT19937`` makes a guarantee that a fixed seed will always produce
+ the same random integer stream.
+
+ References
+ ----------
+ .. [1] Hiroshi Haramoto, Makoto Matsumoto, and Pierre L\'Ecuyer, "A Fast
+ Jump Ahead Algorithm for Linear Recurrences in a Polynomial Space",
+ Sequences and Their Applications - SETA, 290--298, 2008.
+ .. [2] Hiroshi Haramoto, Makoto Matsumoto, Takuji Nishimura, François
+ Panneton, Pierre L\'Ecuyer, "Efficient Jump Ahead for F2-Linear
+ Random Number Generators", INFORMS JOURNAL ON COMPUTING, Vol. 20,
+ No. 3, Summer 2008, pp. 385-390.
+
+ """
+ cdef mt19937_state rng_state
+
+ def __init__(self, seed=None):
+ BitGenerator.__init__(self, seed)
+ val = self._seed_seq.generate_state(RK_STATE_LEN, np.uint32)
+        # MSB is 1; ensuring the initial state array is non-zero
+ self.rng_state.key[0] = 0x80000000UL
+ for i in range(1, RK_STATE_LEN):
+ self.rng_state.key[i] = val[i]
+ self.rng_state.pos = i
+
+ self._bitgen.state = &self.rng_state
+ self._bitgen.next_uint64 = &mt19937_uint64
+ self._bitgen.next_uint32 = &mt19937_uint32
+ self._bitgen.next_double = &mt19937_double
+ self._bitgen.next_raw = &mt19937_raw
+
+ def _legacy_seeding(self, seed):
+ """
+ _legacy_seeding(seed)
+
+        Seed the generator in a backward-compatible way. For modern
+        applications, creating a new instance is preferable. Calling this
+        overrides ``self._seed_seq``.
+
+ Parameters
+ ----------
+ seed : {None, int, array_like}
+ Random seed initializing the pseudo-random number generator.
+ Can be an integer in [0, 2**32-1], array of integers in
+            [0, 2**32-1], a `SeedSequence`, or ``None``. If `seed`
+ is ``None``, then fresh, unpredictable entropy will be pulled from
+ the OS.
+
+ Raises
+ ------
+ ValueError
+ If seed values are out of range for the PRNG.
+ """
+ cdef np.ndarray obj
+ with self.lock:
+ try:
+ if seed is None:
+ seed = SeedSequence()
+ val = seed.generate_state(RK_STATE_LEN)
+                    # MSB is 1; ensuring the initial state array is non-zero
+ self.rng_state.key[0] = 0x80000000UL
+ for i in range(1, RK_STATE_LEN):
+ self.rng_state.key[i] = val[i]
+ else:
+ if hasattr(seed, 'squeeze'):
+ seed = seed.squeeze()
+ idx = operator.index(seed)
+ if idx > int(2**32 - 1) or idx < 0:
+ raise ValueError("Seed must be between 0 and 2**32 - 1")
+ mt19937_seed(&self.rng_state, seed)
+ except TypeError:
+ obj = np.asarray(seed)
+ if obj.size == 0:
+ raise ValueError("Seed must be non-empty")
+ obj = obj.astype(np.int64, casting='safe')
+ if obj.ndim != 1:
+ raise ValueError("Seed array must be 1-d")
+ if ((obj > int(2**32 - 1)) | (obj < 0)).any():
+ raise ValueError("Seed must be between 0 and 2**32 - 1")
+ obj = obj.astype(np.uint32, casting='unsafe', order='C')
+ mt19937_init_by_array(&self.rng_state, <uint32_t*> obj.data, np.PyArray_DIM(obj, 0))
+ self._seed_seq = None
+
+ cdef jump_inplace(self, iter):
+ """
+ Jump state in-place
+
+ Not part of public API
+
+ Parameters
+ ----------
+ iter : integer, positive
+ Number of times to jump the state of the rng.
+ """
+ cdef np.npy_intp i
+ for i in range(iter):
+ mt19937_jump(&self.rng_state)
+
+
+ def jumped(self, np.npy_intp jumps=1):
+ """
+ jumped(jumps=1)
+
+        Returns a new bit generator with the state jumped.
+
+        The state of the returned bit generator is jumped as-if
+        2**(128 * jumps) random numbers have been generated.
+
+ Parameters
+ ----------
+ jumps : integer, positive
+ Number of times to jump the state of the bit generator returned
+
+ Returns
+ -------
+ bit_generator : MT19937
+            New instance of generator jumped ``jumps`` times
+ """
+ cdef MT19937 bit_generator
+
+ bit_generator = self.__class__()
+ bit_generator.state = self.state
+ bit_generator.jump_inplace(jumps)
+
+ return bit_generator
+
+ @property
+ def state(self):
+ """
+ Get or set the PRNG state
+
+ Returns
+ -------
+ state : dict
+ Dictionary containing the information required to describe the
+ state of the PRNG
+ """
+ key = np.zeros(624, dtype=np.uint32)
+ for i in range(624):
+ key[i] = self.rng_state.key[i]
+
+ return {'bit_generator': self.__class__.__name__,
+ 'state': {'key': key, 'pos': self.rng_state.pos}}
+
+ @state.setter
+ def state(self, value):
+ if isinstance(value, tuple):
+ if value[0] != 'MT19937' or len(value) not in (3, 5):
+ raise ValueError('state is not a legacy MT19937 state')
+            value = {'bit_generator': 'MT19937',
+                     'state': {'key': value[1], 'pos': value[2]}}
+
+ if not isinstance(value, dict):
+ raise TypeError('state must be a dict')
+ bitgen = value.get('bit_generator', '')
+ if bitgen != self.__class__.__name__:
+ raise ValueError('state must be for a {0} '
+ 'PRNG'.format(self.__class__.__name__))
+ key = value['state']['key']
+ for i in range(624):
+ self.rng_state.key[i] = key[i]
+ self.rng_state.pos = value['state']['pos']
--- /dev/null
+import numpy as np
+cimport numpy as np
+
+from libc.stdint cimport uint32_t, uint64_t
+from ._common cimport uint64_to_double, wrap_int
+from numpy.random cimport BitGenerator
+
+__all__ = ['PCG64']
+
+cdef extern from "src/pcg64/pcg64.h":
+    # Use int as a generic type; the actual type is read from pcg64.h and is platform dependent
+ ctypedef int pcg64_random_t
+
+ struct s_pcg64_state:
+ pcg64_random_t *pcg_state
+ int has_uint32
+ uint32_t uinteger
+
+ ctypedef s_pcg64_state pcg64_state
+
+ uint64_t pcg64_next64(pcg64_state *state) nogil
+ uint32_t pcg64_next32(pcg64_state *state) nogil
+ void pcg64_jump(pcg64_state *state)
+ void pcg64_advance(pcg64_state *state, uint64_t *step)
+ void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc)
+ void pcg64_get_state(pcg64_state *state, uint64_t *state_arr, int *has_uint32, uint32_t *uinteger)
+ void pcg64_set_state(pcg64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger)
+
+cdef uint64_t pcg64_uint64(void* st) nogil:
+ return pcg64_next64(<pcg64_state *>st)
+
+cdef uint32_t pcg64_uint32(void *st) nogil:
+ return pcg64_next32(<pcg64_state *> st)
+
+cdef double pcg64_double(void* st) nogil:
+ return uint64_to_double(pcg64_next64(<pcg64_state *>st))
+
+
+cdef class PCG64(BitGenerator):
+ """
+    PCG64(seed=None)
+
+ BitGenerator for the PCG-64 pseudo-random number generator.
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], SeedSequence}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `SeedSequence` to derive the initial `BitGenerator` state. One may also
+ pass in a `SeedSequence` instance.
+
+ Notes
+ -----
+ PCG-64 is a 128-bit implementation of O'Neill's permutation congruential
+ generator ([1]_, [2]_). PCG-64 has a period of :math:`2^{128}` and supports
+ advancing an arbitrary number of steps as well as :math:`2^{127}` streams.
+ The specific member of the PCG family that we use is PCG XSL RR 128/64
+ as described in the paper ([2]_).
+
+ ``PCG64`` provides a capsule containing function pointers that produce
+    doubles, and unsigned 32- and 64-bit integers. These are not
+ directly consumable in Python and must be consumed by a ``Generator``
+ or similar object that supports low-level access.
+
+ Supports the method :meth:`advance` to advance the RNG an arbitrary number of
+    steps. The state of the PCG-64 RNG is represented by two 128-bit unsigned
+ integers.
+
+ **State and Seeding**
+
+    The ``PCG64`` state vector consists of two unsigned 128-bit values,
+ which are represented externally as Python ints. One is the state of the
+ PRNG, which is advanced by a linear congruential generator (LCG). The
+ second is a fixed odd increment used in the LCG.
+
+ The input seed is processed by `SeedSequence` to generate both values. The
+ increment is not independently settable.
+
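+    For example, the full state can be saved and restored through the
+    ``state`` property (a sketch)::
+
+        bg = PCG64(1234)
+        saved = bg.state      # dict holding 128-bit 'state' and 'inc' ints
+        bg.advance(10)
+        bg.state = saved      # rewind to the saved point
+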
+ **Parallel Features**
+
+ The preferred way to use a BitGenerator in parallel applications is to use
+ the `SeedSequence.spawn` method to obtain entropy values, and to use these
+ to generate new BitGenerators:
+
+ >>> from numpy.random import Generator, PCG64, SeedSequence
+ >>> sg = SeedSequence(1234)
+ >>> rg = [Generator(PCG64(s)) for s in sg.spawn(10)]
+
+ **Compatibility Guarantee**
+
+    ``PCG64`` makes a guarantee that a fixed seed will always produce
+ the same random integer stream.
+
+ References
+ ----------
+ .. [1] `"PCG, A Family of Better Random Number Generators"
+ <http://www.pcg-random.org/>`_
+ .. [2] O'Neill, Melissa E. `"PCG: A Family of Simple Fast Space-Efficient
+ Statistically Good Algorithms for Random Number Generation"
+ <https://www.cs.hmc.edu/tr/hmc-cs-2014-0905.pdf>`_
+ """
+
+ cdef pcg64_state rng_state
+ cdef pcg64_random_t pcg64_random_state
+
+ def __init__(self, seed=None):
+ BitGenerator.__init__(self, seed)
+ self.rng_state.pcg_state = &self.pcg64_random_state
+
+ self._bitgen.state = <void *>&self.rng_state
+ self._bitgen.next_uint64 = &pcg64_uint64
+ self._bitgen.next_uint32 = &pcg64_uint32
+ self._bitgen.next_double = &pcg64_double
+ self._bitgen.next_raw = &pcg64_uint64
+ # Seed the _bitgen
+ val = self._seed_seq.generate_state(4, np.uint64)
+ pcg64_set_seed(&self.rng_state,
+ <uint64_t *>np.PyArray_DATA(val),
+ (<uint64_t *>np.PyArray_DATA(val) + 2))
+ self._reset_state_variables()
+
+ cdef _reset_state_variables(self):
+ self.rng_state.has_uint32 = 0
+ self.rng_state.uinteger = 0
+
+ cdef jump_inplace(self, jumps):
+ """
+ Jump state in-place
+ Not part of public API
+
+ Parameters
+ ----------
+ jumps : integer, positive
+ Number of times to jump the state of the rng.
+
+ Notes
+ -----
+        The step size is ``(phi - 1) * 2**128``, where ``phi`` is the
+        golden ratio.
+ """
+ step = 0x9e3779b97f4a7c15f39cc0605cedc835
+ self.advance(step * int(jumps))
+
+ def jumped(self, jumps=1):
+ """
+ jumped(jumps=1)
+
+ Returns a new bit generator with the state jumped.
+
+ Jumps the state as-if jumps * 210306068529402873165736369884012333109
+ random numbers have been generated.
+
+ Parameters
+ ----------
+ jumps : integer, positive
+ Number of times to jump the state of the bit generator returned
+
+ Returns
+ -------
+ bit_generator : PCG64
+            New instance of generator jumped ``jumps`` times
+
+ Notes
+ -----
+        The step size is ``(phi - 1) * 2**128``, where ``phi`` is the
+        golden ratio.
+ """
+ cdef PCG64 bit_generator
+
+ bit_generator = self.__class__()
+ bit_generator.state = self.state
+ bit_generator.jump_inplace(jumps)
+
+ return bit_generator
+
+ @property
+ def state(self):
+ """
+ Get or set the PRNG state
+
+ Returns
+ -------
+ state : dict
+ Dictionary containing the information required to describe the
+ state of the PRNG
+ """
+ cdef np.ndarray state_vec
+ cdef int has_uint32
+ cdef uint32_t uinteger
+
+ # state_vec is state.high, state.low, inc.high, inc.low
+ state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
+ pcg64_get_state(&self.rng_state,
+ <uint64_t *>np.PyArray_DATA(state_vec),
+ &has_uint32, &uinteger)
+ state = int(state_vec[0]) * 2**64 + int(state_vec[1])
+ inc = int(state_vec[2]) * 2**64 + int(state_vec[3])
+ return {'bit_generator': self.__class__.__name__,
+ 'state': {'state': state, 'inc': inc},
+ 'has_uint32': has_uint32,
+ 'uinteger': uinteger}
+
+ @state.setter
+ def state(self, value):
+ cdef np.ndarray state_vec
+ cdef int has_uint32
+ cdef uint32_t uinteger
+ if not isinstance(value, dict):
+ raise TypeError('state must be a dict')
+ bitgen = value.get('bit_generator', '')
+ if bitgen != self.__class__.__name__:
+ raise ValueError('state must be for a {0} '
+ 'RNG'.format(self.__class__.__name__))
+ state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
+ state_vec[0] = value['state']['state'] // 2 ** 64
+ state_vec[1] = value['state']['state'] % 2 ** 64
+ state_vec[2] = value['state']['inc'] // 2 ** 64
+ state_vec[3] = value['state']['inc'] % 2 ** 64
+ has_uint32 = value['has_uint32']
+ uinteger = value['uinteger']
+ pcg64_set_state(&self.rng_state,
+ <uint64_t *>np.PyArray_DATA(state_vec),
+ has_uint32, uinteger)
+
+ def advance(self, delta):
+ """
+ advance(delta)
+
+ Advance the underlying RNG as-if delta draws have occurred.
+
+ Parameters
+ ----------
+ delta : integer, positive
+            Number of draws to advance the RNG. Must be less than the
+            size of the state variable in the underlying RNG.
+
+ Returns
+ -------
+ self : PCG64
+ RNG advanced delta steps
+
+ Notes
+ -----
+        Advancing an RNG updates the underlying RNG state as-if a given
+        number of calls to the underlying RNG have been made. In general
+        there is not a one-to-one relationship between the number of output
+ random values from a particular distribution and the number of
+ draws from the core RNG. This occurs for two reasons:
+
+ * The random values are simulated using a rejection-based method
+ and so, on average, more than one value from the underlying
+          RNG is required to generate a single draw.
+ * The number of bits required to generate a simulated value
+ differs from the number of bits generated by the underlying
+ RNG. For example, two 16-bit integer values can be simulated
+ from a single draw of a 32-bit RNG.
+
+ Advancing the RNG state resets any pre-computed random numbers.
+ This is required to ensure exact reproducibility.
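+
+        For example (a sketch; ``advance`` returns the bit generator, so
+        calls can be chained)::
+
+            bg = PCG64(12345)
+            bg.advance(1000)   # as-if 1000 draws have occurred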
+ """
+ delta = wrap_int(delta, 128)
+
+ cdef np.ndarray d = np.empty(2, dtype=np.uint64)
+ d[0] = delta // 2**64
+ d[1] = delta % 2**64
+ pcg64_advance(&self.rng_state, <uint64_t *>np.PyArray_DATA(d))
+ self._reset_state_variables()
+ return self
--- /dev/null
+from cpython.pycapsule cimport PyCapsule_New
+
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
+
+import numpy as np
+cimport numpy as np
+
+from libc.stdint cimport uint32_t, uint64_t
+from ._common cimport uint64_to_double, int_to_array, wrap_int
+from numpy.random cimport BitGenerator
+
+__all__ = ['Philox']
+
+np.import_array()
+
+DEF PHILOX_BUFFER_SIZE=4
+
+cdef extern from 'src/philox/philox.h':
+ struct s_r123array2x64:
+ uint64_t v[2]
+
+ struct s_r123array4x64:
+ uint64_t v[4]
+
+ ctypedef s_r123array4x64 r123array4x64
+ ctypedef s_r123array2x64 r123array2x64
+
+ ctypedef r123array4x64 philox4x64_ctr_t
+ ctypedef r123array2x64 philox4x64_key_t
+
+ struct s_philox_state:
+ philox4x64_ctr_t *ctr
+ philox4x64_key_t *key
+ int buffer_pos
+ uint64_t buffer[PHILOX_BUFFER_SIZE]
+ int has_uint32
+ uint32_t uinteger
+
+ ctypedef s_philox_state philox_state
+
+ uint64_t philox_next64(philox_state *state) nogil
+ uint32_t philox_next32(philox_state *state) nogil
+ void philox_jump(philox_state *state)
+ void philox_advance(uint64_t *step, philox_state *state)
+
+
+cdef uint64_t philox_uint64(void *st) nogil:
+ return philox_next64(<philox_state *> st)
+
+cdef uint32_t philox_uint32(void *st) nogil:
+ return philox_next32(<philox_state *> st)
+
+cdef double philox_double(void *st) nogil:
+ return uint64_to_double(philox_next64(<philox_state *> st))
+
+cdef class Philox(BitGenerator):
+ """
+ Philox(seed=None, counter=None, key=None)
+
+ Container for the Philox (4x64) pseudo-random number generator.
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], SeedSequence}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `SeedSequence` to derive the initial `BitGenerator` state. One may also
+ pass in a `SeedSequence` instance.
+ counter : {None, int, array_like}, optional
+        Counter to use in the Philox state. Can be either
+        a Python int in [0, 2**256) or a 4-element uint64 array.
+        If not provided, the counter is initialized at 0.
+ key : {None, int, array_like}, optional
+ Key to use in the Philox state. Unlike ``seed``, the value in key is
+ directly set. Can be either a Python int in [0, 2**128) or a 2-element
+ uint64 array. `key` and ``seed`` cannot both be used.
+
+ Attributes
+ ----------
+    lock : threading.Lock
+        Lock instance that is shared so that the same bit generator can
+ be used in multiple Generators without corrupting the state. Code that
+ generates values from a bit generator should hold the bit generator's
+ lock.
+
+ Notes
+ -----
+ Philox is a 64-bit PRNG that uses a counter-based design based on weaker
+ (and faster) versions of cryptographic functions [1]_. Instances using
+ different values of the key produce independent sequences. Philox has a
+ period of :math:`2^{256} - 1` and supports arbitrary advancing and jumping
+ the sequence in increments of :math:`2^{128}`. These features allow
+ multiple non-overlapping sequences to be generated.
+
+ ``Philox`` provides a capsule containing function pointers that produce
+    doubles, and unsigned 32- and 64-bit integers. These are not
+ directly consumable in Python and must be consumed by a ``Generator``
+ or similar object that supports low-level access.
+
+ **State and Seeding**
+
+ The ``Philox`` state vector consists of a 256-bit value encoded as
+ a 4-element uint64 array and a 128-bit value encoded as a 2-element uint64
+    array. The first is a counter, which is incremented by 1 for every 4
+    64-bit random numbers produced. The second is a key, which determines
+    the sequence produced. Using different keys produces independent
+    sequences.
+
+ The input ``seed`` is processed by `SeedSequence` to generate the key. The
+ counter is set to 0.
+
+ Alternately, one can omit the ``seed`` parameter and set the ``key`` and
+ ``counter`` directly.
+
+ **Parallel Features**
+
+ The preferred way to use a BitGenerator in parallel applications is to use
+ the `SeedSequence.spawn` method to obtain entropy values, and to use these
+ to generate new BitGenerators:
+
+ >>> from numpy.random import Generator, Philox, SeedSequence
+ >>> sg = SeedSequence(1234)
+ >>> rg = [Generator(Philox(s)) for s in sg.spawn(10)]
+
+ ``Philox`` can be used in parallel applications by calling the ``jumped``
+    method to advance the state as-if :math:`2^{128}` random numbers have
+ been generated. Alternatively, ``advance`` can be used to advance the
+ counter for any positive step in [0, 2**256). When using ``jumped``, all
+ generators should be chained to ensure that the segments come from the same
+ sequence.
+
+ >>> from numpy.random import Generator, Philox
+ >>> bit_generator = Philox(1234)
+ >>> rg = []
+ >>> for _ in range(10):
+ ... rg.append(Generator(bit_generator))
+ ... bit_generator = bit_generator.jumped()
+
+ Alternatively, ``Philox`` can be used in parallel applications by using
+    a sequence of distinct keys, where each instance uses a different key.
+
+ >>> key = 2**96 + 2**33 + 2**17 + 2**9
+ >>> rg = [Generator(Philox(key=key+i)) for i in range(10)]
+
+ **Compatibility Guarantee**
+
+ ``Philox`` makes a guarantee that a fixed ``seed`` will always produce
+ the same random integer stream.
+
+ Examples
+ --------
+ >>> from numpy.random import Generator, Philox
+ >>> rg = Generator(Philox(1234))
+ >>> rg.standard_normal()
+ 0.123 # random
+
+ References
+ ----------
+ .. [1] John K. Salmon, Mark A. Moraes, Ron O. Dror, and David E. Shaw,
+ "Parallel Random Numbers: As Easy as 1, 2, 3," Proceedings of
+ the International Conference for High Performance Computing,
+ Networking, Storage and Analysis (SC11), New York, NY: ACM, 2011.
+ """
+ cdef philox_state rng_state
+ cdef philox4x64_key_t philox_key
+ cdef philox4x64_ctr_t philox_ctr
+
+ def __init__(self, seed=None, counter=None, key=None):
+ if seed is not None and key is not None:
+            raise ValueError('seed and key cannot both be used')
+ BitGenerator.__init__(self, seed)
+ self.rng_state.ctr = &self.philox_ctr
+ self.rng_state.key = &self.philox_key
+ if key is not None:
+ key = int_to_array(key, 'key', 128, 64)
+ for i in range(2):
+ self.rng_state.key.v[i] = key[i]
+ # The seed sequence is invalid.
+ self._seed_seq = None
+ else:
+ key = self._seed_seq.generate_state(2, np.uint64)
+ for i in range(2):
+ self.rng_state.key.v[i] = key[i]
+ counter = 0 if counter is None else counter
+ counter = int_to_array(counter, 'counter', 256, 64)
+ for i in range(4):
+ self.rng_state.ctr.v[i] = counter[i]
+
+ self._reset_state_variables()
+
+ self._bitgen.state = <void *>&self.rng_state
+ self._bitgen.next_uint64 = &philox_uint64
+ self._bitgen.next_uint32 = &philox_uint32
+ self._bitgen.next_double = &philox_double
+ self._bitgen.next_raw = &philox_uint64
+
+ cdef _reset_state_variables(self):
+ self.rng_state.has_uint32 = 0
+ self.rng_state.uinteger = 0
+ self.rng_state.buffer_pos = PHILOX_BUFFER_SIZE
+ for i in range(PHILOX_BUFFER_SIZE):
+ self.rng_state.buffer[i] = 0
+
+ @property
+ def state(self):
+ """
+ Get or set the PRNG state
+
+ Returns
+ -------
+ state : dict
+ Dictionary containing the information required to describe the
+ state of the PRNG
+ """
+ ctr = np.empty(4, dtype=np.uint64)
+ key = np.empty(2, dtype=np.uint64)
+ buffer = np.empty(PHILOX_BUFFER_SIZE, dtype=np.uint64)
+ for i in range(4):
+ ctr[i] = self.rng_state.ctr.v[i]
+ if i < 2:
+ key[i] = self.rng_state.key.v[i]
+ for i in range(PHILOX_BUFFER_SIZE):
+ buffer[i] = self.rng_state.buffer[i]
+
+ state = {'counter': ctr, 'key': key}
+ return {'bit_generator': self.__class__.__name__,
+ 'state': state,
+ 'buffer': buffer,
+ 'buffer_pos': self.rng_state.buffer_pos,
+ 'has_uint32': self.rng_state.has_uint32,
+ 'uinteger': self.rng_state.uinteger}
+
+ @state.setter
+ def state(self, value):
+ if not isinstance(value, dict):
+ raise TypeError('state must be a dict')
+ bitgen = value.get('bit_generator', '')
+ if bitgen != self.__class__.__name__:
+ raise ValueError('state must be for a {0} '
+ 'PRNG'.format(self.__class__.__name__))
+ for i in range(4):
+ self.rng_state.ctr.v[i] = <uint64_t> value['state']['counter'][i]
+ if i < 2:
+ self.rng_state.key.v[i] = <uint64_t> value['state']['key'][i]
+ for i in range(PHILOX_BUFFER_SIZE):
+ self.rng_state.buffer[i] = <uint64_t> value['buffer'][i]
+
+ self.rng_state.has_uint32 = value['has_uint32']
+ self.rng_state.uinteger = value['uinteger']
+ self.rng_state.buffer_pos = value['buffer_pos']
+
+ cdef jump_inplace(self, iter):
+ """
+ Jump state in-place
+
+ Not part of public API
+
+ Parameters
+ ----------
+ iter : integer, positive
+ Number of times to jump the state of the rng.
+ """
+ self.advance(iter * int(2 ** 128))
+
+ def jumped(self, jumps=1):
+ """
+ jumped(jumps=1)
+
+        Returns a new bit generator with the state jumped.
+
+        The state of the returned bit generator is jumped as-if
+        2**(128 * jumps) random numbers have been generated.
+
+ Parameters
+ ----------
+ jumps : integer, positive
+ Number of times to jump the state of the bit generator returned
+
+ Returns
+ -------
+ bit_generator : Philox
+            New instance of generator jumped ``jumps`` times
+ """
+ cdef Philox bit_generator
+
+ bit_generator = self.__class__()
+ bit_generator.state = self.state
+ bit_generator.jump_inplace(jumps)
+
+ return bit_generator
+
+ def advance(self, delta):
+ """
+ advance(delta)
+
+ Advance the underlying RNG as-if delta draws have occurred.
+
+ Parameters
+ ----------
+ delta : integer, positive
+            Number of draws to advance the RNG. Must be less than the
+            size of the state variable in the underlying RNG.
+
+ Returns
+ -------
+ self : Philox
+ RNG advanced delta steps
+
+ Notes
+ -----
+        Advancing an RNG updates the underlying RNG state as-if a given
+        number of calls to the underlying RNG have been made. In general
+        there is not a one-to-one relationship between the number of output
+ random values from a particular distribution and the number of
+ draws from the core RNG. This occurs for two reasons:
+
+ * The random values are simulated using a rejection-based method
+ and so, on average, more than one value from the underlying
+          RNG is required to generate a single draw.
+ * The number of bits required to generate a simulated value
+ differs from the number of bits generated by the underlying
+ RNG. For example, two 16-bit integer values can be simulated
+ from a single draw of a 32-bit RNG.
+
+ Advancing the RNG state resets any pre-computed random numbers.
+ This is required to ensure exact reproducibility.
+ """
+ delta = wrap_int(delta, 256)
+
+ cdef np.ndarray delta_a
+ delta_a = int_to_array(delta, 'step', 256, 64)
+ philox_advance(<uint64_t *> delta_a.data, &self.rng_state)
+ self._reset_state_variables()
+ return self
from .mtrand import RandomState
-from .philox import Philox
-from .pcg64 import PCG64
-from .sfc64 import SFC64
+from ._philox import Philox
+from ._pcg64 import PCG64
+from ._sfc64 import SFC64
-from .generator import Generator
-from .mt19937 import MT19937
+from ._generator import Generator
+from ._mt19937 import MT19937
BitGenerators = {'MT19937': MT19937,
'PCG64': PCG64,
--- /dev/null
+import numpy as np
+cimport numpy as np
+
+from libc.stdint cimport uint32_t, uint64_t
+from ._common cimport uint64_to_double
+from numpy.random cimport BitGenerator
+
+__all__ = ['SFC64']
+
+cdef extern from "src/sfc64/sfc64.h":
+ struct s_sfc64_state:
+ uint64_t s[4]
+ int has_uint32
+ uint32_t uinteger
+
+ ctypedef s_sfc64_state sfc64_state
+ uint64_t sfc64_next64(sfc64_state *state) nogil
+ uint32_t sfc64_next32(sfc64_state *state) nogil
+ void sfc64_set_seed(sfc64_state *state, uint64_t *seed)
+ void sfc64_get_state(sfc64_state *state, uint64_t *state_arr, int *has_uint32, uint32_t *uinteger)
+ void sfc64_set_state(sfc64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger)
+
+
+cdef uint64_t sfc64_uint64(void* st) nogil:
+ return sfc64_next64(<sfc64_state *>st)
+
+cdef uint32_t sfc64_uint32(void *st) nogil:
+ return sfc64_next32(<sfc64_state *> st)
+
+cdef double sfc64_double(void* st) nogil:
+ return uint64_to_double(sfc64_next64(<sfc64_state *>st))
+
+
+cdef class SFC64(BitGenerator):
+ """
+ SFC64(seed=None)
+
+ BitGenerator for Chris Doty-Humphrey's Small Fast Chaotic PRNG.
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], SeedSequence}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `SeedSequence` to derive the initial `BitGenerator` state. One may also
+ pass in a `SeedSequence` instance.
+
+ Notes
+ -----
+ ``SFC64`` is a 256-bit implementation of Chris Doty-Humphrey's Small Fast
+ Chaotic PRNG ([1]_). ``SFC64`` has a few different cycles that one might be
+ on, depending on the seed; the expected period will be about
+ :math:`2^{255}` ([2]_). ``SFC64`` incorporates a 64-bit counter which means
+ that the absolute minimum cycle length is :math:`2^{64}` and that distinct
+ seeds will not run into each other for at least :math:`2^{64}` iterations.
+
+ ``SFC64`` provides a capsule containing function pointers that produce
+    doubles, and unsigned 32- and 64-bit integers. These are not
+ directly consumable in Python and must be consumed by a ``Generator``
+ or similar object that supports low-level access.
+
+ **State and Seeding**
+
+ The ``SFC64`` state vector consists of 4 unsigned 64-bit values. The last
+ is a 64-bit counter that increments by 1 each iteration.
+
+ The input seed is processed by `SeedSequence` to generate the first
+    3 values, then the ``SFC64`` algorithm is iterated a small number of
+    times to mix the state.
+
+ **Compatibility Guarantee**
+
+ ``SFC64`` makes a guarantee that a fixed seed will always produce the same
+ random integer stream.
+
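+    Examples
+    --------
+    A minimal sketch of typical usage; the value shown is random:
+
+    >>> from numpy.random import Generator, SFC64
+    >>> rg = Generator(SFC64(1234))
+    >>> rg.standard_normal()
+    0.123 # random
+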
+ References
+ ----------
+ .. [1] `"PractRand"
+ <http://pracrand.sourceforge.net/RNG_engines.txt>`_
+ .. [2] `"Random Invertible Mapping Statistics"
+ <http://www.pcg-random.org/posts/random-invertible-mapping-statistics.html>`_
+ """
+
+ cdef sfc64_state rng_state
+
+ def __init__(self, seed=None):
+ BitGenerator.__init__(self, seed)
+ self._bitgen.state = <void *>&self.rng_state
+ self._bitgen.next_uint64 = &sfc64_uint64
+ self._bitgen.next_uint32 = &sfc64_uint32
+ self._bitgen.next_double = &sfc64_double
+ self._bitgen.next_raw = &sfc64_uint64
+ # Seed the _bitgen
+ val = self._seed_seq.generate_state(3, np.uint64)
+ sfc64_set_seed(&self.rng_state, <uint64_t*>np.PyArray_DATA(val))
+ self._reset_state_variables()
+
+ cdef _reset_state_variables(self):
+ self.rng_state.has_uint32 = 0
+ self.rng_state.uinteger = 0
+
+ @property
+ def state(self):
+ """
+ Get or set the PRNG state
+
+ Returns
+ -------
+ state : dict
+ Dictionary containing the information required to describe the
+ state of the PRNG
+ """
+ cdef np.ndarray state_vec
+ cdef int has_uint32
+ cdef uint32_t uinteger
+
+ state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
+ sfc64_get_state(&self.rng_state,
+ <uint64_t *>np.PyArray_DATA(state_vec),
+ &has_uint32, &uinteger)
+ return {'bit_generator': self.__class__.__name__,
+ 'state': {'state': state_vec},
+ 'has_uint32': has_uint32,
+ 'uinteger': uinteger}
+
+ @state.setter
+ def state(self, value):
+ cdef np.ndarray state_vec
+ cdef int has_uint32
+ cdef uint32_t uinteger
+ if not isinstance(value, dict):
+ raise TypeError('state must be a dict')
+ bitgen = value.get('bit_generator', '')
+ if bitgen != self.__class__.__name__:
+ raise ValueError('state must be for a {0} '
+ 'RNG'.format(self.__class__.__name__))
+ state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
+ state_vec[:] = value['state']['state']
+ has_uint32 = value['has_uint32']
+ uinteger = value['uinteger']
+ sfc64_set_state(&self.rng_state,
+ <uint64_t *>np.PyArray_DATA(state_vec),
+ has_uint32, uinteger)
+++ /dev/null
-
-from .common cimport bitgen_t, uint32_t
-cimport numpy as np
-
-cdef class BitGenerator():
- cdef readonly object _seed_seq
- cdef readonly object lock
- cdef bitgen_t _bitgen
- cdef readonly object _ctypes
- cdef readonly object _cffi
- cdef readonly object capsule
-
-
-cdef class SeedSequence():
- cdef readonly object entropy
- cdef readonly tuple spawn_key
- cdef readonly uint32_t pool_size
- cdef readonly object pool
- cdef readonly uint32_t n_children_spawned
-
- cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
- np.ndarray[np.npy_uint32, ndim=1] entropy_array)
- cdef get_assembled_entropy(self)
-
-cdef class SeedlessSequence():
- pass
+++ /dev/null
-"""
-BitGenerator base class and SeedSequence used to seed the BitGenerators.
-
-SeedSequence is derived from Melissa E. O'Neill's C++11 `std::seed_seq`
-implementation, as it has a lot of nice properties that we want.
-
-https://gist.github.com/imneme/540829265469e673d045
-http://www.pcg-random.org/posts/developing-a-seed_seq-alternative.html
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Melissa E. O'Neill
-Copyright (c) 2019 NumPy Developers
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-"""
-
-import abc
-import sys
-from itertools import cycle
-import re
-
-try:
- from secrets import randbits
-except ImportError:
- # secrets unavailable on python 3.5 and before
- from random import SystemRandom
- randbits = SystemRandom().getrandbits
-
-try:
- from threading import Lock
-except ImportError:
- from dummy_threading import Lock
-
-from cpython.pycapsule cimport PyCapsule_New
-
-import numpy as np
-cimport numpy as np
-
-from libc.stdint cimport uint32_t
-from .common cimport (random_raw, benchmark, prepare_ctypes, prepare_cffi)
-from .distributions cimport bitgen_t
-
-__all__ = ['SeedSequence', 'BitGenerator']
-
-np.import_array()
-
-DECIMAL_RE = re.compile(r'[0-9]+')
-
-cdef uint32_t DEFAULT_POOL_SIZE = 4 # Appears also in docstring for pool_size
-cdef uint32_t INIT_A = 0x43b0d7e5
-cdef uint32_t MULT_A = 0x931e8875
-cdef uint32_t INIT_B = 0x8b51f9dd
-cdef uint32_t MULT_B = 0x58f38ded
-cdef uint32_t MIX_MULT_L = 0xca01f9dd
-cdef uint32_t MIX_MULT_R = 0x4973f715
-cdef uint32_t XSHIFT = np.dtype(np.uint32).itemsize * 8 // 2
-cdef uint32_t MASK32 = 0xFFFFFFFF
-
-def _int_to_uint32_array(n):
- arr = []
- if n < 0:
- raise ValueError("expected non-negative integer")
- if n == 0:
- arr.append(np.uint32(n))
- if isinstance(n, np.unsignedinteger):
- # Cannot do n & MASK32, convert to python int
- n = int(n)
- while n > 0:
- arr.append(np.uint32(n & MASK32))
- n //= (2**32)
- return np.array(arr, dtype=np.uint32)
-
-def _coerce_to_uint32_array(x):
- """ Coerce an input to a uint32 array.
-
- If a `uint32` array, pass it through directly.
- If a non-negative integer, then break it up into `uint32` words, lowest
- bits first.
- If a string starting with "0x", then interpret as a hex integer, as above.
- If a string of decimal digits, interpret as a decimal integer, as above.
- If a sequence of ints or strings, interpret each element as above and
- concatenate.
-
- Note that the handling of `int64` or `uint64` arrays are not just
- straightforward views as `uint32` arrays. If an element is small enough to
- fit into a `uint32`, then it will only take up one `uint32` element in the
- output. This is to make sure that the interpretation of a sequence of
- integers is the same regardless of numpy's default integer type, which
- differs on different platforms.
-
- Parameters
- ----------
- x : int, str, sequence of int or str
-
- Returns
- -------
- seed_array : uint32 array
-
- Examples
- --------
- >>> import numpy as np
- >>> from numpy.random.bit_generator import _coerce_to_uint32_array
- >>> _coerce_to_uint32_array(12345)
- array([12345], dtype=uint32)
- >>> _coerce_to_uint32_array('12345')
- array([12345], dtype=uint32)
- >>> _coerce_to_uint32_array('0x12345')
- array([74565], dtype=uint32)
- >>> _coerce_to_uint32_array([12345, '67890'])
- array([12345, 67890], dtype=uint32)
- >>> _coerce_to_uint32_array(np.array([12345, 67890], dtype=np.uint32))
- array([12345, 67890], dtype=uint32)
- >>> _coerce_to_uint32_array(np.array([12345, 67890], dtype=np.int64))
- array([12345, 67890], dtype=uint32)
- >>> _coerce_to_uint32_array([12345, 0x10deadbeef, 67890, 0xdeadbeef])
- array([ 12345, 3735928559, 16, 67890, 3735928559],
- dtype=uint32)
- >>> _coerce_to_uint32_array(1234567890123456789012345678901234567890)
- array([3460238034, 2898026390, 3235640248, 2697535605, 3],
- dtype=uint32)
- """
- if isinstance(x, np.ndarray) and x.dtype == np.dtype(np.uint32):
- return x.copy()
- elif isinstance(x, str):
- if x.startswith('0x'):
- x = int(x, base=16)
- elif DECIMAL_RE.match(x):
- x = int(x)
- else:
- raise ValueError("unrecognized seed string")
- if isinstance(x, (int, np.integer)):
- return _int_to_uint32_array(x)
- elif isinstance(x, (float, np.inexact)):
- raise TypeError('seed must be integer')
- else:
- if len(x) == 0:
- return np.array([], dtype=np.uint32)
- # Should be a sequence of interpretable-as-ints. Convert each one to
- # a uint32 array and concatenate.
- subseqs = [_coerce_to_uint32_array(v) for v in x]
- return np.concatenate(subseqs)
-
-
-cdef uint32_t hashmix(uint32_t value, uint32_t * hash_const):
- # We are modifying the multiplier as we go along, so it is input-output
- value ^= hash_const[0]
- hash_const[0] *= MULT_A
- value *= hash_const[0]
- value ^= value >> XSHIFT
- return value
-
-cdef uint32_t mix(uint32_t x, uint32_t y):
- cdef uint32_t result = (MIX_MULT_L * x - MIX_MULT_R * y)
- result ^= result >> XSHIFT
- return result
-
-
-class ISeedSequence(abc.ABC):
- """
- Abstract base class for seed sequences.
-
- ``BitGenerator`` implementations should treat any object that adheres to
- this interface as a seed sequence.
-
- See Also
- --------
- SeedSequence, SeedlessSeedSequence
- """
-
- @abc.abstractmethod
- def generate_state(self, n_words, dtype=np.uint32):
- """
- generate_state(n_words, dtype=np.uint32)
-
- Return the requested number of words for PRNG seeding.
-
- A BitGenerator should call this method in its constructor with
- an appropriate `n_words` parameter to properly seed itself.
-
- Parameters
- ----------
- n_words : int
- dtype : np.uint32 or np.uint64, optional
- The size of each word. This should only be either `uint32` or
- `uint64`. Strings (`'uint32'`, `'uint64'`) are fine. Note that
- requesting `uint64` will draw twice as many bits as `uint32` for
- the same `n_words`. This is a convenience for `BitGenerator`s that
- express their states as `uint64` arrays.
-
- Returns
- -------
- state : uint32 or uint64 array, shape=(n_words,)
- """
-
-
-class ISpawnableSeedSequence(ISeedSequence):
- """
- Abstract base class for seed sequences that can spawn.
- """
-
- @abc.abstractmethod
- def spawn(self, n_children):
- """
- spawn(n_children)
-
- Spawn a number of child `SeedSequence` s by extending the
- `spawn_key`.
-
- Parameters
- ----------
- n_children : int
-
- Returns
- -------
- seqs : list of `SeedSequence` s
- """
-
-
-cdef class SeedlessSeedSequence():
- """
- A seed sequence for BitGenerators with no need for seed state.
-
- See Also
- --------
- SeedSequence, ISeedSequence
- """
-
- def generate_state(self, n_words, dtype=np.uint32):
- raise NotImplementedError('seedless SeedSequences cannot generate state')
-
- def spawn(self, n_children):
- return [self] * n_children
-
-
-# We cannot directly subclass a `cdef class` type from an `ABC` in Cython, so
-# we must register it after the fact.
-ISpawnableSeedSequence.register(SeedlessSeedSequence)
-
-
-cdef class SeedSequence():
- """
- SeedSequence(entropy=None, *, spawn_key=(), pool_size=4)
-
- SeedSequence mixes sources of entropy in a reproducible way to set the
- initial state for independent and very probably non-overlapping
- BitGenerators.
-
- Once the SeedSequence is instantiated, you can call the `generate_state`
- method to get an appropriately sized seed. Calling `spawn(n) <spawn>` will
- create ``n`` SeedSequences that can be used to seed independent
- BitGenerators, e.g. for different threads.
-
- Parameters
- ----------
- entropy : {None, int, sequence[int]}, optional
- The entropy for creating a `SeedSequence`.
- spawn_key : {(), sequence[int]}, optional
- A third source of entropy, used internally when calling
- `SeedSequence.spawn`.
- pool_size : {int}, optional
- Size of the pooled entropy to store. Default is 4 to give a 128-bit
- entropy pool. 8 (for 256 bits) is another reasonable choice if working
- with larger PRNGs, but there is very little to be gained by selecting
- another value.
- n_children_spawned : {int}, optional
- The number of children already spawned. Only pass this if
- reconstructing a `SeedSequence` from a serialized form.
-
- Notes
- -----
-
- Best practice for achieving reproducible bit streams is to use
- the default ``None`` for the initial entropy, and then use
- `SeedSequence.entropy` to log/pickle the `entropy` for reproducibility:
-
- >>> sq1 = np.random.SeedSequence()
- >>> sq1.entropy
- 243799254704924441050048792905230269161 # random
- >>> sq2 = np.random.SeedSequence(sq1.entropy)
- >>> np.all(sq1.generate_state(10) == sq2.generate_state(10))
- True
- """
-
- def __init__(self, entropy=None, *, spawn_key=(),
- pool_size=DEFAULT_POOL_SIZE, n_children_spawned=0):
- if pool_size < DEFAULT_POOL_SIZE:
- raise ValueError("The size of the entropy pool should be at least "
- f"{DEFAULT_POOL_SIZE}")
- if entropy is None:
- entropy = randbits(pool_size * 32)
- elif not isinstance(entropy, (int, np.integer, list, tuple, range,
- np.ndarray)):
- raise TypeError('SeedSequence expects int or sequence of ints for '
- 'entropy, not {}'.format(entropy))
- self.entropy = entropy
- self.spawn_key = tuple(spawn_key)
- self.pool_size = pool_size
- self.n_children_spawned = n_children_spawned
-
- self.pool = np.zeros(pool_size, dtype=np.uint32)
- self.mix_entropy(self.pool, self.get_assembled_entropy())
-
- def __repr__(self):
- lines = [
- f'{type(self).__name__}(',
- f' entropy={self.entropy!r},',
- ]
- # Omit some entries if they are left as the defaults in order to
- # simplify things.
- if self.spawn_key:
- lines.append(f' spawn_key={self.spawn_key!r},')
- if self.pool_size != DEFAULT_POOL_SIZE:
- lines.append(f' pool_size={self.pool_size!r},')
- if self.n_children_spawned != 0:
- lines.append(f' n_children_spawned={self.n_children_spawned!r},')
- lines.append(')')
- text = '\n'.join(lines)
- return text
-
- @property
- def state(self):
- return {k: getattr(self, k) for k in
- ['entropy', 'spawn_key', 'pool_size',
- 'n_children_spawned']
- if getattr(self, k) is not None}
-
- cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
- np.ndarray[np.npy_uint32, ndim=1] entropy_array):
- """ Mix in the given entropy to mixer.
-
- Parameters
- ----------
- mixer : 1D uint32 array, modified in-place
- entropy_array : 1D uint32 array
- """
- cdef uint32_t hash_const[1]
- hash_const[0] = INIT_A
-
- # Add in the entropy up to the pool size.
- for i in range(len(mixer)):
- if i < len(entropy_array):
- mixer[i] = hashmix(entropy_array[i], hash_const)
- else:
- # Our pool size is bigger than our entropy, so just keep
- # running the hash out.
- mixer[i] = hashmix(0, hash_const)
-
- # Mix all bits together so late bits can affect earlier bits.
- for i_src in range(len(mixer)):
- for i_dst in range(len(mixer)):
- if i_src != i_dst:
- mixer[i_dst] = mix(mixer[i_dst],
- hashmix(mixer[i_src], hash_const))
-
- # Add any remaining entropy, mixing each new entropy word with each
- # pool word.
- for i_src in range(len(mixer), len(entropy_array)):
- for i_dst in range(len(mixer)):
- mixer[i_dst] = mix(mixer[i_dst],
- hashmix(entropy_array[i_src], hash_const))
-
- cdef get_assembled_entropy(self):
- """ Convert and assemble all entropy sources into a uniform uint32
- array.
-
- Returns
- -------
- entropy_array : 1D uint32 array
- """
- # Convert run-entropy, program-entropy, and the spawn key into uint32
- # arrays and concatenate them.
-
- # We MUST have at least some run-entropy. The others are optional.
- assert self.entropy is not None
- run_entropy = _coerce_to_uint32_array(self.entropy)
- spawn_entropy = _coerce_to_uint32_array(self.spawn_key)
- entropy_array = np.concatenate([run_entropy, spawn_entropy])
- return entropy_array
-
- @np.errstate(over='ignore')
- def generate_state(self, n_words, dtype=np.uint32):
- """
- generate_state(n_words, dtype=np.uint32)
-
- Return the requested number of words for PRNG seeding.
-
- A BitGenerator should call this method in its constructor with
- an appropriate `n_words` parameter to properly seed itself.
-
- Parameters
- ----------
- n_words : int
- The number of words to generate.
- dtype : np.uint32 or np.uint64, optional
- The size of each word. This should only be either `uint32` or
- `uint64`. Strings (`'uint32'`, `'uint64'`) are fine. Note that
- requesting `uint64` will draw twice as many bits as `uint32` for
- the same `n_words`. This is a convenience for `BitGenerator`s that
- express their states as `uint64` arrays.
-
- Returns
- -------
- state : uint32 or uint64 array, shape=(n_words,)
- """
- cdef uint32_t hash_const = INIT_B
- cdef uint32_t data_val
-
- out_dtype = np.dtype(dtype)
- if out_dtype == np.dtype(np.uint32):
- pass
- elif out_dtype == np.dtype(np.uint64):
- n_words *= 2
- else:
- raise ValueError("only support uint32 or uint64")
- state = np.zeros(n_words, dtype=np.uint32)
- src_cycle = cycle(self.pool)
- for i_dst in range(n_words):
- data_val = next(src_cycle)
- data_val ^= hash_const
- hash_const *= MULT_B
- data_val *= hash_const
- data_val ^= data_val >> XSHIFT
- state[i_dst] = data_val
- if out_dtype == np.dtype(np.uint64):
- # For consistency across different endiannesses, view first as
- # little-endian then convert the values to the native endianness.
- state = state.astype('<u4').view('<u8').astype(np.uint64)
- return state
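-
- # A quick consistency sketch: a uint64 request draws twice as many
- # 32-bit words and packs them low word first, so for any fixed entropy
- # the two dtypes view the same bits:
- #
- #     ss = SeedSequence(12345)
- #     w32 = ss.generate_state(2, np.uint32)
- #     w64 = ss.generate_state(1, np.uint64)
- #     assert int(w64[0]) == int(w32[0]) | (int(w32[1]) << 32)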
-
- def spawn(self, n_children):
- """
- spawn(n_children)
-
- Spawn a number of child `SeedSequence` s by extending the
- `spawn_key`.
-
- Parameters
- ----------
- n_children : int
-
- Returns
- -------
- seqs : list of `SeedSequence` s
- """
- cdef uint32_t i
-
- seqs = []
- for i in range(self.n_children_spawned,
- self.n_children_spawned + n_children):
- seqs.append(type(self)(
- self.entropy,
- spawn_key=self.spawn_key + (i,),
- pool_size=self.pool_size,
- ))
- self.n_children_spawned += n_children
- return seqs
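-
- # Usage sketch: derive independent child sequences for parallel
- # streams. Each child extends this sequence's spawn_key, so a fresh
- # SeedSequence's first child has spawn_key == (0,). default_rng is
- # assumed here to be the Generator constructor from this package:
- #
- #     ss = SeedSequence(12345)
- #     children = ss.spawn(4)
- #     rngs = [np.random.default_rng(child) for child in children]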
-
-
-ISpawnableSeedSequence.register(SeedSequence)
-
-
-cdef class BitGenerator():
- """
- BitGenerator(seed=None)
-
- Base Class for generic BitGenerators, which provide a stream
- of random bits based on different algorithms. Must be overridden.
-
- Parameters
- ----------
- seed : {None, int, array_like[ints], ISeedSequence}, optional
- A seed to initialize the `BitGenerator`. If None, then fresh,
- unpredictable entropy will be pulled from the OS. If an ``int`` or
- ``array_like[ints]`` is passed, then it will be passed to
- `SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
-
- Attributes
- ----------
- lock : threading.Lock
- Lock instance that is shared so that the same BitGenerator can
- be used in multiple Generators without corrupting the state. Code that
- generates values from a bit generator should hold the bit generator's
- lock.
-
- See Also
- --------
- SeedSequence
- """
-
- def __init__(self, seed=None):
- self.lock = Lock()
- self._bitgen.state = <void *>0
- if type(self) is BitGenerator:
- raise NotImplementedError('BitGenerator is a base class and cannot be instantiated')
-
- self._ctypes = None
- self._cffi = None
-
- cdef const char *name = "BitGenerator"
- self.capsule = PyCapsule_New(<void *>&self._bitgen, name, NULL)
- if not isinstance(seed, ISeedSequence):
- seed = SeedSequence(seed)
- self._seed_seq = seed
-
- # Pickling support:
- def __getstate__(self):
- return self.state
-
- def __setstate__(self, state):
- self.state = state
-
- def __reduce__(self):
- from ._pickle import __bit_generator_ctor
- return __bit_generator_ctor, (self.state['bit_generator'],), self.state
-
- @property
- def state(self):
- """
- Get or set the PRNG state
-
- The base BitGenerator.state must be overridden by a subclass
-
- Returns
- -------
- state : dict
- Dictionary containing the information required to describe the
- state of the PRNG
- """
- raise NotImplementedError('Not implemented in base BitGenerator')
-
- @state.setter
- def state(self, value):
- raise NotImplementedError('Not implemented in base BitGenerator')
-
- def random_raw(self, size=None, output=True):
- """
- random_raw(self, size=None, output=True)
-
- Return randoms as generated by the underlying BitGenerator
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- output : bool, optional
- Whether to return the drawn values. Setting this to False draws
- and discards the values, which is useful for performance testing.
-
- Returns
- -------
- out : uint or ndarray
- Drawn samples.
-
- Notes
- -----
- This method directly exposes the raw underlying pseudo-random
- number generator. All values are returned as unsigned 64-bit
- values irrespective of the number of bits produced by the PRNG.
-
- See the class docstring for the number of bits returned.
- """
- return random_raw(&self._bitgen, self.lock, size, output)
-
- def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- '''Used in tests'''
- return benchmark(&self._bitgen, self.lock, cnt, method)
-
- @property
- def ctypes(self):
- """
- ctypes interface
-
- Returns
- -------
- interface : namedtuple
- Named tuple containing ctypes wrapper
-
- * state_address - Memory address of the state struct
- * state - pointer to the state struct
- * next_uint64 - function pointer to produce 64 bit integers
- * next_uint32 - function pointer to produce 32 bit integers
- * next_double - function pointer to produce doubles
- * bit_generator - pointer to the bit generator struct
- """
- if self._ctypes is None:
- self._ctypes = prepare_ctypes(&self._bitgen)
-
- return self._ctypes
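-
- # Sketch of a raw draw through this interface, assuming a concrete
- # subclass such as PCG64 from this package:
- #
- #     bg = PCG64(1234)
- #     itf = bg.ctypes
- #     raw = itf.next_uint64(itf.state)   # one 64-bit draw via ctypes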
-
- @property
- def cffi(self):
- """
- CFFI interface
-
- Returns
- -------
- interface : namedtuple
- Named tuple containing CFFI wrapper
-
- * state_address - Memory address of the state struct
- * state - pointer to the state struct
- * next_uint64 - function pointer to produce 64 bit integers
- * next_uint32 - function pointer to produce 32 bit integers
- * next_double - function pointer to produce doubles
- * bit_generator - pointer to the bit generator struct
- """
- if self._cffi is None:
- self._cffi = prepare_cffi(&self._bitgen)
- return self._cffi
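-
- # The CFFI bundle is used the same way (sketch; assumes the cffi
- # package is installed and a concrete subclass such as PCG64):
- #
- #     bg = PCG64(1234)
- #     itf = bg.cffi
- #     raw = itf.next_uint64(itf.state)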
+++ /dev/null
-from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
- int8_t, int16_t, int32_t, int64_t, intptr_t)
-import numpy as np
-cimport numpy as np
-ctypedef np.npy_bool bool_t
-
-from .common cimport bitgen_t
-
-cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
- """Mask generator for use in bounded random numbers"""
- # Smallest all-ones bit mask >= max_val
- cdef uint64_t mask = max_val
- mask |= mask >> 1
- mask |= mask >> 2
- mask |= mask >> 4
- mask |= mask >> 8
- mask |= mask >> 16
- mask |= mask >> 32
- return mask
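-
-# Worked example of the bit-smearing above: the shifted ORs copy the leading
-# one into every lower bit, producing the smallest all-ones mask >= max_val.
-#
-#     mask = 100          # 0b1100100
-#     mask |= mask >> 1   # 0b1110110
-#     mask |= mask >> 2   # 0b1111111; further shifts change nothing
-#     assert mask == 127
-#
-# Masked rejection sampling then draws (word & mask) and retries while the
-# result exceeds max_val, accepting with probability greater than 1/2.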
-{{
-py:
-inttypes = ('uint64','uint32','uint16','uint8','bool','int64','int32','int16','int8')
-}}
-{{for inttype in inttypes}}
-cdef object _rand_{{inttype}}(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
-{{endfor}}
+++ /dev/null
-#!python
-#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True
-
-import numpy as np
-cimport numpy as np
-
-from .distributions cimport *
-
-__all__ = []
-
-np.import_array()
-
-_integers_types = {'bool': (0, 2),
- 'int8': (-2**7, 2**7),
- 'int16': (-2**15, 2**15),
- 'int32': (-2**31, 2**31),
- 'int64': (-2**63, 2**63),
- 'uint8': (0, 2**8),
- 'uint16': (0, 2**16),
- 'uint32': (0, 2**32),
- 'uint64': (0, 2**64)}
-{{
-py:
-type_info = (('uint32', 'uint32', 'uint64', 'NPY_UINT64', 0, 0, 0, '0X100000000ULL'),
- ('uint16', 'uint16', 'uint32', 'NPY_UINT32', 1, 16, 0, '0X10000UL'),
- ('uint8', 'uint8', 'uint16', 'NPY_UINT16', 3, 8, 0, '0X100UL'),
- ('bool','bool', 'uint8', 'NPY_UINT8', 31, 1, 0, '0x2UL'),
- ('int32', 'uint32', 'uint64', 'NPY_INT64', 0, 0, '-0x80000000LL', '0x80000000LL'),
- ('int16', 'uint16', 'uint32', 'NPY_INT32', 1, 16, '-0x8000LL', '0x8000LL' ),
- ('int8', 'uint8', 'uint16', 'NPY_INT16', 3, 8, '-0x80LL', '0x80LL' ),
-)}}
-{{for nptype, utype, nptype_up, npctype, remaining, bitshift, lb, ub in type_info}}
-{{ py: otype = nptype + '_' if nptype == 'bool' else nptype }}
-cdef object _rand_{{nptype}}_broadcast(np.ndarray low, np.ndarray high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- Array path for smaller integer types
-
- This path is simpler since the high value in the half-open interval
- [low, high) must be in-range for the next larger type, {{nptype_up}}.
- Here we cast to this type for checking and then recast to {{nptype}}
- when producing the random integers.
- """
- cdef {{utype}}_t rng, last_rng, off, val, mask, out_val, is_open
- cdef uint32_t buf
- cdef {{utype}}_t *out_data
- cdef {{nptype_up}}_t low_v, high_v
- cdef np.ndarray low_arr, high_arr, out_arr
- cdef np.npy_intp i, cnt
- cdef np.broadcast it
- cdef int buf_rem = 0
-
- # Array path
- is_open = not closed
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
- if np.any(np.less(low_arr, {{lb}})):
- raise ValueError('low is out of bounds for {{nptype}}')
- if closed:
- high_comp = np.greater_equal
- low_high_comp = np.greater
- else:
- high_comp = np.greater
- low_high_comp = np.greater_equal
-
- if np.any(high_comp(high_arr, {{ub}})):
- raise ValueError('high is out of bounds for {{nptype}}')
- if np.any(low_high_comp(low_arr, high_arr)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
- high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
-
- if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.{{otype}})
- else:
- it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.{{otype}})
-
- it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
- out_data = <{{utype}}_t *>np.PyArray_DATA(out_arr)
- cnt = np.PyArray_SIZE(out_arr)
- mask = last_rng = 0
- with lock, nogil:
- for i in range(cnt):
- low_v = (<{{nptype_up}}_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- high_v = (<{{nptype_up}}_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Subtract 1 since generator produces values on the closed interval [off, off+rng]
- rng = <{{utype}}_t>((high_v - is_open) - low_v)
- off = <{{utype}}_t>(<{{nptype_up}}_t>low_v)
-
- if rng != last_rng:
- # Smallest all-ones bit mask >= rng
- mask = <{{utype}}_t>_gen_mask(rng)
-
- out_data[i] = random_buffered_bounded_{{utype}}(state, off, rng, mask, use_masked, &buf_rem, &buf)
-
- np.PyArray_MultiIter_NEXT(it)
- return out_arr
-{{endfor}}
-{{
-py:
-big_type_info = (('uint64', 'uint64', 'NPY_UINT64', '0x0ULL', '0xFFFFFFFFFFFFFFFFULL'),
- ('int64', 'uint64', 'NPY_INT64', '-0x8000000000000000LL', '0x7FFFFFFFFFFFFFFFLL' )
-)}}
-{{for nptype, utype, npctype, lb, ub in big_type_info}}
-{{ py: otype = nptype}}
-cdef object _rand_{{nptype}}_broadcast(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- Array path for 64-bit integer types
-
- Requires special treatment since the high value can be out-of-range for
- the largest (64 bit) integer type, as the generator is specified on the
- half-open interval [low, high).
-
- The internal generator does not have this issue, since it generates from
- the closed interval [low, high-1] and high-1 is always in range for the
- 64 bit integer type.
- """
-
- cdef np.ndarray low_arr, high_arr, out_arr, highm1_arr
- cdef np.npy_intp i, cnt, n
- cdef np.broadcast it
- cdef object closed_upper
- cdef uint64_t *out_data
- cdef {{nptype}}_t *highm1_data
- cdef {{nptype}}_t low_v, high_v
- cdef uint64_t rng, last_rng, val, mask, off, out_val
-
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
-
- if np.any(np.less(low_arr, {{lb}})):
- raise ValueError('low is out of bounds for {{nptype}}')
- dt = high_arr.dtype
- if closed or np.issubdtype(dt, np.integer):
- # Avoid object dtype path if already an integer
- high_lower_comp = np.less if closed else np.less_equal
- if np.any(high_lower_comp(high_arr, {{lb}})):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
- high_m1 = high_arr if closed else high_arr - dt.type(1)
- if np.any(np.greater(high_m1, {{ub}})):
- raise ValueError('high is out of bounds for {{nptype}}')
- highm1_arr = <np.ndarray>np.PyArray_FROM_OTF(high_m1, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
- else:
- # If input is object or a floating type
- highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.{{nptype}})
- highm1_data = <{{nptype}}_t *>np.PyArray_DATA(highm1_arr)
- cnt = np.PyArray_SIZE(high_arr)
- flat = high_arr.flat
- for i in range(cnt):
- # Subtract 1 since generator produces values on the closed interval [off, off+rng]
- closed_upper = int(flat[i]) - 1
- if closed_upper > {{ub}}:
- raise ValueError('high is out of bounds for {{nptype}}')
- if closed_upper < {{lb}}:
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
- highm1_data[i] = <{{nptype}}_t>closed_upper
-
- if np.any(np.greater(low_arr, highm1_arr)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- high_arr = highm1_arr
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
-
- if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.{{nptype}})
- else:
- it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.{{nptype}})
-
- it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
- out_data = <uint64_t *>np.PyArray_DATA(out_arr)
- n = np.PyArray_SIZE(out_arr)
- mask = last_rng = 0
- with lock, nogil:
- for i in range(n):
- low_v = (<{{nptype}}_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- high_v = (<{{nptype}}_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Generator produces values on the closed interval [off, off+rng]; -1 subtracted above
- rng = <{{utype}}_t>(high_v - low_v)
- off = <{{utype}}_t>(<{{nptype}}_t>low_v)
-
- if rng != last_rng:
- mask = _gen_mask(rng)
- out_data[i] = random_bounded_uint64(state, off, rng, mask, use_masked)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return out_arr
-{{endfor}}
-{{
-py:
-type_info = (('uint64', 'uint64', '0x0ULL', '0xFFFFFFFFFFFFFFFFULL'),
- ('uint32', 'uint32', '0x0UL', '0XFFFFFFFFUL'),
- ('uint16', 'uint16', '0x0UL', '0XFFFFUL'),
- ('uint8', 'uint8', '0x0UL', '0XFFUL'),
- ('bool', 'bool', '0x0UL', '0x1UL'),
- ('int64', 'uint64', '-0x8000000000000000LL', '0x7FFFFFFFFFFFFFFFL'),
- ('int32', 'uint32', '-0x80000000L', '0x7FFFFFFFL'),
- ('int16', 'uint16', '-0x8000L', '0x7FFFL' ),
- ('int8', 'uint8', '-0x80L', '0x7FL' )
-)}}
-{{for nptype, utype, lb, ub in type_info}}
-{{ py: otype = nptype + '_' if nptype == 'bool' else nptype }}
-cdef object _rand_{{nptype}}(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- _rand_{{nptype}}(low, high, size, use_masked, closed, state, lock)
-
- Return random np.{{nptype}} integers from `low` (inclusive) to `high` (exclusive).
-
- Return random integers from the "discrete uniform" distribution in the
- interval [`low`, `high`). If `high` is None (the default),
- then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.{{nptype}} type.
-
- Parameters
- ----------
- low : int or array-like
- Lowest (signed) integer to be drawn from the distribution (unless
- ``high=None``, in which case this parameter is the *highest* such
- integer).
- high : int or array-like
- If provided, one above the largest (signed) integer to be drawn from the
- distribution (see above for behavior if ``high=None``).
- size : int or tuple of ints
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- use_masked : bool
- If True, rejection sampling with a range mask is used; otherwise
- Lemire's algorithm is used.
- closed : bool
- If True, sample from the closed interval [low, high]; if False,
- sample from [low, high).
- state : bit generator
- Bit generator state to use in the core random number generators
- lock : threading.Lock
- Lock to prevent multiple threads from using a single generator
- simultaneously
-
- Returns
- -------
- out : Python scalar or ndarray of np.{{nptype}}
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` is not provided.
-
- Notes
- -----
- The internal integer generator produces values from the closed
- interval [low, high - (not closed)], i.e. [low, high - 1] when
- ``closed`` is False and [low, high] when it is True. This requires
- some care since high can be out-of-range for {{utype}}. The scalar
- path leaves integers as Python integers until the 1 has been
- subtracted, to avoid needing to cast to a larger type.
- """
- cdef np.ndarray out_arr, low_arr, high_arr
- cdef {{utype}}_t rng, off, out_val
- cdef {{utype}}_t *out_data
- cdef np.npy_intp i, n, cnt
-
- if size is not None:
- if (np.prod(size) == 0):
- return np.empty(size, dtype=np.{{nptype}})
-
- low_arr = <np.ndarray>np.array(low, copy=False)
- high_arr = <np.ndarray>np.array(high, copy=False)
- low_ndim = np.PyArray_NDIM(low_arr)
- high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
- (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
- low = int(low_arr)
- high = int(high_arr)
- # Subtract 1 since internal generator produces on closed interval [low, high]
- if not closed:
- high -= 1
-
- if low < {{lb}}:
- raise ValueError("low is out of bounds for {{nptype}}")
- if high > {{ub}}:
- raise ValueError("high is out of bounds for {{nptype}}")
- if low > high: # -1 already subtracted, closed interval
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- rng = <{{utype}}_t>(high - low)
- off = <{{utype}}_t>(<{{nptype}}_t>low)
- if size is None:
- with lock:
- random_bounded_{{utype}}_fill(state, off, rng, 1, use_masked, &out_val)
- return np.{{otype}}(<{{nptype}}_t>out_val)
- else:
- out_arr = <np.ndarray>np.empty(size, np.{{nptype}})
- cnt = np.PyArray_SIZE(out_arr)
- out_data = <{{utype}}_t *>np.PyArray_DATA(out_arr)
- with lock, nogil:
- random_bounded_{{utype}}_fill(state, off, rng, cnt, use_masked, out_data)
- return out_arr
- return _rand_{{nptype}}_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
-{{endfor}}
+++ /dev/null
-#cython: language_level=3
-
-from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
- int8_t, int16_t, int32_t, int64_t, intptr_t,
- uintptr_t)
-from libc.math cimport sqrt
-
-cdef extern from "src/bitgen.h":
- struct bitgen:
- void *state
- uint64_t (*next_uint64)(void *st) nogil
- uint32_t (*next_uint32)(void *st) nogil
- double (*next_double)(void *st) nogil
- uint64_t (*next_raw)(void *st) nogil
-
- ctypedef bitgen bitgen_t
-
-import numpy as np
-cimport numpy as np
-
-cdef double POISSON_LAM_MAX
-cdef double LEGACY_POISSON_LAM_MAX
-cdef uint64_t MAXSIZE
-
-cdef enum ConstraintType:
- CONS_NONE
- CONS_NON_NEGATIVE
- CONS_POSITIVE
- CONS_POSITIVE_NOT_NAN
- CONS_BOUNDED_0_1
- CONS_BOUNDED_0_1_NOTNAN
- CONS_BOUNDED_GT_0_1
- CONS_GT_1
- CONS_GTE_1
- CONS_POISSON
- LEGACY_CONS_POISSON
-
-ctypedef ConstraintType constraint_type
-
-cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method)
-cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output)
-cdef object prepare_cffi(bitgen_t *bitgen)
-cdef object prepare_ctypes(bitgen_t *bitgen)
-cdef int check_constraint(double val, object name, constraint_type cons) except -1
-cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1
-
-cdef extern from "src/aligned_malloc/aligned_malloc.h":
- cdef void *PyArray_realloc_aligned(void *p, size_t n)
- cdef void *PyArray_malloc_aligned(size_t n)
- cdef void *PyArray_calloc_aligned(size_t n, size_t s)
- cdef void PyArray_free_aligned(void *p)
-
-ctypedef double (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil
-ctypedef double (*random_double_0)(void *state) nogil
-ctypedef double (*random_double_1)(void *state, double a) nogil
-ctypedef double (*random_double_2)(void *state, double a, double b) nogil
-ctypedef double (*random_double_3)(void *state, double a, double b, double c) nogil
-
-ctypedef float (*random_float_0)(bitgen_t *state) nogil
-ctypedef float (*random_float_1)(bitgen_t *state, float a) nogil
-
-ctypedef int64_t (*random_uint_0)(void *state) nogil
-ctypedef int64_t (*random_uint_d)(void *state, double a) nogil
-ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) nogil
-ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) nogil
-ctypedef int64_t (*random_uint_i)(void *state, int64_t a) nogil
-ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) nogil
-
-ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) nogil
-ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) nogil
-
-ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) nogil
-ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) nogil
-
-cdef double kahan_sum(double *darr, np.npy_intp n)
-
-cdef inline double uint64_to_double(uint64_t rnd) nogil:
- return (rnd >> 11) * (1.0 / 9007199254740992.0)
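-
-# The shift keeps the top 53 bits (a double's mantissa width) and scales by
-# 2**-53 (9007199254740992 == 2**53), giving a uniform double in [0, 1).
-# For example, the all-ones draw maps to the largest double below 1:
-#
-#     rnd = 2**64 - 1
-#     assert (rnd >> 11) * (1.0 / 2**53) == 1.0 - 2.0**-53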
-
-cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out)
-
-cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out)
-
-cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out)
-
-cdef object wrap_int(object val, object bits)
-
-cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size)
-
-cdef object cont(void *func, void *state, object size, object lock, int narg,
- object a, object a_name, constraint_type a_constraint,
- object b, object b_name, constraint_type b_constraint,
- object c, object c_name, constraint_type c_constraint,
- object out)
-
-cdef object disc(void *func, void *state, object size, object lock,
- int narg_double, int narg_int64,
- object a, object a_name, constraint_type a_constraint,
- object b, object b_name, constraint_type b_constraint,
- object c, object c_name, constraint_type c_constraint)
-
-cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
- object a, object a_name, constraint_type a_constraint,
- object out)
-
-cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint,
- np.ndarray b_arr, object b_name, constraint_type b_constraint,
- np.ndarray c_arr, object c_name, constraint_type c_constraint)
-
-cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint,
- np.ndarray b_arr, object b_name, constraint_type b_constraint,
- np.ndarray c_arr, object c_name, constraint_type c_constraint)
+++ /dev/null
-#!python
-#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
-from collections import namedtuple
-from cpython cimport PyFloat_AsDouble
-import sys
-import numpy as np
-cimport numpy as np
-
-from .common cimport *
-
-__all__ = ['interface']
-
-np.import_array()
-
-interface = namedtuple('interface', ['state_address', 'state', 'next_uint64',
- 'next_uint32', 'next_double',
- 'bit_generator'])
-
-cdef double LEGACY_POISSON_LAM_MAX = <double>np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10
-cdef double POISSON_LAM_MAX = <double>np.iinfo('int64').max - np.sqrt(np.iinfo('int64').max)*10
-
-cdef uint64_t MAXSIZE = <uint64_t>sys.maxsize
-
-
-cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method):
- """Benchmark command used by BitGenerator"""
- cdef Py_ssize_t i
- if method == u'uint64':
- with lock, nogil:
- for i in range(cnt):
- bitgen.next_uint64(bitgen.state)
- elif method == u'double':
- with lock, nogil:
- for i in range(cnt):
- bitgen.next_double(bitgen.state)
- else:
- raise ValueError('Unknown method')
-
-
-cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output):
- """
- random_raw(bitgen, lock, size=None, output=True)
-
- Return randoms as generated by the underlying PRNG
-
- Parameters
- ----------
- bitgen : bitgen_t pointer
- Pointer to the bit generator struct
- lock : threading.Lock
- Lock provided by the bit generator
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- output : bool, optional
- Whether to return the drawn values. Setting this to False draws
- and discards the values, which is useful for performance testing.
-
- Returns
- -------
- out : uint or ndarray
- Drawn samples.
-
- Notes
- -----
- This method directly exposes the raw underlying pseudo-random
- number generator. All values are returned as unsigned 64-bit
- values irrespective of the number of bits produced by the PRNG.
-
- See the class docstring for the number of bits returned.
- """
- cdef np.ndarray randoms
- cdef uint64_t *randoms_data
- cdef Py_ssize_t i, n
-
- if not output:
- if size is None:
- with lock:
- bitgen.next_raw(bitgen.state)
- return None
- n = int(np.prod(size))  # m * n * k draws for size (m, n, k), per the docstring
- with lock, nogil:
- for i in range(n):
- bitgen.next_raw(bitgen.state)
- return None
-
- if size is None:
- with lock:
- return bitgen.next_raw(bitgen.state)
-
- randoms = <np.ndarray>np.empty(size, np.uint64)
- randoms_data = <uint64_t*>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
-
- with lock, nogil:
- for i in range(n):
- randoms_data[i] = bitgen.next_raw(bitgen.state)
- return randoms
-
-cdef object prepare_cffi(bitgen_t *bitgen):
- """
- Bundles the interfaces to interact with a BitGenerator using cffi
-
- Parameters
- ----------
- bitgen : pointer
- A pointer to a BitGenerator instance
-
- Returns
- -------
- interface : namedtuple
- The functions required to interface with the BitGenerator using cffi
-
- * state_address - Memory address of the state struct
- * state - pointer to the state struct
- * next_uint64 - function pointer to produce 64 bit integers
- * next_uint32 - function pointer to produce 32 bit integers
- * next_double - function pointer to produce doubles
- * bit_generator - pointer to the BitGenerator struct
- """
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi cannot be imported.')
-
- ffi = cffi.FFI()
- _cffi = interface(<uintptr_t>bitgen.state,
- ffi.cast('void *', <uintptr_t>bitgen.state),
- ffi.cast('uint64_t (*)(void *)', <uintptr_t>bitgen.next_uint64),
- ffi.cast('uint32_t (*)(void *)', <uintptr_t>bitgen.next_uint32),
- ffi.cast('double (*)(void *)', <uintptr_t>bitgen.next_double),
- ffi.cast('void *', <uintptr_t>bitgen))
- return _cffi
-
-cdef object prepare_ctypes(bitgen_t *bitgen):
- """
- Bundles the interfaces to interact with a BitGenerator using ctypes
-
- Parameters
- ----------
- bitgen : pointer
- A pointer to a BitGenerator instance
-
- Returns
- -------
- interface : namedtuple
- The functions required to interface with the BitGenerator using ctypes:
-
- * state_address - Memory address of the state struct
- * state - pointer to the state struct
- * next_uint64 - function pointer to produce 64 bit integers
- * next_uint32 - function pointer to produce 32 bit integers
- * next_double - function pointer to produce doubles
- * bit_generator - pointer to the BitGenerator struct
- """
- import ctypes
-
- _ctypes = interface(<uintptr_t>bitgen.state,
- ctypes.c_void_p(<uintptr_t>bitgen.state),
- ctypes.cast(<uintptr_t>bitgen.next_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>bitgen.next_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>bitgen.next_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<uintptr_t>bitgen))
- return _ctypes
-
-cdef double kahan_sum(double *darr, np.npy_intp n):
- cdef double c, y, t, sum
- cdef np.npy_intp i
- sum = darr[0]
- c = 0.0
- for i in range(1, n):
- y = darr[i] - c
- t = sum + y
- c = (t-sum) - y
- sum = t
- return sum
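-
-# Pure-Python sketch of the same compensation: plain summation loses every
-# term too small to move the running total, while the carried correction c
-# recovers them.
-#
-#     def kahan(vals):
-#         total, c = 0.0, 0.0
-#         for v in vals:
-#             y = v - c
-#             t = total + y
-#             c = (t - total) - y
-#             total = t
-#         return total
-#
-#     vals = [1.0] + [1e-16] * 10**6
-#     assert sum(vals) == 1.0                        # tiny terms all lost
-#     assert abs(kahan(vals) - 1.0000000001) < 1e-15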
-
-
-cdef object wrap_int(object val, object bits):
- """Wraparound to place an integer into the interval [0, 2**bits)"""
- mask = ~(~int(0) << bits)
- return val & mask
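-
-# The mask ~(~0 << bits) equals 2**bits - 1, so this reduces any Python int
-# modulo 2**bits, e.g.:
-#
-#     wrap_int(-1, 8) == 255
-#     wrap_int(2**64 + 5, 64) == 5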
-
-
-cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size):
- """Convert a large integer to an array of unsigned integers"""
- length = bits // uint_size
- value = np.asarray(value)
- if uint_size == 32:
- dtype = np.uint32
- elif uint_size == 64:
- dtype = np.uint64
- else:
- raise ValueError('Unknown uint_size')
- if value.shape == ():
- value = int(value)
- upper = int(2)**int(bits)
- if value < 0 or value >= upper:
- raise ValueError('{name} must be non-negative and '
- 'less than 2**{bits}.'.format(name=name, bits=bits))
-
- out = np.empty(length, dtype=dtype)
- for i in range(length):
- out[i] = value % 2**int(uint_size)
- value >>= int(uint_size)
- else:
- out = value.astype(dtype)
- if out.shape != (length,):
- raise ValueError('{name} must have {len} elements when using '
- 'array form'.format(name=name, len=length))
- return out
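-
-# Worked example: int_to_array(2**64 + 1, 'seed', 128, 64) splits the value
-# into little-endian 64-bit words, giving array([1, 1], dtype=uint64) with
-# the low word first; an array input is instead validated against the same
-# (bits // uint_size,) shape.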
-
-
-cdef check_output(object out, object dtype, object size):
- if out is None:
- return
- cdef np.ndarray out_array = <np.ndarray>out
- if not (np.PyArray_CHKFLAGS(out_array, np.NPY_CARRAY) or
- np.PyArray_CHKFLAGS(out_array, np.NPY_FARRAY)):
- raise ValueError('Supplied output array is not contiguous, writable or aligned.')
- if out_array.dtype != dtype:
- raise TypeError('Supplied output array has the wrong type. '
- 'Expected {0}, got {1}'.format(np.dtype(dtype), out_array.dtype))
- if size is not None:
- try:
- tup_size = tuple(size)
- except TypeError:
- tup_size = tuple([size])
- if tup_size != out.shape:
- raise ValueError('size must match out.shape when used together')
-
-
-cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out):
- cdef random_double_fill random_func = (<random_double_fill>func)
- cdef double out_val
- cdef double *out_array_data
- cdef np.ndarray out_array
- cdef np.npy_intp i, n
-
- if size is None and out is None:
- with lock:
- random_func(state, 1, &out_val)
- return out_val
-
- if out is not None:
- check_output(out, np.float64, size)
- out_array = <np.ndarray>out
- else:
- out_array = <np.ndarray>np.empty(size, np.double)
-
- n = np.PyArray_SIZE(out_array)
- out_array_data = <double *>np.PyArray_DATA(out_array)
- with lock, nogil:
- random_func(state, n, out_array_data)
- return out_array
-
-cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out):
- cdef random_float_0 random_func = (<random_float_0>func)
- cdef float *out_array_data
- cdef np.ndarray out_array
- cdef np.npy_intp i, n
-
- if size is None and out is None:
- with lock:
- return random_func(state)
-
- if out is not None:
- check_output(out, np.float32, size)
- out_array = <np.ndarray>out
- else:
- out_array = <np.ndarray>np.empty(size, np.float32)
-
- n = np.PyArray_SIZE(out_array)
- out_array_data = <float *>np.PyArray_DATA(out_array)
- with lock, nogil:
- for i in range(n):
- out_array_data[i] = random_func(state)
- return out_array
-
-cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out):
- cdef random_double_0 random_func = (<random_double_0>func)
- cdef float *out_array_data
- cdef np.ndarray out_array
- cdef np.npy_intp i, n
-
- if size is None and out is None:
- with lock:
- return <float>random_func(state)
-
- if out is not None:
- check_output(out, np.float32, size)
- out_array = <np.ndarray>out
- else:
- out_array = <np.ndarray>np.empty(size, np.float32)
-
- n = np.PyArray_SIZE(out_array)
- out_array_data = <float *>np.PyArray_DATA(out_array)
- with lock, nogil:
- for i in range(n):
- out_array_data[i] = <float>random_func(state)
- return out_array
-
-
-cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1:
- if cons == CONS_NON_NEGATIVE:
- if np.any(np.logical_and(np.logical_not(np.isnan(val)), np.signbit(val))):
- raise ValueError(name + " < 0")
- elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN:
- if cons == CONS_POSITIVE_NOT_NAN and np.any(np.isnan(val)):
- raise ValueError(name + " must not be NaN")
- elif np.any(np.less_equal(val, 0)):
- raise ValueError(name + " <= 0")
- elif cons == CONS_BOUNDED_0_1:
- if not np.all(np.greater_equal(val, 0)) or \
- not np.all(np.less_equal(val, 1)):
- raise ValueError("{0} < 0, {0} > 1 or {0} contains NaNs".format(name))
- elif cons == CONS_BOUNDED_GT_0_1:
- if not np.all(np.greater(val, 0)) or not np.all(np.less_equal(val, 1)):
- raise ValueError("{0} <= 0, {0} > 1 or {0} contains NaNs".format(name))
- elif cons == CONS_GT_1:
- if not np.all(np.greater(val, 1)):
- raise ValueError("{0} <= 1 or {0} contains NaNs".format(name))
- elif cons == CONS_GTE_1:
- if not np.all(np.greater_equal(val, 1)):
- raise ValueError("{0} < 1 or {0} contains NaNs".format(name))
- elif cons == CONS_POISSON:
- if not np.all(np.less_equal(val, POISSON_LAM_MAX)):
- raise ValueError("{0} value too large".format(name))
- elif not np.all(np.greater_equal(val, 0.0)):
- raise ValueError("{0} < 0 or {0} contains NaNs".format(name))
- elif cons == LEGACY_CONS_POISSON:
- if not np.all(np.less_equal(val, LEGACY_POISSON_LAM_MAX)):
- raise ValueError("{0} value too large".format(name))
- elif not np.all(np.greater_equal(val, 0.0)):
- raise ValueError("{0} < 0 or {0} contains NaNs".format(name))
-
- return 0
-
-
-cdef int check_constraint(double val, object name, constraint_type cons) except -1:
- cdef bint is_nan
- if cons == CONS_NON_NEGATIVE:
- if not np.isnan(val) and np.signbit(val):
- raise ValueError(name + " < 0")
- elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN:
- if cons == CONS_POSITIVE_NOT_NAN and np.isnan(val):
- raise ValueError(name + " must not be NaN")
- elif val <= 0:
- raise ValueError(name + " <= 0")
- elif cons == CONS_BOUNDED_0_1:
- if not (val >= 0) or not (val <= 1):
- raise ValueError("{0} < 0, {0} > 1 or {0} is NaN".format(name))
- elif cons == CONS_BOUNDED_GT_0_1:
- if not (val > 0) or not (val <= 1):
- raise ValueError("{0} <= 0, {0} > 1 or {0} is NaN".format(name))
- elif cons == CONS_GT_1:
- if not (val > 1):
- raise ValueError("{0} <= 1 or {0} is NaN".format(name))
- elif cons == CONS_GTE_1:
- if not (val >= 1):
- raise ValueError("{0} < 1 or {0} is NaN".format(name))
- elif cons == CONS_POISSON:
- if not (val >= 0):
- raise ValueError("{0} < 0 or {0} is NaN".format(name))
- elif not (val <= POISSON_LAM_MAX):
- raise ValueError(name + " value too large")
- elif cons == LEGACY_CONS_POISSON:
- if not (val >= 0):
- raise ValueError("{0} < 0 or {0} is NaN".format(name))
- elif not (val <= LEGACY_POISSON_LAM_MAX):
- raise ValueError(name + " value too large")
-
- return 0
-
-cdef object cont_broadcast_1(void *func, void *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint,
- object out):
-
- cdef np.ndarray randoms
- cdef double a_val
- cdef double *randoms_data
- cdef np.broadcast it
- cdef random_double_1 f = (<random_double_1>func)
- cdef np.npy_intp i, n
-
- if a_constraint != CONS_NONE:
- check_array_constraint(a_arr, a_name, a_constraint)
-
- if size is not None and out is None:
- randoms = <np.ndarray>np.empty(size, np.double)
- elif out is None:
- randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr), np.PyArray_DIMS(a_arr), np.NPY_DOUBLE)
- else:
- randoms = <np.ndarray>out
-
- randoms_data = <double *>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
- it = np.PyArray_MultiIterNew2(randoms, a_arr)
-
- with lock, nogil:
- for i in range(n):
- a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
- randoms_data[i] = f(state, a_val)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return randoms
-
-cdef object cont_broadcast_2(void *func, void *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint,
- np.ndarray b_arr, object b_name, constraint_type b_constraint):
- cdef np.ndarray randoms
- cdef double a_val, b_val
- cdef double *randoms_data
- cdef np.broadcast it
- cdef random_double_2 f = (<random_double_2>func)
- cdef np.npy_intp i, n
-
- if a_constraint != CONS_NONE:
- check_array_constraint(a_arr, a_name, a_constraint)
-
- if b_constraint != CONS_NONE:
- check_array_constraint(b_arr, b_name, b_constraint)
-
- if size is not None:
- randoms = <np.ndarray>np.empty(size, np.double)
- else:
- it = np.PyArray_MultiIterNew2(a_arr, b_arr)
- randoms = <np.ndarray>np.empty(it.shape, np.double)
-
- randoms_data = <double *>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
-
- it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr)
- with lock, nogil:
- for i in range(n):
- a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
- b_val = (<double*>np.PyArray_MultiIter_DATA(it, 2))[0]
- randoms_data[i] = f(state, a_val, b_val)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return randoms
-
-cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint,
- np.ndarray b_arr, object b_name, constraint_type b_constraint,
- np.ndarray c_arr, object c_name, constraint_type c_constraint):
- cdef np.ndarray randoms
- cdef double a_val, b_val, c_val
- cdef double *randoms_data
- cdef np.broadcast it
- cdef random_double_3 f = (<random_double_3>func)
- cdef np.npy_intp i, n
-
- if a_constraint != CONS_NONE:
- check_array_constraint(a_arr, a_name, a_constraint)
-
- if b_constraint != CONS_NONE:
- check_array_constraint(b_arr, b_name, b_constraint)
-
- if c_constraint != CONS_NONE:
- check_array_constraint(c_arr, c_name, c_constraint)
-
- if size is not None:
- randoms = <np.ndarray>np.empty(size, np.double)
- else:
- it = np.PyArray_MultiIterNew3(a_arr, b_arr, c_arr)
- randoms = <np.ndarray>np.empty(it.shape, np.double)
-
- randoms_data = <double *>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
-
- it = np.PyArray_MultiIterNew4(randoms, a_arr, b_arr, c_arr)
- with lock, nogil:
- for i in range(n):
- a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
- b_val = (<double*>np.PyArray_MultiIter_DATA(it, 2))[0]
- c_val = (<double*>np.PyArray_MultiIter_DATA(it, 3))[0]
- randoms_data[i] = f(state, a_val, b_val, c_val)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return randoms
-
-cdef object cont(void *func, void *state, object size, object lock, int narg,
- object a, object a_name, constraint_type a_constraint,
- object b, object b_name, constraint_type b_constraint,
- object c, object c_name, constraint_type c_constraint,
- object out):
-
- cdef np.ndarray a_arr, b_arr, c_arr
- cdef double _a = 0.0, _b = 0.0, _c = 0.0
- cdef bint is_scalar = True
- check_output(out, np.float64, size)
- if narg > 0:
- a_arr = <np.ndarray>np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ALIGNED)
- is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0
- if narg > 1:
- b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_DOUBLE, np.NPY_ALIGNED)
- is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
- if narg == 3:
- c_arr = <np.ndarray>np.PyArray_FROM_OTF(c, np.NPY_DOUBLE, np.NPY_ALIGNED)
- is_scalar = is_scalar and np.PyArray_NDIM(c_arr) == 0
-
- if not is_scalar:
- if narg == 1:
- return cont_broadcast_1(func, state, size, lock,
- a_arr, a_name, a_constraint,
- out)
- elif narg == 2:
- return cont_broadcast_2(func, state, size, lock,
- a_arr, a_name, a_constraint,
- b_arr, b_name, b_constraint)
- else:
- return cont_broadcast_3(func, state, size, lock,
- a_arr, a_name, a_constraint,
- b_arr, b_name, b_constraint,
- c_arr, c_name, c_constraint)
-
- if narg > 0:
- _a = PyFloat_AsDouble(a)
- if a_constraint != CONS_NONE and is_scalar:
- check_constraint(_a, a_name, a_constraint)
- if narg > 1:
- _b = PyFloat_AsDouble(b)
- if b_constraint != CONS_NONE:
- check_constraint(_b, b_name, b_constraint)
- if narg == 3:
- _c = PyFloat_AsDouble(c)
- if c_constraint != CONS_NONE and is_scalar:
- check_constraint(_c, c_name, c_constraint)
-
- if size is None and out is None:
- with lock:
- if narg == 0:
- return (<random_double_0>func)(state)
- elif narg == 1:
- return (<random_double_1>func)(state, _a)
- elif narg == 2:
- return (<random_double_2>func)(state, _a, _b)
- elif narg == 3:
- return (<random_double_3>func)(state, _a, _b, _c)
-
- cdef np.npy_intp i, n
- cdef np.ndarray randoms
- if out is None:
- randoms = <np.ndarray>np.empty(size)
- else:
- randoms = <np.ndarray>out
- n = np.PyArray_SIZE(randoms)
-
- cdef double *randoms_data = <double *>np.PyArray_DATA(randoms)
- cdef random_double_0 f0
- cdef random_double_1 f1
- cdef random_double_2 f2
- cdef random_double_3 f3
-
- with lock, nogil:
- if narg == 0:
- f0 = (<random_double_0>func)
- for i in range(n):
- randoms_data[i] = f0(state)
- elif narg == 1:
- f1 = (<random_double_1>func)
- for i in range(n):
- randoms_data[i] = f1(state, _a)
- elif narg == 2:
- f2 = (<random_double_2>func)
- for i in range(n):
- randoms_data[i] = f2(state, _a, _b)
- elif narg == 3:
- f3 = (<random_double_3>func)
- for i in range(n):
- randoms_data[i] = f3(state, _a, _b, _c)
-
- if out is None:
- return randoms
- else:
- return out
-
-cdef object discrete_broadcast_d(void *func, void *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint):
-
- cdef np.ndarray randoms
- cdef int64_t *randoms_data
- cdef np.broadcast it
- cdef random_uint_d f = (<random_uint_d>func)
- cdef np.npy_intp i, n
-
- if a_constraint != CONS_NONE:
- check_array_constraint(a_arr, a_name, a_constraint)
-
- if size is not None:
- randoms = np.empty(size, np.int64)
- else:
- randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr), np.PyArray_DIMS(a_arr), np.NPY_INT64)
-
- randoms_data = <int64_t *>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
-
- it = np.PyArray_MultiIterNew2(randoms, a_arr)
- with lock, nogil:
- for i in range(n):
- a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
- randoms_data[i] = f(state, a_val)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return randoms
-
-cdef object discrete_broadcast_dd(void *func, void *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint,
- np.ndarray b_arr, object b_name, constraint_type b_constraint):
- cdef np.ndarray randoms
- cdef int64_t *randoms_data
- cdef np.broadcast it
- cdef random_uint_dd f = (<random_uint_dd>func)
- cdef np.npy_intp i, n
-
- if a_constraint != CONS_NONE:
- check_array_constraint(a_arr, a_name, a_constraint)
- if b_constraint != CONS_NONE:
- check_array_constraint(b_arr, b_name, b_constraint)
-
- if size is not None:
- randoms = <np.ndarray>np.empty(size, np.int64)
- else:
- it = np.PyArray_MultiIterNew2(a_arr, b_arr)
- randoms = <np.ndarray>np.empty(it.shape, np.int64)
-
- randoms_data = <int64_t *>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
-
- it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr)
- with lock, nogil:
- for i in range(n):
- a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
- b_val = (<double*>np.PyArray_MultiIter_DATA(it, 2))[0]
- randoms_data[i] = f(state, a_val, b_val)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return randoms
-
-cdef object discrete_broadcast_di(void *func, void *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint,
- np.ndarray b_arr, object b_name, constraint_type b_constraint):
- cdef np.ndarray randoms
- cdef int64_t *randoms_data
- cdef np.broadcast it
- cdef random_uint_di f = (<random_uint_di>func)
- cdef np.npy_intp i, n
-
- if a_constraint != CONS_NONE:
- check_array_constraint(a_arr, a_name, a_constraint)
-
- if b_constraint != CONS_NONE:
- check_array_constraint(b_arr, b_name, b_constraint)
-
- if size is not None:
- randoms = <np.ndarray>np.empty(size, np.int64)
- else:
- it = np.PyArray_MultiIterNew2(a_arr, b_arr)
- randoms = <np.ndarray>np.empty(it.shape, np.int64)
-
- randoms_data = <int64_t *>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
-
- it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr)
- with lock, nogil:
- for i in range(n):
- a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
- b_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 2))[0]
- (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0] = f(state, a_val, b_val)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return randoms
-
-cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint,
- np.ndarray b_arr, object b_name, constraint_type b_constraint,
- np.ndarray c_arr, object c_name, constraint_type c_constraint):
- cdef np.ndarray randoms
- cdef int64_t *randoms_data
- cdef np.broadcast it
- cdef random_uint_iii f = (<random_uint_iii>func)
- cdef np.npy_intp i, n
-
- if a_constraint != CONS_NONE:
- check_array_constraint(a_arr, a_name, a_constraint)
-
- if b_constraint != CONS_NONE:
- check_array_constraint(b_arr, b_name, b_constraint)
-
- if c_constraint != CONS_NONE:
- check_array_constraint(c_arr, c_name, c_constraint)
-
- if size is not None:
- randoms = <np.ndarray>np.empty(size, np.int64)
- else:
- it = np.PyArray_MultiIterNew3(a_arr, b_arr, c_arr)
- randoms = <np.ndarray>np.empty(it.shape, np.int64)
-
- randoms_data = <int64_t *>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
-
- it = np.PyArray_MultiIterNew4(randoms, a_arr, b_arr, c_arr)
- with lock, nogil:
- for i in range(n):
- a_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- b_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 2))[0]
- c_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 3))[0]
- randoms_data[i] = f(state, a_val, b_val, c_val)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return randoms
-
-cdef object discrete_broadcast_i(void *func, void *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint):
- cdef np.ndarray randoms
- cdef int64_t *randoms_data
- cdef np.broadcast it
- cdef random_uint_i f = (<random_uint_i>func)
- cdef np.npy_intp i, n
-
- if a_constraint != CONS_NONE:
- check_array_constraint(a_arr, a_name, a_constraint)
-
- if size is not None:
- randoms = <np.ndarray>np.empty(size, np.int64)
- else:
- randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr), np.PyArray_DIMS(a_arr), np.NPY_INT64)
-
- randoms_data = <int64_t *>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
-
- it = np.PyArray_MultiIterNew2(randoms, a_arr)
- with lock, nogil:
- for i in range(n):
- a_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- randoms_data[i] = f(state, a_val)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return randoms
-
-# Needs double <vec>, double-double <vec>, double-int64_t<vec>, int64_t <vec>, int64_t-int64_t-int64_t
-cdef object disc(void *func, void *state, object size, object lock,
- int narg_double, int narg_int64,
- object a, object a_name, constraint_type a_constraint,
- object b, object b_name, constraint_type b_constraint,
- object c, object c_name, constraint_type c_constraint):
-
- cdef double _da = 0, _db = 0
- cdef int64_t _ia = 0, _ib = 0, _ic = 0
- cdef bint is_scalar = True
- if narg_double > 0:
- a_arr = <np.ndarray>np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ALIGNED)
- is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0
- if narg_double > 1:
- b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_DOUBLE, np.NPY_ALIGNED)
- is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
- elif narg_int64 == 1:
- b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_INT64, np.NPY_ALIGNED)
- is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
- else:
- if narg_int64 > 0:
- a_arr = <np.ndarray>np.PyArray_FROM_OTF(a, np.NPY_INT64, np.NPY_ALIGNED)
- is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0
- if narg_int64 > 1:
- b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_INT64, np.NPY_ALIGNED)
- is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
- if narg_int64 > 2:
- c_arr = <np.ndarray>np.PyArray_FROM_OTF(c, np.NPY_INT64, np.NPY_ALIGNED)
- is_scalar = is_scalar and np.PyArray_NDIM(c_arr) == 0
-
- if not is_scalar:
- if narg_int64 == 0:
- if narg_double == 1:
- return discrete_broadcast_d(func, state, size, lock,
- a_arr, a_name, a_constraint)
- elif narg_double == 2:
- return discrete_broadcast_dd(func, state, size, lock,
- a_arr, a_name, a_constraint,
- b_arr, b_name, b_constraint)
- elif narg_int64 == 1:
- if narg_double == 0:
- return discrete_broadcast_i(func, state, size, lock,
- a_arr, a_name, a_constraint)
- elif narg_double == 1:
- return discrete_broadcast_di(func, state, size, lock,
- a_arr, a_name, a_constraint,
- b_arr, b_name, b_constraint)
- else:
- raise NotImplementedError("No vector path available")
-
- if narg_double > 0:
- _da = PyFloat_AsDouble(a)
- if a_constraint != CONS_NONE and is_scalar:
- check_constraint(_da, a_name, a_constraint)
-
- if narg_double > 1:
- _db = PyFloat_AsDouble(b)
- if b_constraint != CONS_NONE and is_scalar:
- check_constraint(_db, b_name, b_constraint)
- elif narg_int64 == 1:
- _ib = <int64_t>b
- if b_constraint != CONS_NONE and is_scalar:
- check_constraint(<double>_ib, b_name, b_constraint)
- else:
- if narg_int64 > 0:
- _ia = <int64_t>a
- if a_constraint != CONS_NONE and is_scalar:
- check_constraint(<double>_ia, a_name, a_constraint)
- if narg_int64 > 1:
- _ib = <int64_t>b
- if b_constraint != CONS_NONE and is_scalar:
- check_constraint(<double>_ib, b_name, b_constraint)
- if narg_int64 > 2:
- _ic = <int64_t>c
- if c_constraint != CONS_NONE and is_scalar:
- check_constraint(<double>_ic, c_name, c_constraint)
-
- if size is None:
- with lock:
- if narg_int64 == 0:
- if narg_double == 0:
- return (<random_uint_0>func)(state)
- elif narg_double == 1:
- return (<random_uint_d>func)(state, _da)
- elif narg_double == 2:
- return (<random_uint_dd>func)(state, _da, _db)
- elif narg_int64 == 1:
- if narg_double == 0:
- return (<random_uint_i>func)(state, _ia)
- if narg_double == 1:
- return (<random_uint_di>func)(state, _da, _ib)
- else:
- return (<random_uint_iii>func)(state, _ia, _ib, _ic)
-
- cdef np.npy_intp i, n
- cdef np.ndarray randoms = <np.ndarray>np.empty(size, np.int64)
- cdef np.int64_t *randoms_data
- cdef random_uint_0 f0
- cdef random_uint_d fd
- cdef random_uint_dd fdd
- cdef random_uint_di fdi
- cdef random_uint_i fi
- cdef random_uint_iii fiii
-
- n = np.PyArray_SIZE(randoms)
- randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
-
- with lock, nogil:
- if narg_int64 == 0:
- if narg_double == 0:
- f0 = (<random_uint_0>func)
- for i in range(n):
- randoms_data[i] = f0(state)
- elif narg_double == 1:
- fd = (<random_uint_d>func)
- for i in range(n):
- randoms_data[i] = fd(state, _da)
- elif narg_double == 2:
- fdd = (<random_uint_dd>func)
- for i in range(n):
- randoms_data[i] = fdd(state, _da, _db)
- elif narg_int64 == 1:
- if narg_double == 0:
- fi = (<random_uint_i>func)
- for i in range(n):
- randoms_data[i] = fi(state, _ia)
- if narg_double == 1:
- fdi = (<random_uint_di>func)
- for i in range(n):
- randoms_data[i] = fdi(state, _da, _ib)
- else:
- fiii = (<random_uint_iii>func)
- for i in range(n):
- randoms_data[i] = fiii(state, _ia, _ib, _ic)
-
- return randoms
-
-
-cdef object cont_broadcast_1_f(void *func, bitgen_t *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint,
- object out):
-
- cdef np.ndarray randoms
- cdef float a_val
- cdef float *randoms_data
- cdef np.broadcast it
- cdef random_float_1 f = (<random_float_1>func)
- cdef np.npy_intp i, n
-
- if a_constraint != CONS_NONE:
- check_array_constraint(a_arr, a_name, a_constraint)
-
- if size is not None and out is None:
- randoms = <np.ndarray>np.empty(size, np.float32)
- elif out is None:
- randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr),
- np.PyArray_DIMS(a_arr),
- np.NPY_FLOAT32)
- else:
- randoms = <np.ndarray>out
-
- randoms_data = <float *>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
- it = np.PyArray_MultiIterNew2(randoms, a_arr)
-
- with lock, nogil:
- for i in range(n):
- a_val = (<float*>np.PyArray_MultiIter_DATA(it, 1))[0]
- randoms_data[i] = f(state, a_val)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return randoms
-
-cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
- object a, object a_name, constraint_type a_constraint,
- object out):
-
- cdef np.ndarray a_arr, b_arr, c_arr
- cdef float _a
- cdef bint is_scalar = True
- cdef int requirements = np.NPY_ALIGNED | np.NPY_FORCECAST
- check_output(out, np.float32, size)
- a_arr = <np.ndarray>np.PyArray_FROMANY(a, np.NPY_FLOAT32, 0, 0, requirements)
- is_scalar = np.PyArray_NDIM(a_arr) == 0
-
- if not is_scalar:
- return cont_broadcast_1_f(func, state, size, lock, a_arr, a_name, a_constraint, out)
-
- _a = <float>PyFloat_AsDouble(a)
- if a_constraint != CONS_NONE:
- check_constraint(_a, a_name, a_constraint)
-
- if size is None and out is None:
- with lock:
- return (<random_float_1>func)(state, _a)
-
- cdef np.npy_intp i, n
- cdef np.ndarray randoms
- if out is None:
- randoms = <np.ndarray>np.empty(size, np.float32)
- else:
- randoms = <np.ndarray>out
- n = np.PyArray_SIZE(randoms)
-
- cdef float *randoms_data = <float *>np.PyArray_DATA(randoms)
- cdef random_float_1 f1 = <random_float_1>func
-
- with lock, nogil:
- for i in range(n):
- randoms_data[i] = f1(state, _a)
-
- if out is None:
- return randoms
- else:
- return out
+++ /dev/null
-#cython: language_level=3
-
-from .common cimport (uint8_t, uint16_t, uint32_t, uint64_t,
- int32_t, int64_t, bitgen_t)
-import numpy as np
-cimport numpy as np
-
-cdef extern from "src/distributions/distributions.h":
-
- struct s_binomial_t:
- int has_binomial
- double psave
- int64_t nsave
- double r
- double q
- double fm
- int64_t m
- double p1
- double xm
- double xl
- double xr
- double c
- double laml
- double lamr
- double p2
- double p3
- double p4
-
- ctypedef s_binomial_t binomial_t
-
- double random_double(bitgen_t *bitgen_state) nogil
- void random_double_fill(bitgen_t* bitgen_state, np.npy_intp cnt, double *out) nogil
- double random_standard_exponential(bitgen_t *bitgen_state) nogil
- void random_standard_exponential_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil
- double random_standard_exponential_zig(bitgen_t *bitgen_state) nogil
- void random_standard_exponential_zig_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil
- double random_gauss_zig(bitgen_t* bitgen_state) nogil
- void random_gauss_zig_fill(bitgen_t *bitgen_state, np.npy_intp count, double *out) nogil
- double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape) nogil
-
- float random_float(bitgen_t *bitgen_state) nogil
- float random_standard_exponential_f(bitgen_t *bitgen_state) nogil
- float random_standard_exponential_zig_f(bitgen_t *bitgen_state) nogil
- float random_gauss_zig_f(bitgen_t* bitgen_state) nogil
- float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
- float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape) nogil
-
- int64_t random_positive_int64(bitgen_t *bitgen_state) nogil
- int32_t random_positive_int32(bitgen_t *bitgen_state) nogil
- int64_t random_positive_int(bitgen_t *bitgen_state) nogil
- uint64_t random_uint(bitgen_t *bitgen_state) nogil
-
- double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale) nogil
-
- double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil
- float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale) nogil
-
- double random_exponential(bitgen_t *bitgen_state, double scale) nogil
- double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
- double random_beta(bitgen_t *bitgen_state, double a, double b) nogil
- double random_chisquare(bitgen_t *bitgen_state, double df) nogil
- double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil
- double random_standard_cauchy(bitgen_t *bitgen_state) nogil
- double random_pareto(bitgen_t *bitgen_state, double a) nogil
- double random_weibull(bitgen_t *bitgen_state, double a) nogil
- double random_power(bitgen_t *bitgen_state, double a) nogil
- double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
- double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
- double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
- double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil
- double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
- double random_standard_t(bitgen_t *bitgen_state, double df) nogil
- double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
- double nonc) nogil
- double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
- double dfden, double nonc) nogil
- double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil
- double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
- double random_triangular(bitgen_t *bitgen_state, double left, double mode,
- double right) nogil
-
- int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil
- int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil
- int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil
- int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil
- int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil
- int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil
- int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil
- int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil
- int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad,
- int64_t sample) nogil
-
- uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
-
- # Generate random uint64 numbers in closed interval [off, off + rng].
- uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
- uint64_t off, uint64_t rng,
- uint64_t mask, bint use_masked) nogil
-
- # Generate random uint32 numbers in closed interval [off, off + rng].
- uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
- uint32_t off, uint32_t rng,
- uint32_t mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
- uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
- uint16_t off, uint16_t rng,
- uint16_t mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
- uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state,
- uint8_t off, uint8_t rng,
- uint8_t mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
- np.npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state,
- np.npy_bool off, np.npy_bool rng,
- np.npy_bool mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
-
- void random_bounded_uint64_fill(bitgen_t *bitgen_state,
- uint64_t off, uint64_t rng, np.npy_intp cnt,
- bint use_masked,
- uint64_t *out) nogil
- void random_bounded_uint32_fill(bitgen_t *bitgen_state,
- uint32_t off, uint32_t rng, np.npy_intp cnt,
- bint use_masked,
- uint32_t *out) nogil
- void random_bounded_uint16_fill(bitgen_t *bitgen_state,
- uint16_t off, uint16_t rng, np.npy_intp cnt,
- bint use_masked,
- uint16_t *out) nogil
- void random_bounded_uint8_fill(bitgen_t *bitgen_state,
- uint8_t off, uint8_t rng, np.npy_intp cnt,
- bint use_masked,
- uint8_t *out) nogil
- void random_bounded_bool_fill(bitgen_t *bitgen_state,
- np.npy_bool off, np.npy_bool rng, np.npy_intp cnt,
- bint use_masked,
- np.npy_bool *out) nogil
-
- void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix,
- double *pix, np.npy_intp d, binomial_t *binomial) nogil
+++ /dev/null
-#!/usr/bin/env python
-#cython: language_level=3
-
-from libc.stdint cimport uint32_t
-from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
-
-import numpy as np
-cimport numpy as np
-cimport cython
-
-from numpy.random.common cimport bitgen_t
-from numpy.random import PCG64
-
-np.import_array()
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def uniform_mean(Py_ssize_t n):
- cdef Py_ssize_t i
- cdef bitgen_t *rng
- cdef const char *capsule_name = "BitGenerator"
- cdef double[::1] random_values
- cdef np.ndarray randoms
-
- x = PCG64()
- capsule = x.capsule
- if not PyCapsule_IsValid(capsule, capsule_name):
- raise ValueError("Invalid pointer to anon_func_state")
- rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
- random_values = np.empty(n)
- # Best practice is to acquire the lock whenever generating random values.
- # This prevents other threads from modifying the state. Acquiring the lock
-    # is only necessary if the GIL is also released, as in this example.
- with x.lock, nogil:
- for i in range(n):
- random_values[i] = rng.next_double(rng.state)
- randoms = np.asarray(random_values)
- return randoms.mean()
-
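-# A hypothetical interactive session using the function above
-# (the value is random):
-#
-#   >>> uniform_mean(10000)
-#   0.5003  # random, near the expected 0.5
-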
-
-# This function is declared nogil so it can be used without the GIL below
-cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil:
- cdef uint32_t mask, delta, val
- mask = delta = ub - lb
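-    # Smear the highest set bit rightwards so that mask becomes the
-    # smallest bit mask of the form 2**k - 1 that covers delta.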
- mask |= mask >> 1
- mask |= mask >> 2
- mask |= mask >> 4
- mask |= mask >> 8
- mask |= mask >> 16
-
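-    # Rejection sampling: draw masked values until one falls in [0, delta].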
- val = rng.next_uint32(rng.state) & mask
- while val > delta:
- val = rng.next_uint32(rng.state) & mask
-
- return lb + val
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n):
- cdef Py_ssize_t i
- cdef bitgen_t *rng
- cdef uint32_t[::1] out
- cdef const char *capsule_name = "BitGenerator"
-
- x = PCG64()
- out = np.empty(n, dtype=np.uint32)
- capsule = x.capsule
-
- if not PyCapsule_IsValid(capsule, capsule_name):
- raise ValueError("Invalid pointer to anon_func_state")
- rng = <bitgen_t *>PyCapsule_GetPointer(capsule, capsule_name)
-
- with x.lock, nogil:
- for i in range(n):
- out[i] = bounded_uint(lb, ub, rng)
- return np.asarray(out)
+++ /dev/null
-#!/usr/bin/env python
-#cython: language_level=3
-"""
-This file shows how the distributions that are accessed through
-distributions.pxd can be used in Cython code.
-"""
-import numpy as np
-cimport numpy as np
-cimport cython
-from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
-from numpy.random.common cimport *
-from numpy.random.distributions cimport random_gauss_zig
-from numpy.random import PCG64
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def normals_zig(Py_ssize_t n):
- cdef Py_ssize_t i
- cdef bitgen_t *rng
- cdef const char *capsule_name = "BitGenerator"
- cdef double[::1] random_values
-
- x = PCG64()
- capsule = x.capsule
- if not PyCapsule_IsValid(capsule, capsule_name):
- raise ValueError("Invalid pointer to anon_func_state")
- rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
- random_values = np.empty(n)
-    # Best practice is to release the GIL and acquire the lock
- with x.lock, nogil:
- for i in range(n):
- random_values[i] = random_gauss_zig(rng)
- randoms = np.asarray(random_values)
- return randoms
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def uniforms(Py_ssize_t n):
- cdef Py_ssize_t i
- cdef bitgen_t *rng
- cdef const char *capsule_name = "BitGenerator"
- cdef double[::1] random_values
-
- x = PCG64()
- capsule = x.capsule
-    # Optional check that the capsule is from a BitGenerator
- if not PyCapsule_IsValid(capsule, capsule_name):
- raise ValueError("Invalid pointer to anon_func_state")
- # Cast the pointer
- rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
- random_values = np.empty(n)
- with x.lock, nogil:
- for i in range(n):
- # Call the function
- random_values[i] = rng.next_double(rng.state)
- randoms = np.asarray(random_values)
- return randoms
+++ /dev/null
-#!/usr/bin/env python3
-"""
-Build the demos
-
-Usage: python setup.py build_ext -i
-"""
-
-import numpy as np
-from distutils.core import setup
-from Cython.Build import cythonize
-from setuptools.extension import Extension
-from os.path import join
-
-extending = Extension("extending",
- sources=['extending.pyx'],
- include_dirs=[np.get_include()])
-distributions = Extension("extending_distributions",
- sources=['extending_distributions.pyx',
- join('..', '..', 'src',
- 'distributions', 'distributions.c')],
- include_dirs=[np.get_include()])
-
-extensions = [extending, distributions]
-
-setup(
- ext_modules=cythonize(extensions)
-)
+++ /dev/null
-import datetime as dt
-
-import numpy as np
-import numba as nb
-
-from numpy.random import PCG64
-
-x = PCG64()
-f = x.ctypes.next_uint32
-s = x.ctypes.state
-
-
-@nb.jit(nopython=True)
-def bounded_uint(lb, ub, state):
- mask = delta = ub - lb
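-    # Build the smallest 2**k - 1 mask covering delta, then reject masked
-    # draws that exceed delta (the same scheme as the Cython example).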
- mask |= mask >> 1
- mask |= mask >> 2
- mask |= mask >> 4
- mask |= mask >> 8
- mask |= mask >> 16
-
- val = f(state) & mask
- while val > delta:
- val = f(state) & mask
-
- return lb + val
-
-
-print(bounded_uint(323, 2394691, s.value))
-
-
-@nb.jit(nopython=True)
-def bounded_uints(lb, ub, n, state):
- out = np.empty(n, dtype=np.uint32)
-    for i in range(n):
-        out[i] = bounded_uint(lb, ub, state)
-    return out
-
-
-bounded_uints(323, 2394691, 10000000, s.value)
-
-g = x.cffi.next_double
-cffi_state = x.cffi.state
-state_addr = x.cffi.state_address
-
-
-def normals(n, state):
- out = np.empty(n)
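-    # Marsaglia polar method: draw points uniformly in the unit square,
-    # reject those outside the unit circle, then transform each accepted
-    # pair into two independent standard normals.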
- for i in range((n + 1) // 2):
- x1 = 2.0 * g(state) - 1.0
- x2 = 2.0 * g(state) - 1.0
- r2 = x1 * x1 + x2 * x2
- while r2 >= 1.0 or r2 == 0.0:
- x1 = 2.0 * g(state) - 1.0
- x2 = 2.0 * g(state) - 1.0
- r2 = x1 * x1 + x2 * x2
- f = np.sqrt(-2.0 * np.log(r2) / r2)
- out[2 * i] = f * x1
- if 2 * i + 1 < n:
- out[2 * i + 1] = f * x2
- return out
-
-
-print(normals(10, cffi_state).var())
-# Warm up
-normalsj = nb.jit(normals, nopython=True)
-normalsj(1, state_addr)
-
-start = dt.datetime.now()
-normalsj(1000000, state_addr)
-ms = 1000 * (dt.datetime.now() - start).total_seconds()
-print('1,000,000 Polar-transform (numba/PCG64) randoms in '
- '{ms:0.1f}ms'.format(ms=ms))
-
-start = dt.datetime.now()
-np.random.standard_normal(1000000)
-ms = 1000 * (dt.datetime.now() - start).total_seconds()
-print('1,000,000 Polar-transform (NumPy) randoms in {ms:0.1f}ms'.format(ms=ms))
+++ /dev/null
-r"""
-On *nix, execute in randomgen/src/distributions
-
-export PYTHON_INCLUDE=#path to Python's include folder, usually \
- ${PYTHON_HOME}/include/python${PYTHON_VERSION}m
-export NUMPY_INCLUDE=#path to numpy's include folder, usually \
- ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include
-gcc -shared -o libdistributions.so -fPIC distributions.c \
- -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
-mv libdistributions.so ../../examples/numba/
-
-On Windows
-
-rem PYTHON_HOME is setup dependent, this is an example
-set PYTHON_HOME=c:\Anaconda
-cl.exe /LD .\distributions.c -DDLL_EXPORT \
- -I%PYTHON_HOME%\lib\site-packages\numpy\core\include \
- -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python36.lib
-move distributions.dll ../../examples/numba/
-"""
-import os
-
-import numba as nb
-import numpy as np
-from cffi import FFI
-
-from numpy.random import PCG64
-
-ffi = FFI()
-if os.path.exists('./distributions.dll'):
- lib = ffi.dlopen('./distributions.dll')
-elif os.path.exists('./libdistributions.so'):
- lib = ffi.dlopen('./libdistributions.so')
-else:
- raise RuntimeError('Required DLL/so file was not found.')
-
-ffi.cdef("""
-double random_gauss_zig(void *bitgen_state);
-""")
-x = PCG64()
-xffi = x.cffi
-bit_generator = xffi.bit_generator
-
-random_gauss_zig = lib.random_gauss_zig
-
-
-def normals(n, bit_generator):
- out = np.empty(n)
- for i in range(n):
- out[i] = random_gauss_zig(bit_generator)
- return out
-
-
-normalsj = nb.jit(normals, nopython=True)
-
-# Numba requires a memory address for void *
-# Can also get address from x.ctypes.bit_generator.value
-bit_generator_address = int(ffi.cast('uintptr_t', bit_generator))
-
-norm = normalsj(1000, bit_generator_address)
-print(norm[:12])
+++ /dev/null
-#!python
-#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
-import operator
-import warnings
-
-import numpy as np
-
-from .bounded_integers import _integers_types
-from .pcg64 import PCG64
-
-from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
-from cpython cimport (Py_INCREF, PyFloat_AsDouble)
-from libc cimport string
-
-cimport cython
-cimport numpy as np
-
-from .bounded_integers cimport *
-from .common cimport *
-from .distributions cimport *
-
-
-__all__ = ['Generator', 'beta', 'binomial', 'bytes', 'chisquare', 'choice',
- 'dirichlet', 'exponential', 'f', 'gamma',
- 'geometric', 'gumbel', 'hypergeometric', 'integers', 'laplace',
- 'logistic', 'lognormal', 'logseries', 'multinomial',
- 'multivariate_normal', 'negative_binomial', 'noncentral_chisquare',
- 'noncentral_f', 'normal', 'pareto', 'permutation',
- 'poisson', 'power', 'random', 'rayleigh', 'shuffle',
- 'standard_cauchy', 'standard_exponential', 'standard_gamma',
- 'standard_normal', 'standard_t', 'triangular',
- 'uniform', 'vonmises', 'wald', 'weibull', 'zipf']
-
-np.import_array()
-
-
-cdef bint _check_bit_generator(object bitgen):
- """Check if an object satisfies the BitGenerator interface.
- """
- if not hasattr(bitgen, "capsule"):
- return False
- cdef const char *name = "BitGenerator"
- return PyCapsule_IsValid(bitgen.capsule, name)
-
-
-cdef class Generator:
- """
- Generator(bit_generator)
-
- Container for the BitGenerators.
-
- ``Generator`` exposes a number of methods for generating random
- numbers drawn from a variety of probability distributions. In addition to
- the distribution-specific arguments, each method takes a keyword argument
- `size` that defaults to ``None``. If `size` is ``None``, then a single
- value is generated and returned. If `size` is an integer, then a 1-D
- array filled with generated values is returned. If `size` is a tuple,
- then an array with that shape is filled and returned.
-
- The function :func:`numpy.random.default_rng` will instantiate
- a `Generator` with numpy's default `BitGenerator`.
-
- **No Compatibility Guarantee**
-
- ``Generator`` does not provide a version compatibility guarantee. In
- particular, as better algorithms evolve the bit stream may change.
-
- Parameters
- ----------
- bit_generator : BitGenerator
- BitGenerator to use as the core generator.
-
- Notes
- -----
-    The Python stdlib module `random` contains a pseudo-random number
-    generator with a number of methods that are similar to the ones available in
- ``Generator``. It uses Mersenne Twister, and this bit generator can
- be accessed using ``MT19937``. ``Generator``, besides being
- NumPy-aware, has the advantage that it provides a much larger number
- of probability distributions to choose from.
-
- Examples
- --------
- >>> from numpy.random import Generator, PCG64
- >>> rg = Generator(PCG64())
- >>> rg.standard_normal()
- -0.203 # random
-
- See Also
- --------
- default_rng : Recommended constructor for `Generator`.
- """
- cdef public object _bit_generator
- cdef bitgen_t _bitgen
- cdef binomial_t _binomial
- cdef object lock
- _poisson_lam_max = POISSON_LAM_MAX
-
- def __init__(self, bit_generator):
- self._bit_generator = bit_generator
-
- capsule = bit_generator.capsule
- cdef const char *name = "BitGenerator"
- if not PyCapsule_IsValid(capsule, name):
- raise ValueError("Invalid bit generator'. The bit generator must "
- "be instantiated.")
- self._bitgen = (<bitgen_t *> PyCapsule_GetPointer(capsule, name))[0]
- self.lock = bit_generator.lock
-
- def __repr__(self):
- return self.__str__() + ' at 0x{:X}'.format(id(self))
-
- def __str__(self):
- _str = self.__class__.__name__
- _str += '(' + self.bit_generator.__class__.__name__ + ')'
- return _str
-
- # Pickling support:
- def __getstate__(self):
- return self.bit_generator.state
-
- def __setstate__(self, state):
- self.bit_generator.state = state
-
- def __reduce__(self):
- from ._pickle import __generator_ctor
- return __generator_ctor, (self.bit_generator.state['bit_generator'],), self.bit_generator.state
-
- @property
- def bit_generator(self):
- """
- Gets the bit generator instance used by the generator
-
- Returns
- -------
- bit_generator : BitGenerator
- The bit generator instance used by the generator
- """
- return self._bit_generator
-
- def random(self, size=None, dtype=np.float64, out=None):
- """
- random(size=None, dtype='d', out=None)
-
- Return random floats in the half-open interval [0.0, 1.0).
-
- Results are from the "continuous uniform" distribution over the
- stated interval. To sample :math:`Unif[a, b), b > a` multiply
- the output of `random` by `(b-a)` and add `a`::
-
- (b - a) * random() + a
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
- out : ndarray, optional
- Alternative output array in which to place the result. If size is not None,
- it must have the same shape as the provided size and must match the type of
- the output values.
-
- Returns
- -------
- out : float or ndarray of floats
- Array of random floats of shape `size` (unless ``size=None``, in which
- case a single float is returned).
-
- Examples
- --------
- >>> rng = np.random.default_rng()
- >>> rng.random()
- 0.47108547995356098 # random
- >>> type(rng.random())
- <class 'float'>
- >>> rng.random((5,))
- array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428]) # random
-
- Three-by-two array of random numbers from [-5, 0):
-
- >>> 5 * rng.random((3, 2)) - 5
- array([[-3.99149989, -0.52338984], # random
- [-2.99091858, -0.79479508],
- [-1.23204345, -1.75224494]])
-
- """
- key = np.dtype(dtype).name
- if key == 'float64':
- return double_fill(&random_double_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
- return float_fill(&random_float, &self._bitgen, size, self.lock, out)
- else:
- raise TypeError('Unsupported dtype "%s" for random' % key)
-
- def beta(self, a, b, size=None):
- """
- beta(a, b, size=None)
-
- Draw samples from a Beta distribution.
-
- The Beta distribution is a special case of the Dirichlet distribution,
- and is related to the Gamma distribution. It has the probability
- distribution function
-
- .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1}
- (1 - x)^{\\beta - 1},
-
- where the normalization, B, is the beta function,
-
- .. math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1}
- (1 - t)^{\\beta - 1} dt.
-
- It is often seen in Bayesian inference and order statistics.
-
- Parameters
- ----------
- a : float or array_like of floats
- Alpha, positive (>0).
- b : float or array_like of floats
- Beta, positive (>0).
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``a`` and ``b`` are both scalars.
- Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized beta distribution.
-
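-        Examples
-        --------
-        A small illustrative draw (values are random; the sample mean
-        should be near ``a / (a + b)``):
-
-        >>> rng = np.random.default_rng()
-        >>> s = rng.beta(2.0, 5.0, size=1000)
-        >>> s.mean()
-        0.286 # random, close to 2/7
-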
- """
- return cont(&random_beta, &self._bitgen, size, self.lock, 2,
- a, 'a', CONS_POSITIVE,
- b, 'b', CONS_POSITIVE,
- 0.0, '', CONS_NONE, None)
-
- def exponential(self, scale=1.0, size=None):
- """
- exponential(scale=1.0, size=None)
-
- Draw samples from an exponential distribution.
-
- Its probability density function is
-
- .. math:: f(x; \\frac{1}{\\beta}) = \\frac{1}{\\beta} \\exp(-\\frac{x}{\\beta}),
-
- for ``x > 0`` and 0 elsewhere. :math:`\\beta` is the scale parameter,
- which is the inverse of the rate parameter :math:`\\lambda = 1/\\beta`.
- The rate parameter is an alternative, widely used parameterization
- of the exponential distribution [3]_.
-
- The exponential distribution is a continuous analogue of the
- geometric distribution. It describes many common situations, such as
- the size of raindrops measured over many rainstorms [1]_, or the time
- between page requests to Wikipedia [2]_.
-
- Parameters
- ----------
- scale : float or array_like of floats
- The scale parameter, :math:`\\beta = 1/\\lambda`. Must be
- non-negative.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``scale`` is a scalar. Otherwise,
- ``np.array(scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized exponential distribution.
-
- References
- ----------
- .. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
- Random Signal Principles", 4th ed, 2001, p. 57.
- .. [2] Wikipedia, "Poisson process",
- https://en.wikipedia.org/wiki/Poisson_process
- .. [3] Wikipedia, "Exponential distribution",
- https://en.wikipedia.org/wiki/Exponential_distribution
-
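-        Examples
-        --------
-        A small illustrative draw (values are random; the sample mean
-        should be close to ``scale``):
-
-        >>> s = np.random.default_rng().exponential(3.0, 100000)
-        >>> s.mean()
-        3.004 # random, approximately equal to scale
-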
- """
- return cont(&random_exponential, &self._bitgen, size, self.lock, 1,
- scale, 'scale', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE,
- None)
-
- def standard_exponential(self, size=None, dtype=np.float64, method=u'zig', out=None):
- """
- standard_exponential(size=None, dtype='d', method='zig', out=None)
-
- Draw samples from the standard exponential distribution.
-
- `standard_exponential` is identical to the exponential distribution
- with a scale parameter of 1.
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- dtype : dtype, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
- method : str, optional
- Either 'inv' or 'zig'. 'inv' uses the default inverse CDF method.
- 'zig' uses the much faster Ziggurat method of Marsaglia and Tsang.
- out : ndarray, optional
- Alternative output array in which to place the result. If size is not None,
- it must have the same shape as the provided size and must match the type of
- the output values.
-
- Returns
- -------
- out : float or ndarray
- Drawn samples.
-
- Examples
- --------
- Output a 3x8000 array:
-
- >>> n = np.random.default_rng().standard_exponential((3, 8000))
-
- """
- key = np.dtype(dtype).name
- if key == 'float64':
- if method == u'zig':
- return double_fill(&random_standard_exponential_zig_fill, &self._bitgen, size, self.lock, out)
- else:
- return double_fill(&random_standard_exponential_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
- if method == u'zig':
- return float_fill(&random_standard_exponential_zig_f, &self._bitgen, size, self.lock, out)
- else:
- return float_fill(&random_standard_exponential_f, &self._bitgen, size, self.lock, out)
- else:
- raise TypeError('Unsupported dtype "%s" for standard_exponential'
- % key)
-
- def integers(self, low, high=None, size=None, dtype=np.int64, endpoint=False):
- """
- integers(low, high=None, size=None, dtype='int64', endpoint=False)
-
- Return random integers from `low` (inclusive) to `high` (exclusive), or
- if endpoint=True, `low` (inclusive) to `high` (inclusive). Replaces
- `RandomState.randint` (with endpoint=False) and
-        `RandomState.random_integers` (with endpoint=True).
-
- Return random integers from the "discrete uniform" distribution of
- the specified dtype. If `high` is None (the default), then results are
- from 0 to `low`.
-
- Parameters
- ----------
- low : int or array-like of ints
- Lowest (signed) integers to be drawn from the distribution (unless
- ``high=None``, in which case this parameter is 0 and this value is
- used for `high`).
- high : int or array-like of ints, optional
- If provided, one above the largest (signed) integer to be drawn
- from the distribution (see above for behavior if ``high=None``).
-            If array-like, must contain integer values.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result. All dtypes are determined by their
- name, i.e., 'int64', 'int', etc, so byteorder is not available
- and a specific precision may have different C types depending
-            on the platform. The default value is np.int64.
- endpoint : bool, optional
-            If true, sample from the interval [low, high] instead of the
-            default [low, high). Defaults to False.
-
- Returns
- -------
- out : int or ndarray of ints
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` not provided.
-
- Notes
- -----
- When using broadcasting with uint64 dtypes, the maximum value (2**64)
- cannot be represented as a standard integer type. The high array (or
- low if high is None) must have object dtype, e.g., array([2**64]).
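-
-        A sketch of such a call (illustrative only; the drawn value is
-        random):
-
-        >>> np.random.default_rng().integers(np.array([2**64], dtype=object),
-        ...                                  dtype=np.uint64)
-        array([17126799555627568881], dtype=uint64) # random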
-
- Examples
- --------
- >>> rng = np.random.default_rng()
- >>> rng.integers(2, size=10)
- array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0]) # random
- >>> rng.integers(1, size=10)
- array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
-
- Generate a 2 x 4 array of ints between 0 and 4, inclusive:
-
- >>> rng.integers(5, size=(2, 4))
- array([[4, 0, 2, 1],
- [3, 2, 2, 0]]) # random
-
- Generate a 1 x 3 array with 3 different upper bounds
-
- >>> rng.integers(1, [3, 5, 10])
- array([2, 2, 9]) # random
-
- Generate a 1 by 3 array with 3 different lower bounds
-
- >>> rng.integers([1, 5, 7], 10)
- array([9, 8, 7]) # random
-
- Generate a 2 by 4 array using broadcasting with dtype of uint8
-
- >>> rng.integers([1, 3, 5, 7], [[10], [20]], dtype=np.uint8)
- array([[ 8, 6, 9, 7],
- [ 1, 16, 9, 12]], dtype=uint8) # random
-
- References
- ----------
- .. [1] Daniel Lemire., "Fast Random Integer Generation in an Interval",
- ACM Transactions on Modeling and Computer Simulation 29 (1), 2019,
- http://arxiv.org/abs/1805.10941.
-
- """
- if high is None:
- high = low
- low = 0
-
- dt = np.dtype(dtype)
- key = dt.name
- if key not in _integers_types:
- raise TypeError('Unsupported dtype "%s" for integers' % key)
- if not dt.isnative:
- raise ValueError('Providing a dtype with a non-native byteorder '
- 'is not supported. If you require '
- 'platform-independent byteorder, call byteswap '
- 'when required.')
-
- # Implementation detail: the old API used a masked method to generate
- # bounded uniform integers. Lemire's method is preferable since it is
- # faster. randomgen allows a choice, we will always use the faster one.
- cdef bint _masked = False
-
- if key == 'int32':
- ret = _rand_int32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int64':
- ret = _rand_int64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int16':
- ret = _rand_int16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int8':
- ret = _rand_int8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint64':
- ret = _rand_uint64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint32':
- ret = _rand_uint32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint16':
- ret = _rand_uint16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint8':
- ret = _rand_uint8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'bool':
- ret = _rand_bool(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
-
- if size is None and dtype in (np.bool, np.int, np.long):
- if np.array(ret).shape == ():
- return dtype(ret)
- return ret
-
- def bytes(self, np.npy_intp length):
- """
- bytes(length)
-
- Return random bytes.
-
- Parameters
- ----------
- length : int
- Number of random bytes.
-
- Returns
- -------
- out : str
- String of length `length`.
-
- Examples
- --------
- >>> np.random.default_rng().bytes(10)
- ' eh\\x85\\x022SZ\\xbf\\xa4' #random
-
- """
- cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
- # Interpret the uint32s as little-endian to convert them to bytes
- # consistently.
- return self.integers(0, 4294967296, size=n_uint32,
- dtype=np.uint32).astype('<u4').tobytes()[:length]
-
- @cython.wraparound(True)
- def choice(self, a, size=None, replace=True, p=None, axis=0, bint shuffle=True):
- """
-        choice(a, size=None, replace=True, p=None, axis=0, shuffle=True)
-
- Generates a random sample from a given 1-D array
-
- Parameters
- ----------
- a : 1-D array-like or int
- If an ndarray, a random sample is generated from its elements.
- If an int, the random sample is generated as if a were np.arange(a)
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn from the 1-d `a`. If `a` has more
- than one dimension, the `size` shape will be inserted into the
- `axis` dimension, so the output ``ndim`` will be ``a.ndim - 1 +
- len(size)``. Default is None, in which case a single value is
- returned.
- replace : boolean, optional
- Whether the sample is with or without replacement
- p : 1-D array-like, optional
- The probabilities associated with each entry in a.
- If not given the sample assumes a uniform distribution over all
- entries in a.
- axis : int, optional
- The axis along which the selection is performed. The default, 0,
- selects by row.
- shuffle : boolean, optional
- Whether the sample is shuffled when sampling without replacement.
- Default is True, False provides a speedup.
-
- Returns
- -------
- samples : single item or ndarray
- The generated random samples
-
- Raises
- ------
- ValueError
- If a is an int and less than zero, if p is not 1-dimensional, if
- a is array-like with a size 0, if p is not a vector of
- probabilities, if a and p have different lengths, or if
- replace=False and the sample size is greater than the population
- size.
-
- See Also
- --------
- integers, shuffle, permutation
-
- Examples
- --------
- Generate a uniform random sample from np.arange(5) of size 3:
-
- >>> rng = np.random.default_rng()
- >>> rng.choice(5, 3)
- array([0, 3, 4]) # random
- >>> #This is equivalent to rng.integers(0,5,3)
-
- Generate a non-uniform random sample from np.arange(5) of size 3:
-
- >>> rng.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])
- array([3, 3, 0]) # random
-
- Generate a uniform random sample from np.arange(5) of size 3 without
- replacement:
-
- >>> rng.choice(5, 3, replace=False)
- array([3,1,0]) # random
- >>> #This is equivalent to rng.permutation(np.arange(5))[:3]
-
- Generate a non-uniform random sample from np.arange(5) of size
- 3 without replacement:
-
- >>> rng.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
- array([2, 3, 0]) # random
-
- Any of the above can be repeated with an arbitrary array-like
- instead of just integers. For instance:
-
- >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
- >>> rng.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
- array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'], # random
- dtype='<U11')
-
- """
-
- cdef int64_t val, t, loc, size_i, pop_size_i
- cdef int64_t *idx_data
- cdef np.npy_intp j
- cdef uint64_t set_size, mask
- cdef uint64_t[::1] hash_set
- # Format and Verify input
- a = np.array(a, copy=False)
- if a.ndim == 0:
- try:
- # __index__ must return an integer by python rules.
- pop_size = operator.index(a.item())
- except TypeError:
- raise ValueError("a must be 1-dimensional or an integer")
- if pop_size <= 0 and np.prod(size) != 0:
- raise ValueError("a must be greater than 0 unless no samples are taken")
- else:
- pop_size = a.shape[axis]
- if pop_size == 0 and np.prod(size) != 0:
- raise ValueError("'a' cannot be empty unless no samples are taken")
-
- if p is not None:
- d = len(p)
-
- atol = np.sqrt(np.finfo(np.float64).eps)
- if isinstance(p, np.ndarray):
- if np.issubdtype(p.dtype, np.floating):
- atol = max(atol, np.sqrt(np.finfo(p.dtype).eps))
-
- p = <np.ndarray>np.PyArray_FROM_OTF(
- p, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
- pix = <double*>np.PyArray_DATA(p)
-
- if p.ndim != 1:
- raise ValueError("'p' must be 1-dimensional")
- if p.size != pop_size:
- raise ValueError("'a' and 'p' must have same size")
- p_sum = kahan_sum(pix, d)
- if np.isnan(p_sum):
- raise ValueError("probabilities contain NaN")
- if np.logical_or.reduce(p < 0):
- raise ValueError("probabilities are not non-negative")
- if abs(p_sum - 1.) > atol:
- raise ValueError("probabilities do not sum to 1")
-
- shape = size
- if shape is not None:
- size = np.prod(shape, dtype=np.intp)
- else:
- size = 1
-
- # Actual sampling
- if replace:
- if p is not None:
- cdf = p.cumsum()
- cdf /= cdf[-1]
- uniform_samples = self.random(shape)
- idx = cdf.searchsorted(uniform_samples, side='right')
- idx = np.array(idx, copy=False, dtype=np.int64) # searchsorted returns a scalar
- else:
- idx = self.integers(0, pop_size, size=shape, dtype=np.int64)
- else:
- if size > pop_size:
- raise ValueError("Cannot take a larger sample than "
- "population when 'replace=False'")
- elif size < 0:
- raise ValueError("negative dimensions are not allowed")
-
- if p is not None:
- if np.count_nonzero(p > 0) < size:
- raise ValueError("Fewer non-zero entries in p than size")
- n_uniq = 0
- p = p.copy()
- found = np.zeros(shape, dtype=np.int64)
- flat_found = found.ravel()
- while n_uniq < size:
- x = self.random((size - n_uniq,))
- if n_uniq > 0:
- p[flat_found[0:n_uniq]] = 0
- cdf = np.cumsum(p)
- cdf /= cdf[-1]
- new = cdf.searchsorted(x, side='right')
- _, unique_indices = np.unique(new, return_index=True)
- unique_indices.sort()
- new = new.take(unique_indices)
- flat_found[n_uniq:n_uniq + new.size] = new
- n_uniq += new.size
- idx = found
- else:
- size_i = size
- pop_size_i = pop_size
-            # This is a heuristic tuning; it should be improvable.
- if shuffle:
- cutoff = 50
- else:
- cutoff = 20
- if pop_size_i > 10000 and (size_i > (pop_size_i // cutoff)):
- # Tail shuffle size elements
- idx = np.PyArray_Arange(0, pop_size_i, 1, np.NPY_INT64)
- idx_data = <int64_t*>(<np.ndarray>idx).data
- with self.lock, nogil:
- self._shuffle_int(pop_size_i, max(pop_size_i - size_i, 1),
- idx_data)
- # Copy to allow potentially large array backing idx to be gc
- idx = idx[(pop_size - size):].copy()
- else:
- # Floyd's algorithm
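-                    # Samples `size` distinct integers from range(pop_size)
-                    # without materializing the full population; collisions
-                    # are resolved in a small open-addressing hash set.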
- idx = np.empty(size, dtype=np.int64)
- idx_data = <int64_t*>np.PyArray_DATA(<np.ndarray>idx)
- # smallest power of 2 larger than 1.2 * size
- set_size = <uint64_t>(1.2 * size_i)
- mask = _gen_mask(set_size)
- set_size = 1 + mask
- hash_set = np.full(set_size, <uint64_t>-1, np.uint64)
- with self.lock, cython.wraparound(False), nogil:
- for j in range(pop_size_i - size_i, pop_size_i):
- val = random_bounded_uint64(&self._bitgen, 0, j, 0, 0)
- loc = val & mask
- while hash_set[loc] != <uint64_t>-1 and hash_set[loc] != <uint64_t>val:
- loc = (loc + 1) & mask
- if hash_set[loc] == <uint64_t>-1: # then val not in hash_set
- hash_set[loc] = val
- idx_data[j - pop_size_i + size_i] = val
- else: # we need to insert j instead
- loc = j & mask
- while hash_set[loc] != <uint64_t>-1:
- loc = (loc + 1) & mask
- hash_set[loc] = j
- idx_data[j - pop_size_i + size_i] = j
- if shuffle:
- self._shuffle_int(size_i, 1, idx_data)
- if shape is not None:
- idx.shape = shape
-
- if shape is None and isinstance(idx, np.ndarray):
- # In most cases a scalar will have been made an array
- idx = idx.item(0)
-
- # Use samples as indices for a if a is array-like
- if a.ndim == 0:
- return idx
-
- if shape is not None and idx.ndim == 0:
- # If size == () then the user requested a 0-d array as opposed to
- # a scalar object when size is None. However a[idx] is always a
- # scalar and not an array. So this makes sure the result is an
- # array, taking into account that np.array(item) may not work
- # for object arrays.
- res = np.empty((), dtype=a.dtype)
- res[()] = a[idx]
- return res
-
- # asarray downcasts on 32-bit platforms, always safe
- # no-op on 64-bit platforms
- return a.take(np.asarray(idx, dtype=np.intp), axis=axis)
-
- def uniform(self, low=0.0, high=1.0, size=None):
- """
- uniform(low=0.0, high=1.0, size=None)
-
- Draw samples from a uniform distribution.
-
- Samples are uniformly distributed over the half-open interval
- ``[low, high)`` (includes low, but excludes high). In other words,
- any value within the given interval is equally likely to be drawn
- by `uniform`.
-
- Parameters
- ----------
- low : float or array_like of floats, optional
- Lower boundary of the output interval. All values generated will be
- greater than or equal to low. The default value is 0.
- high : float or array_like of floats
- Upper boundary of the output interval. All values generated will be
- less than high. The default value is 1.0.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``low`` and ``high`` are both scalars.
- Otherwise, ``np.broadcast(low, high).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized uniform distribution.
-
- See Also
- --------
- integers : Discrete uniform distribution, yielding integers.
-        random : Floats uniformly distributed over ``[0, 1)``.
-
- Notes
- -----
- The probability density function of the uniform distribution is
-
- .. math:: p(x) = \\frac{1}{b - a}
-
- anywhere within the interval ``[a, b)``, and zero elsewhere.
-
- When ``high`` == ``low``, values of ``low`` will be returned.
- If ``high`` < ``low``, the results are officially undefined
- and may eventually raise an error, i.e. do not rely on this
- function to behave when passed arguments satisfying that
- inequality condition.
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> s = np.random.default_rng().uniform(-1,0,1000)
-
- All values are within the given interval:
-
- >>> np.all(s >= -1)
- True
- >>> np.all(s < 0)
- True
-
- Display the histogram of the samples, along with the
- probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 15, density=True)
- >>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
- >>> plt.show()
-
- """
- cdef bint is_scalar = True
- cdef np.ndarray alow, ahigh, arange
- cdef double _low, _high, range
- cdef object temp
-
- alow = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ALIGNED)
- ahigh = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_DOUBLE, np.NPY_ALIGNED)
-
- if np.PyArray_NDIM(alow) == np.PyArray_NDIM(ahigh) == 0:
- _low = PyFloat_AsDouble(low)
- _high = PyFloat_AsDouble(high)
- range = _high - _low
- if not np.isfinite(range):
- raise OverflowError('Range exceeds valid bounds')
-
- return cont(&random_uniform, &self._bitgen, size, self.lock, 2,
- _low, '', CONS_NONE,
- range, '', CONS_NONE,
- 0.0, '', CONS_NONE,
- None)
-
- temp = np.subtract(ahigh, alow)
- # needed to get around Pyrex's automatic reference-counting
- # rules because EnsureArray steals a reference
- Py_INCREF(temp)
-
- arange = <np.ndarray>np.PyArray_EnsureArray(temp)
- if not np.all(np.isfinite(arange)):
- raise OverflowError('Range exceeds valid bounds')
- return cont(&random_uniform, &self._bitgen, size, self.lock, 2,
- alow, '', CONS_NONE,
- arange, '', CONS_NONE,
- 0.0, '', CONS_NONE,
- None)
-
- # Complicated, continuous distributions:
- def standard_normal(self, size=None, dtype=np.float64, out=None):
- """
- standard_normal(size=None, dtype='d', out=None)
-
- Draw samples from a standard Normal distribution (mean=0, stdev=1).
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
- out : ndarray, optional
- Alternative output array in which to place the result. If size is not None,
- it must have the same shape as the provided size and must match the type of
- the output values.
-
- Returns
- -------
- out : float or ndarray
- A floating-point array of shape ``size`` of drawn samples, or a
- single sample if ``size`` was not specified.
-
- Notes
- -----
- For random samples from :math:`N(\\mu, \\sigma^2)`, use one of::
-
- mu + sigma * gen.standard_normal(size=...)
- gen.normal(mu, sigma, size=...)
-
- See Also
- --------
- normal :
- Equivalent function with additional ``loc`` and ``scale`` arguments
- for setting the mean and standard deviation.
-
- Examples
- --------
- >>> rng = np.random.default_rng()
- >>> rng.standard_normal()
- 2.1923875335537315 #random
-
- >>> s = rng.standard_normal(8000)
- >>> s
- array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, # random
- -0.38672696, -0.4685006 ]) # random
- >>> s.shape
- (8000,)
- >>> s = rng.standard_normal(size=(3, 4, 2))
- >>> s.shape
- (3, 4, 2)
-
- Two-by-four array of samples from :math:`N(3, 6.25)`:
-
- >>> 3 + 2.5 * rng.standard_normal(size=(2, 4))
- array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
- [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
-
- """
- key = np.dtype(dtype).name
- if key == 'float64':
- return double_fill(&random_gauss_zig_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
- return float_fill(&random_gauss_zig_f, &self._bitgen, size, self.lock, out)
-
- else:
- raise TypeError('Unsupported dtype "%s" for standard_normal' % key)
-
- def normal(self, loc=0.0, scale=1.0, size=None):
- """
- normal(loc=0.0, scale=1.0, size=None)
-
- Draw random samples from a normal (Gaussian) distribution.
-
- The probability density function of the normal distribution, first
- derived by De Moivre and 200 years later by both Gauss and Laplace
- independently [2]_, is often called the bell curve because of
- its characteristic shape (see the example below).
-
-        The normal distribution occurs often in nature. For example, it
- describes the commonly occurring distribution of samples influenced
- by a large number of tiny, random disturbances, each with its own
- unique distribution [2]_.
-
- Parameters
- ----------
- loc : float or array_like of floats
- Mean ("centre") of the distribution.
- scale : float or array_like of floats
- Standard deviation (spread or "width") of the distribution. Must be
- non-negative.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``loc`` and ``scale`` are both scalars.
- Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized normal distribution.
-
- See Also
- --------
- scipy.stats.norm : probability density function, distribution or
- cumulative density function, etc.
-
- Notes
- -----
- The probability density for the Gaussian distribution is
-
- .. math:: p(x) = \\frac{1}{\\sqrt{ 2 \\pi \\sigma^2 }}
- e^{ - \\frac{ (x - \\mu)^2 } {2 \\sigma^2} },
-
- where :math:`\\mu` is the mean and :math:`\\sigma` the standard
- deviation. The square of the standard deviation, :math:`\\sigma^2`,
- is called the variance.
-
- The function has its peak at the mean, and its "spread" increases with
- the standard deviation (the function reaches 0.607 times its maximum at
- :math:`x + \\sigma` and :math:`x - \\sigma` [2]_). This implies that
- :meth:`normal` is more likely to return samples lying close to the
- mean, rather than those far away.
-
- References
- ----------
- .. [1] Wikipedia, "Normal distribution",
- https://en.wikipedia.org/wiki/Normal_distribution
- .. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability,
- Random Variables and Random Signal Principles", 4th ed., 2001,
-               pp. 51, 125.
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> mu, sigma = 0, 0.1 # mean and standard deviation
- >>> s = np.random.default_rng().normal(mu, sigma, 1000)
-
- Verify the mean and the variance:
-
- >>> abs(mu - np.mean(s))
- 0.0 # may vary
-
- >>> abs(sigma - np.std(s, ddof=1))
- 0.1 # may vary
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 30, density=True)
- >>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
- ... np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
- ... linewidth=2, color='r')
- >>> plt.show()
-
- Two-by-four array of samples from N(3, 6.25):
-
- >>> np.random.default_rng().normal(3, 2.5, size=(2, 4))
- array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
- [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
-
- """
- return cont(&random_normal_zig, &self._bitgen, size, self.lock, 2,
- loc, '', CONS_NONE,
- scale, 'scale', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE,
- None)
-
- def standard_gamma(self, shape, size=None, dtype=np.float64, out=None):
- """
- standard_gamma(shape, size=None, dtype='d', out=None)
-
- Draw samples from a standard Gamma distribution.
-
- Samples are drawn from a Gamma distribution with specified parameters,
- shape (sometimes designated "k") and scale=1.
-
- Parameters
- ----------
- shape : float or array_like of floats
- Parameter, must be non-negative.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``shape`` is a scalar. Otherwise,
- ``np.array(shape).size`` samples are drawn.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
- out : ndarray, optional
- Alternative output array in which to place the result. If size is
- not None, it must have the same shape as the provided size and
- must match the type of the output values.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized standard gamma distribution.
-
- See Also
- --------
- scipy.stats.gamma : probability density function, distribution or
- cumulative density function, etc.
-
- Notes
- -----
- The probability density for the Gamma distribution is
-
- .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},
-
- where :math:`k` is the shape and :math:`\\theta` the scale,
- and :math:`\\Gamma` is the Gamma function.
-
- The Gamma distribution is often used to model the times to failure of
- electronic components, and arises naturally in processes for which the
- waiting times between Poisson distributed events are relevant.
-
- References
- ----------
- .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
- Wolfram Web Resource.
- http://mathworld.wolfram.com/GammaDistribution.html
- .. [2] Wikipedia, "Gamma distribution",
- https://en.wikipedia.org/wiki/Gamma_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> shape, scale = 2., 1. # mean and width
- >>> s = np.random.default_rng().standard_gamma(shape, 1000000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> import scipy.special as sps # doctest: +SKIP
- >>> count, bins, ignored = plt.hist(s, 50, density=True)
- >>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ # doctest: +SKIP
- ... (sps.gamma(shape) * scale**shape))
- >>> plt.plot(bins, y, linewidth=2, color='r') # doctest: +SKIP
- >>> plt.show()
-
- """
- cdef void *func
- key = np.dtype(dtype).name
- if key == 'float64':
- return cont(&random_standard_gamma_zig, &self._bitgen, size, self.lock, 1,
- shape, 'shape', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE,
- out)
- if key == 'float32':
- return cont_f(&random_standard_gamma_zig_f, &self._bitgen, size, self.lock,
- shape, 'shape', CONS_NON_NEGATIVE,
- out)
- else:
- raise TypeError('Unsupported dtype "%s" for standard_gamma' % key)
-
- def gamma(self, shape, scale=1.0, size=None):
- """
- gamma(shape, scale=1.0, size=None)
-
- Draw samples from a Gamma distribution.
-
- Samples are drawn from a Gamma distribution with specified parameters,
- `shape` (sometimes designated "k") and `scale` (sometimes designated
- "theta"), where both parameters are > 0.
-
- Parameters
- ----------
- shape : float or array_like of floats
- The shape of the gamma distribution. Must be non-negative.
- scale : float or array_like of floats, optional
- The scale of the gamma distribution. Must be non-negative.
- Default is equal to 1.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``shape`` and ``scale`` are both scalars.
- Otherwise, ``np.broadcast(shape, scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized gamma distribution.
-
- See Also
- --------
- scipy.stats.gamma : probability density function, distribution or
- cumulative density function, etc.
-
- Notes
- -----
- The probability density for the Gamma distribution is
-
- .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},
-
- where :math:`k` is the shape and :math:`\\theta` the scale,
- and :math:`\\Gamma` is the Gamma function.
-
- The Gamma distribution is often used to model the times to failure of
- electronic components, and arises naturally in processes for which the
- waiting times between Poisson distributed events are relevant.
-
- References
- ----------
- .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
- Wolfram Web Resource.
- http://mathworld.wolfram.com/GammaDistribution.html
- .. [2] Wikipedia, "Gamma distribution",
- https://en.wikipedia.org/wiki/Gamma_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> shape, scale = 2., 2. # mean=4, std=2*sqrt(2)
- >>> s = np.random.default_rng().gamma(shape, scale, 1000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> import scipy.special as sps # doctest: +SKIP
- >>> count, bins, ignored = plt.hist(s, 50, density=True)
- >>> y = bins**(shape-1)*(np.exp(-bins/scale) / # doctest: +SKIP
- ... (sps.gamma(shape)*scale**shape))
- >>> plt.plot(bins, y, linewidth=2, color='r') # doctest: +SKIP
- >>> plt.show()
-
- """
- return cont(&random_gamma, &self._bitgen, size, self.lock, 2,
- shape, 'shape', CONS_NON_NEGATIVE,
- scale, 'scale', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE, None)
-
- def f(self, dfnum, dfden, size=None):
- """
- f(dfnum, dfden, size=None)
-
- Draw samples from an F distribution.
-
- Samples are drawn from an F distribution with specified parameters,
- `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
- freedom in denominator), where both parameters must be greater than
- zero.
-
-        The F distribution (also known as the Fisher distribution) is a
-        continuous probability distribution that arises in ANOVA tests; its
-        random variate is the ratio of two chi-square variates.
-
- Parameters
- ----------
- dfnum : float or array_like of floats
- Degrees of freedom in numerator, must be > 0.
-        dfden : float or array_like of floats
- Degrees of freedom in denominator, must be > 0.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``dfnum`` and ``dfden`` are both scalars.
- Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Fisher distribution.
-
- See Also
- --------
- scipy.stats.f : probability density function, distribution or
- cumulative density function, etc.
-
- Notes
- -----
- The F statistic is used to compare in-group variances to between-group
- variances. Calculating the distribution depends on the sampling, and
- so it is a function of the respective degrees of freedom in the
- problem. The variable `dfnum` is the number of groups minus one, the
- between-groups degrees of freedom, while `dfden` is the within-groups
- degrees of freedom, the sum of the number of samples in each group
- minus the number of groups.
-
- References
- ----------
- .. [1] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
- Fifth Edition, 2002.
- .. [2] Wikipedia, "F-distribution",
- https://en.wikipedia.org/wiki/F-distribution
-
- Examples
- --------
- An example from Glantz [1]_, pp 47-40:
-
- Two groups, children of diabetics (25 people) and children from people
- without diabetes (25 controls). Fasting blood glucose was measured;
- the case group had a mean value of 86.1, and the controls had a mean
- value of 82.2. Standard deviations were 2.09 and 2.49 respectively. Are
- these data consistent with the null hypothesis that the parents'
- diabetic status does not affect their children's blood glucose levels?
- Calculating the F statistic from the data gives a value of 36.01.
-
- Draw samples from the distribution:
-
- >>> dfnum = 1. # between group degrees of freedom
- >>> dfden = 48. # within groups degrees of freedom
- >>> s = np.random.default_rng().f(dfnum, dfden, 1000)
-
- The lower bound for the top 1% of the samples is:
-
- >>> np.sort(s)[-10]
- 7.61988120985 # random
-
- So there is about a 1% chance that the F statistic will exceed 7.62.
- The measured value is 36, so the null hypothesis is rejected at the
- 1% level.
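-
- Equivalently, the fraction of samples exceeding the observed statistic
- estimates the p-value directly (an illustrative check):
-
- >>> np.sum(s > 36.01) / 1000.
- 0.0 # random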
-
- """
- return cont(&random_f, &self._bitgen, size, self.lock, 2,
- dfnum, 'dfnum', CONS_POSITIVE,
- dfden, 'dfden', CONS_POSITIVE,
- 0.0, '', CONS_NONE, None)
-
- def noncentral_f(self, dfnum, dfden, nonc, size=None):
- """
- noncentral_f(dfnum, dfden, nonc, size=None)
-
- Draw samples from the noncentral F distribution.
-
- Samples are drawn from an F distribution with specified parameters,
- `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
- freedom in denominator), where both parameters must be greater than
- zero. `nonc` is the non-centrality parameter.
-
- Parameters
- ----------
- dfnum : float or array_like of floats
- Numerator degrees of freedom, must be > 0.
-
- .. versionchanged:: 1.14.0
- Earlier NumPy versions required dfnum > 1.
- dfden : float or array_like of floats
- Denominator degrees of freedom, must be > 0.
- nonc : float or array_like of floats
- Non-centrality parameter, the sum of the squares of the numerator
- means, must be >= 0.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``dfnum``, ``dfden``, and ``nonc``
- are all scalars. Otherwise, ``np.broadcast(dfnum, dfden, nonc).size``
- samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized noncentral Fisher distribution.
-
- Notes
- -----
- When calculating the power of an experiment (power = probability of
- rejecting the null hypothesis when a specific alternative is true) the
- non-central F statistic becomes important. When the null hypothesis is
- true, the F statistic follows a central F distribution. When the null
- hypothesis is not true, then it follows a non-central F distribution.
-
- References
- ----------
- .. [1] Weisstein, Eric W. "Noncentral F-Distribution."
- From MathWorld--A Wolfram Web Resource.
- http://mathworld.wolfram.com/NoncentralF-Distribution.html
- .. [2] Wikipedia, "Noncentral F-distribution",
- https://en.wikipedia.org/wiki/Noncentral_F-distribution
-
- Examples
- --------
- In a study, testing for a specific alternative to the null hypothesis
- requires use of the Noncentral F distribution. We need to calculate the
- area in the tail of the distribution that exceeds the value of the F
- distribution for the null hypothesis. We'll plot the two probability
- distributions for comparison.
-
- >>> rng = np.random.default_rng()
- >>> dfnum = 3 # between group deg of freedom
- >>> dfden = 20 # within groups degrees of freedom
- >>> nonc = 3.0
- >>> nc_vals = rng.noncentral_f(dfnum, dfden, nonc, 1000000)
- >>> NF = np.histogram(nc_vals, bins=50, density=True)
- >>> c_vals = rng.f(dfnum, dfden, 1000000)
- >>> F = np.histogram(c_vals, bins=50, density=True)
- >>> import matplotlib.pyplot as plt
- >>> plt.plot(F[1][1:], F[0])
- >>> plt.plot(NF[1][1:], NF[0])
- >>> plt.show()
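-
- The fraction of noncentral samples exceeding the central
- distribution's 95th percentile gives a rough estimate of the power of
- the corresponding test (an illustrative sketch):
-
- >>> crit = np.percentile(c_vals, 95)
- >>> np.mean(nc_vals > crit)
- 0.2967 # random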
-
- """
- return cont(&random_noncentral_f, &self._bitgen, size, self.lock, 3,
- dfnum, 'dfnum', CONS_POSITIVE,
- dfden, 'dfden', CONS_POSITIVE,
- nonc, 'nonc', CONS_NON_NEGATIVE, None)
-
- def chisquare(self, df, size=None):
- """
- chisquare(df, size=None)
-
- Draw samples from a chi-square distribution.
-
- When `df` independent random variables, each with standard normal
- distributions (mean 0, variance 1), are squared and summed, the
- resulting distribution is chi-square (see Notes). This distribution
- is often used in hypothesis testing.
-
- Parameters
- ----------
- df : float or array_like of floats
- Number of degrees of freedom, must be > 0.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``df`` is a scalar. Otherwise,
- ``np.array(df).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized chi-square distribution.
-
- Raises
- ------
- ValueError
- When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)
- is given.
-
- Notes
- -----
- The variable obtained by summing the squares of `df` independent,
- standard normally distributed random variables:
-
- .. math:: Q = \\sum_{i=1}^{\\mathtt{df}} X^2_i
-
- is chi-square distributed, denoted
-
- .. math:: Q \\sim \\chi^2_k.
-
- The probability density function of the chi-squared distribution is
-
- .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}
- x^{k/2 - 1} e^{-x/2},
-
- where :math:`\\Gamma` is the gamma function,
-
- .. math:: \\Gamma(x) = \\int_0^{\\infty} t^{x - 1} e^{-t} dt.
-
- References
- ----------
- .. [1] NIST "Engineering Statistics Handbook"
- https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
-
- Examples
- --------
- >>> np.random.default_rng().chisquare(2,4)
- array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
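-
- The definition above can be checked directly by summing squared draws
- from `standard_normal` (a quick illustrative sketch):
-
- >>> rng = np.random.default_rng()
- >>> q = np.sum(rng.standard_normal((1000, 3))**2, axis=1)
- >>> q.mean() # should be close to df = 3
- 3.04 # random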
-
- """
- return cont(&random_chisquare, &self._bitgen, size, self.lock, 1,
- df, 'df', CONS_POSITIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE, None)
-
- def noncentral_chisquare(self, df, nonc, size=None):
- """
- noncentral_chisquare(df, nonc, size=None)
-
- Draw samples from a noncentral chi-square distribution.
-
- The noncentral :math:`\\chi^2` distribution is a generalization of
- the :math:`\\chi^2` distribution.
-
- Parameters
- ----------
- df : float or array_like of floats
- Degrees of freedom, must be > 0.
-
- .. versionchanged:: 1.10.0
- Earlier NumPy versions required df > 1.
- nonc : float or array_like of floats
- Non-centrality, must be non-negative.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``df`` and ``nonc`` are both scalars.
- Otherwise, ``np.broadcast(df, nonc).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized noncentral chi-square distribution.
-
- Notes
- -----
- The probability density function for the noncentral Chi-square
- distribution is
-
- .. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}
- \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}
- P_{Y_{df+2i}}(x),
-
- where :math:`Y_{q}` is the Chi-square with q degrees of freedom.
-
- References
- ----------
- .. [1] Wikipedia, "Noncentral chi-squared distribution"
- https://en.wikipedia.org/wiki/Noncentral_chi-squared_distribution
-
- Examples
- --------
- Draw values from the distribution and plot the histogram
-
- >>> rng = np.random.default_rng()
- >>> import matplotlib.pyplot as plt
- >>> values = plt.hist(rng.noncentral_chisquare(3, 20, 100000),
- ... bins=200, density=True)
- >>> plt.show()
-
- Draw values from a noncentral chisquare with very small noncentrality,
- and compare to a chisquare.
-
- >>> plt.figure()
- >>> values = plt.hist(rng.noncentral_chisquare(3, .0000001, 100000),
- ... bins=np.arange(0., 25, .1), density=True)
- >>> values2 = plt.hist(rng.chisquare(3, 100000),
- ... bins=np.arange(0., 25, .1), density=True)
- >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')
- >>> plt.show()
-
- Demonstrate how large values of non-centrality lead to a more symmetric
- distribution.
-
- >>> plt.figure()
- >>> values = plt.hist(rng.noncentral_chisquare(3, 20, 100000),
- ... bins=200, density=True)
- >>> plt.show()
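-
- The mean of the distribution is ``df + nonc``, which offers a quick
- check of the samples (illustrative only):
-
- >>> rng.noncentral_chisquare(3, 20, 100000).mean() # ~ df + nonc = 23
- 23.03 # random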
-
- """
- return cont(&random_noncentral_chisquare, &self._bitgen, size, self.lock, 2,
- df, 'df', CONS_POSITIVE,
- nonc, 'nonc', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE, None)
-
- def standard_cauchy(self, size=None):
- """
- standard_cauchy(size=None)
-
- Draw samples from a standard Cauchy distribution with mode = 0.
-
- Also known as the Lorentz distribution.
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
-
- Returns
- -------
- samples : ndarray or scalar
- The drawn samples.
-
- Notes
- -----
- The probability density function for the full Cauchy distribution is
-
- .. math:: P(x; x_0, \\gamma) = \\frac{1}{\\pi \\gamma \\bigl[ 1+
- (\\frac{x-x_0}{\\gamma})^2 \\bigr] }
-
- and the Standard Cauchy distribution just sets :math:`x_0=0` and
- :math:`\\gamma=1`
-
- The Cauchy distribution arises in the solution to the driven harmonic
- oscillator problem, and also describes spectral line broadening. It
- also describes the distribution of values at which a line tilted at
- a random angle will cut the x axis.
-
- When studying hypothesis tests that assume normality, seeing how the
- tests perform on data from a Cauchy distribution is a good indicator of
- their sensitivity to a heavy-tailed distribution, since the Cauchy looks
- very much like a Gaussian distribution, but with heavier tails.
-
- References
- ----------
- .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "Cauchy
- Distribution",
- https://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm
- .. [2] Weisstein, Eric W. "Cauchy Distribution." From MathWorld--A
- Wolfram Web Resource.
- http://mathworld.wolfram.com/CauchyDistribution.html
- .. [3] Wikipedia, "Cauchy distribution"
- https://en.wikipedia.org/wiki/Cauchy_distribution
-
- Examples
- --------
- Draw samples and plot the distribution:
-
- >>> import matplotlib.pyplot as plt
- >>> s = np.random.default_rng().standard_cauchy(1000000)
- >>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well
- >>> plt.hist(s, bins=100)
- >>> plt.show()
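-
- Because the Cauchy distribution has no mean, sample averages do not
- settle down as the sample size grows (an illustrative sketch; the
- three averages below are themselves Cauchy distributed):
-
- >>> rng = np.random.default_rng()
- >>> [rng.standard_cauchy(n).mean() for n in (100, 10000, 1000000)]
- [-0.39, 7.32, 1.26] # random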
-
- """
- return cont(&random_standard_cauchy, &self._bitgen, size, self.lock, 0,
- 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, None)
-
- def standard_t(self, df, size=None):
- """
- standard_t(df, size=None)
-
- Draw samples from a standard Student's t distribution with `df` degrees
- of freedom.
-
- A special case of the hyperbolic distribution. As `df` gets
- large, the result resembles that of the standard normal
- distribution (`standard_normal`).
-
- Parameters
- ----------
- df : float or array_like of floats
- Degrees of freedom, must be > 0.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``df`` is a scalar. Otherwise,
- ``np.array(df).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized standard Student's t distribution.
-
- Notes
- -----
- The probability density function for the t distribution is
-
- .. math:: P(x, df) = \\frac{\\Gamma(\\frac{df+1}{2})}{\\sqrt{\\pi df}
- \\Gamma(\\frac{df}{2})}\\Bigl( 1+\\frac{x^2}{df} \\Bigr)^{-(df+1)/2}
-
- The t test is based on an assumption that the data come from a
- Normal distribution. The t test provides a way to test whether
- the sample mean (that is the mean calculated from the data) is
- a good estimate of the true mean.
-
- The derivation of the t-distribution was first published in
- 1908 by William Gosset while working for the Guinness Brewery
- in Dublin. Due to proprietary issues, he had to publish under
- a pseudonym, and so he used the name Student.
-
- References
- ----------
- .. [1] Dalgaard, Peter, "Introductory Statistics With R",
- Springer, 2002.
- .. [2] Wikipedia, "Student's t-distribution"
- https://en.wikipedia.org/wiki/Student's_t-distribution
-
- Examples
- --------
- From Dalgaard page 83 [1]_, suppose the daily energy intake for 11
- women in kilojoules (kJ) is:
-
- >>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\
- ... 7515, 8230, 8770])
-
- Does their energy intake deviate systematically from the recommended
- value of 7725 kJ?
-
- With 11 observations we have 10 degrees of freedom. Does the sample
- mean differ from the recommended value at the 95% confidence level?
-
- >>> s = np.random.default_rng().standard_t(10, size=100000)
- >>> np.mean(intake)
- 6753.636363636364
- >>> intake.std(ddof=1)
- 1142.1232221373727
-
- Calculate the t statistic, setting the ddof parameter to the unbiased
- value so the divisor in the standard deviation will be degrees of
- freedom, N-1.
-
- >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))
- >>> import matplotlib.pyplot as plt
- >>> h = plt.hist(s, bins=100, density=True)
-
- For a one-sided t-test, how far out in the distribution does the t
- statistic appear?
-
- >>> np.sum(s<t) / float(len(s))
- 0.0090699999999999999 # random
-
- So the p-value is about 0.009, which means that, if the null
- hypothesis were true, a t statistic this small would occur only about
- 0.9% of the time; the null hypothesis can be rejected at the 1% level.
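-
- For comparison, the same test can be run with `scipy.stats.ttest_1samp`,
- which reports the t statistic together with a two-sided p-value
- (assuming SciPy is available):
-
- >>> from scipy import stats # doctest: +SKIP
- >>> stats.ttest_1samp(intake, 7725) # doctest: +SKIP
- Ttest_1sampResult(statistic=-2.8207540608310198, pvalue=0.018137235176105801)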
-
- """
- return cont(&random_standard_t, &self._bitgen, size, self.lock, 1,
- df, 'df', CONS_POSITIVE,
- 0, '', CONS_NONE,
- 0, '', CONS_NONE,
- None)
-
- def vonmises(self, mu, kappa, size=None):
- """
- vonmises(mu, kappa, size=None)
-
- Draw samples from a von Mises distribution.
-
- Samples are drawn from a von Mises distribution with specified mode
- (mu) and dispersion (kappa), on the interval [-pi, pi].
-
- The von Mises distribution (also known as the circular normal
- distribution) is a continuous probability distribution on the unit
- circle. It may be thought of as the circular analogue of the normal
- distribution.
-
- Parameters
- ----------
- mu : float or array_like of floats
- Mode ("center") of the distribution.
- kappa : float or array_like of floats
- Dispersion of the distribution, must be >= 0.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``mu`` and ``kappa`` are both scalars.
- Otherwise, ``np.broadcast(mu, kappa).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized von Mises distribution.
-
- See Also
- --------
- scipy.stats.vonmises : probability density function, distribution, or
- cumulative distribution function, etc.
-
- Notes
- -----
- The probability density for the von Mises distribution is
-
- .. math:: p(x) = \\frac{e^{\\kappa \\cos(x-\\mu)}}{2\\pi I_0(\\kappa)},
-
- where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion,
- and :math:`I_0(\\kappa)` is the modified Bessel function of order 0.
-
- The von Mises is named for Richard Edler von Mises, who was born in
- Austria-Hungary, in what is now Ukraine. He fled to the United
- States in 1939 and became a professor at Harvard. He worked in
- probability theory, aerodynamics, fluid mechanics, and philosophy of
- science.
-
- References
- ----------
- .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
- Mathematical Functions with Formulas, Graphs, and Mathematical
- Tables, 9th printing," New York: Dover, 1972.
- .. [2] von Mises, R., "Mathematical Theory of Probability
- and Statistics", New York: Academic Press, 1964.
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> mu, kappa = 0.0, 4.0 # mean and dispersion
- >>> s = np.random.default_rng().vonmises(mu, kappa, 1000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> from scipy.special import i0 # doctest: +SKIP
- >>> plt.hist(s, 50, density=True)
- >>> x = np.linspace(-np.pi, np.pi, num=51)
- >>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*i0(kappa)) # doctest: +SKIP
- >>> plt.plot(x, y, linewidth=2, color='r') # doctest: +SKIP
- >>> plt.show()
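-
- Since the samples are angles, the circular mean (the angle of the
- mean resultant vector) should be close to ``mu`` (an illustrative
- check):
-
- >>> np.angle(np.mean(np.exp(1j * s))) # ~ mu = 0.0
- 0.0065 # random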
-
- """
- return cont(&random_vonmises, &self._bitgen, size, self.lock, 2,
- mu, 'mu', CONS_NONE,
- kappa, 'kappa', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE, None)
-
- def pareto(self, a, size=None):
- """
- pareto(a, size=None)
-
- Draw samples from a Pareto II or Lomax distribution with
- specified shape.
-
- The Lomax or Pareto II distribution is a shifted Pareto
- distribution. The classical Pareto distribution can be
- obtained from the Lomax distribution by adding 1 and
- multiplying by the scale parameter ``m`` (see Notes). The
- smallest value of the Lomax distribution is zero while for the
- classical Pareto distribution it is ``mu``, where the standard
- Pareto distribution has location ``mu = 1``. Lomax can also
- be considered as a simplified version of the Generalized
- Pareto distribution (available in SciPy), with the scale set
- to one and the location set to zero.
-
- Pareto-distributed values are greater than zero and unbounded
- above. The distribution is popularly known through the "80-20
- rule": 80 percent of the weight falls in the lowest 20 percent
- of the range, while the other 20 percent fills the remaining 80
- percent of the range.
-
- Parameters
- ----------
- a : float or array_like of floats
- Shape of the distribution. Must be positive.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``a`` is a scalar. Otherwise,
- ``np.array(a).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Pareto distribution.
-
- See Also
- --------
- scipy.stats.lomax : probability density function, distribution or
- cumulative distribution function, etc.
- scipy.stats.genpareto : probability density function, distribution or
- cumulative distribution function, etc.
-
- Notes
- -----
- The probability density for the Pareto distribution is
-
- .. math:: p(x) = \\frac{am^a}{x^{a+1}}
-
- where :math:`a` is the shape and :math:`m` the scale.
-
- The Pareto distribution, named after the Italian economist
- Vilfredo Pareto, is a power law probability distribution
- useful in many real world problems. Outside the field of
- economics it is generally referred to as the Bradford
- distribution. Pareto developed the distribution to describe
- the distribution of wealth in an economy. It has also found
- use in insurance, web page access statistics, oil field sizes,
- and many other problems, including the download frequency for
- projects in Sourceforge [1]_. It is one of the so-called
- "fat-tailed" distributions.
-
-
- References
- ----------
- .. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of
- Sourceforge projects.
- .. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.
- .. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme
- Values, Birkhauser Verlag, Basel, pp 23-30.
- .. [4] Wikipedia, "Pareto distribution",
- https://en.wikipedia.org/wiki/Pareto_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> a, m = 3., 2. # shape and mode
- >>> s = (np.random.default_rng().pareto(a, 1000) + 1) * m
-
- Display the histogram of the samples, along with the probability
- density function:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, _ = plt.hist(s, 100, density=True)
- >>> fit = a*m**a / bins**(a+1)
- >>> plt.plot(bins, max(count)*fit/max(fit), linewidth=2, color='r')
- >>> plt.show()
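-
- Because the Lomax CDF is :math:`1 - (1 + x)^{-a}`, equivalent samples
- can be obtained by inverse-CDF sampling (a sketch of the relationship,
- not of how `pareto` is implemented internally):
-
- >>> u = np.random.default_rng().uniform(size=1000)
- >>> lomax = u**(-1./a) - 1.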
-
- """
- return cont(&random_pareto, &self._bitgen, size, self.lock, 1,
- a, 'a', CONS_POSITIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE, None)
-
- def weibull(self, a, size=None):
- """
- weibull(a, size=None)
-
- Draw samples from a Weibull distribution.
-
- Draw samples from a 1-parameter Weibull distribution with the given
- shape parameter `a`.
-
- .. math:: X = (-\\ln(U))^{1/a}
-
- Here, U is drawn from the uniform distribution over (0,1].
-
- The more common 2-parameter Weibull, including a scale parameter
- :math:`\\lambda`, is just :math:`X = \\lambda(-\\ln(U))^{1/a}`.
-
- Parameters
- ----------
- a : float or array_like of floats
- Shape parameter of the distribution. Must be nonnegative.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``a`` is a scalar. Otherwise,
- ``np.array(a).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Weibull distribution.
-
- See Also
- --------
- scipy.stats.weibull_max
- scipy.stats.weibull_min
- scipy.stats.genextreme
- gumbel
-
- Notes
- -----
- The Weibull (or Type III asymptotic extreme value distribution
- for smallest values, SEV Type III, or Rosin-Rammler
- distribution) is one of a class of Generalized Extreme Value
- (GEV) distributions used in modeling extreme value problems.
- This class includes the Gumbel and Frechet distributions.
-
- The probability density for the Weibull distribution is
-
- .. math:: p(x) = \\frac{a}
- {\\lambda}(\\frac{x}{\\lambda})^{a-1}e^{-(x/\\lambda)^a},
-
- where :math:`a` is the shape and :math:`\\lambda` the scale.
-
- The function has its peak (the mode) at
- :math:`\\lambda(\\frac{a-1}{a})^{1/a}`.
-
- When ``a = 1``, the Weibull distribution reduces to the exponential
- distribution.
-
- References
- ----------
- .. [1] Waloddi Weibull, Royal Technical University, Stockholm,
- 1939 "A Statistical Theory Of The Strength Of Materials",
- Ingeniorsvetenskapsakademiens Handlingar Nr 151, 1939,
- Generalstabens Litografiska Anstalts Forlag, Stockholm.
- .. [2] Waloddi Weibull, "A Statistical Distribution Function of
- Wide Applicability", Journal Of Applied Mechanics ASME Paper
- 1951.
- .. [3] Wikipedia, "Weibull distribution",
- https://en.wikipedia.org/wiki/Weibull_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> rng = np.random.default_rng()
- >>> a = 5. # shape
- >>> s = rng.weibull(a, 1000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> x = np.arange(1,100.)/50.
- >>> def weib(x,n,a):
- ... return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a)
-
- >>> count, bins, ignored = plt.hist(rng.weibull(5.,1000))
- >>> x = np.arange(1,100.)/50.
- >>> scale = count.max()/weib(x, 1., 5.).max()
- >>> plt.plot(x, weib(x, 1., 5.)*scale)
- >>> plt.show()
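-
- The theoretical mean of the 1-parameter Weibull is
- :math:`\\Gamma(1 + 1/a)`, which the sample mean should approximate
- (requires SciPy; an illustrative check only):
-
- >>> from scipy.special import gamma as Gamma # doctest: +SKIP
- >>> s.mean(), Gamma(1 + 1./a) # doctest: +SKIP
- (0.921, 0.918) # random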
-
- """
- return cont(&random_weibull, &self._bitgen, size, self.lock, 1,
- a, 'a', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE, None)
-
- def power(self, a, size=None):
- """
- power(a, size=None)
-
- Draws samples in [0, 1] from a power distribution with positive
- exponent a - 1.
-
- Also known as the power function distribution.
-
- Parameters
- ----------
- a : float or array_like of floats
- Parameter of the distribution. Must be positive.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``a`` is a scalar. Otherwise,
- ``np.array(a).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized power distribution.
-
- Raises
- ------
- ValueError
- If a <= 0.
-
- Notes
- -----
- The probability density function is
-
- .. math:: P(x; a) = ax^{a-1}, 0 \\le x \\le 1, a>0.
-
- The power function distribution is just the inverse of the Pareto
- distribution. It may also be seen as a special case of the Beta
- distribution.
-
- It is used, for example, in modeling the over-reporting of insurance
- claims.
-
- References
- ----------
- .. [1] Christian Kleiber, Samuel Kotz, "Statistical size distributions
- in economics and actuarial sciences", Wiley, 2003.
- .. [2] Heckert, N. A. and Filliben, James J. "NIST Handbook 148:
- Dataplot Reference Manual, Volume 2: Let Subcommands and Library
- Functions", National Institute of Standards and Technology
- Handbook Series, June 2003.
- https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> rng = np.random.default_rng()
- >>> a = 5. # shape
- >>> samples = 1000
- >>> s = rng.power(a, samples)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, bins=30)
- >>> x = np.linspace(0, 1, 100)
- >>> y = a*x**(a-1.)
- >>> normed_y = samples*np.diff(bins)[0]*y
- >>> plt.plot(x, normed_y)
- >>> plt.show()
-
- Compare the power function distribution to the inverse of the Pareto.
-
- >>> from scipy import stats # doctest: +SKIP
- >>> rvs = rng.power(5, 1000000)
- >>> rvsp = rng.pareto(5, 1000000)
- >>> xx = np.linspace(0,1,100)
- >>> powpdf = stats.powerlaw.pdf(xx,5) # doctest: +SKIP
-
- >>> plt.figure()
- >>> plt.hist(rvs, bins=50, density=True)
- >>> plt.plot(xx,powpdf,'r-') # doctest: +SKIP
- >>> plt.title('power(5)')
-
- >>> plt.figure()
- >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
- >>> plt.plot(xx,powpdf,'r-') # doctest: +SKIP
- >>> plt.title('inverse of 1 + Generator.pareto(5)')
-
- """
- return cont(&random_power, &self._bitgen, size, self.lock, 1,
- a, 'a', CONS_POSITIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE, None)
-
- def laplace(self, loc=0.0, scale=1.0, size=None):
- """
- laplace(loc=0.0, scale=1.0, size=None)
-
- Draw samples from the Laplace or double exponential distribution with
- specified location (or mean) and scale (decay).
-
- The Laplace distribution is similar to the Gaussian/normal distribution,
- but is sharper at the peak and has fatter tails. It represents the
- difference between two independent, identically distributed exponential
- random variables.
-
- Parameters
- ----------
- loc : float or array_like of floats, optional
- The position, :math:`\\mu`, of the distribution peak. Default is 0.
- scale : float or array_like of floats, optional
- :math:`\\lambda`, the exponential decay. Default is 1. Must be non-
- negative.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``loc`` and ``scale`` are both scalars.
- Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Laplace distribution.
-
- Notes
- -----
- It has the probability density function
-
- .. math:: f(x; \\mu, \\lambda) = \\frac{1}{2\\lambda}
- \\exp\\left(-\\frac{|x - \\mu|}{\\lambda}\\right).
-
- The first law of Laplace, from 1774, states that the frequency
- of an error can be expressed as an exponential function of the
- absolute magnitude of the error, which leads to the Laplace
- distribution. For many problems in economics and health
- sciences, this distribution seems to model the data better
- than the standard Gaussian distribution.
-
- References
- ----------
- .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
- Mathematical Functions with Formulas, Graphs, and Mathematical
- Tables, 9th printing," New York: Dover, 1972.
- .. [2] Kotz, Samuel, et al. "The Laplace Distribution and
- Generalizations, " Birkhauser, 2001.
- .. [3] Weisstein, Eric W. "Laplace Distribution."
- From MathWorld--A Wolfram Web Resource.
- http://mathworld.wolfram.com/LaplaceDistribution.html
- .. [4] Wikipedia, "Laplace distribution",
- https://en.wikipedia.org/wiki/Laplace_distribution
-
- Examples
- --------
- Draw samples from the distribution
-
- >>> loc, scale = 0., 1.
- >>> s = np.random.default_rng().laplace(loc, scale, 1000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 30, density=True)
- >>> x = np.arange(-8., 8., .01)
- >>> pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)
- >>> plt.plot(x, pdf)
-
- Plot Gaussian for comparison:
-
- >>> g = (1/(scale * np.sqrt(2 * np.pi)) *
- ... np.exp(-(x - loc)**2 / (2 * scale**2)))
- >>> plt.plot(x,g)
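-
- For the Laplace distribution the mean absolute deviation equals the
- scale parameter, which gives a quick sanity check (illustrative only):
-
- >>> np.mean(np.abs(s - loc)) # ~ scale = 1
- 0.997 # random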
-
- """
- return cont(&random_laplace, &self._bitgen, size, self.lock, 2,
- loc, 'loc', CONS_NONE,
- scale, 'scale', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE, None)
-
- def gumbel(self, loc=0.0, scale=1.0, size=None):
- """
- gumbel(loc=0.0, scale=1.0, size=None)
-
- Draw samples from a Gumbel distribution.
-
- Draw samples from a Gumbel distribution with specified location and
- scale. For more information on the Gumbel distribution, see
- Notes and References below.
-
- Parameters
- ----------
- loc : float or array_like of floats, optional
- The location of the mode of the distribution. Default is 0.
- scale : float or array_like of floats, optional
- The scale parameter of the distribution. Default is 1. Must be non-
- negative.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``loc`` and ``scale`` are both scalars.
- Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Gumbel distribution.
-
- See Also
- --------
- scipy.stats.gumbel_l
- scipy.stats.gumbel_r
- scipy.stats.genextreme
- weibull
-
- Notes
- -----
- The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme
- Value Type I) distribution is one of a class of Generalized Extreme
- Value (GEV) distributions used in modeling extreme value problems.
- The Gumbel is a special case of the Extreme Value Type I distribution
- for maximums from distributions with "exponential-like" tails.
-
- The probability density for the Gumbel distribution is
-
- .. math:: p(x) = \\frac{e^{-(x - \\mu)/ \\beta}}{\\beta} e^{ -e^{-(x - \\mu)/
- \\beta}},
-
- where :math:`\\mu` is the mode, a location parameter, and
- :math:`\\beta` is the scale parameter.
-
- The Gumbel (named for German mathematician Emil Julius Gumbel) was used
- very early in the hydrology literature, for modeling the occurrence of
- flood events. It is also used for modeling maximum wind speed and
- rainfall rates. It is a "fat-tailed" distribution - the probability of
- an event in the tail of the distribution is larger than if one used a
- Gaussian, hence the surprisingly frequent occurrence of 100-year
- floods. Floods were initially modeled as a Gaussian process, which
- underestimated the frequency of extreme events.
-
- It is one of a class of extreme value distributions, the Generalized
- Extreme Value (GEV) distributions, which also includes the Weibull and
- Frechet.
-
- The function has a mean of :math:`\\mu + 0.57721\\beta` and a variance
- of :math:`\\frac{\\pi^2}{6}\\beta^2`.
-
- References
- ----------
- .. [1] Gumbel, E. J., "Statistics of Extremes,"
- New York: Columbia University Press, 1958.
- .. [2] Reiss, R.-D. and Thomas, M., "Statistical Analysis of Extreme
- Values from Insurance, Finance, Hydrology and Other Fields,"
- Basel: Birkhauser Verlag, 2001.
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> rng = np.random.default_rng()
- >>> mu, beta = 0, 0.1 # location and scale
- >>> s = rng.gumbel(mu, beta, 1000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 30, density=True)
- >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
- ... * np.exp( -np.exp( -(bins - mu) /beta) ),
- ... linewidth=2, color='r')
- >>> plt.show()
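-
- As noted above, the mean of the distribution is
- :math:`\\mu + 0.57721\\beta`, about 0.0577 here (an illustrative
- check):
-
- >>> s.mean()
- 0.0576 # random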
-
- Show how an extreme value distribution can arise from a Gaussian process
- and compare to a Gaussian:
-
- >>> means = []
- >>> maxima = []
- >>> for i in range(0, 1000):
- ... a = rng.normal(mu, beta, 1000)
- ... means.append(a.mean())
- ... maxima.append(a.max())
- >>> count, bins, ignored = plt.hist(maxima, 30, density=True)
- >>> beta = np.std(maxima) * np.sqrt(6) / np.pi
- >>> mu = np.mean(maxima) - 0.57721*beta
- >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
- ... * np.exp(-np.exp(-(bins - mu)/beta)),
- ... linewidth=2, color='r')
- >>> plt.plot(bins, 1/(beta * np.sqrt(2 * np.pi))
- ... * np.exp(-(bins - mu)**2 / (2 * beta**2)),
- ... linewidth=2, color='g')
- >>> plt.show()
-
- """
- return cont(&random_gumbel, &self._bitgen, size, self.lock, 2,
- loc, 'loc', CONS_NONE,
- scale, 'scale', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE, None)
-
- def logistic(self, loc=0.0, scale=1.0, size=None):
- """
- logistic(loc=0.0, scale=1.0, size=None)
-
- Draw samples from a logistic distribution.
-
- Samples are drawn from a logistic distribution with specified
- parameters, loc (location or mean, also median), and scale (>0).
-
- Parameters
- ----------
- loc : float or array_like of floats, optional
- Parameter of the distribution. Default is 0.
- scale : float or array_like of floats, optional
- Parameter of the distribution. Must be non-negative.
- Default is 1.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``loc`` and ``scale`` are both scalars.
- Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized logistic distribution.
-
- See Also
- --------
- scipy.stats.logistic : probability density function, distribution or
- cumulative distribution function, etc.
-
- Notes
- -----
- The probability density for the Logistic distribution is
-
- .. math:: P(x) = \\frac{e^{-(x-\\mu)/s}}{s(1+e^{-(x-\\mu)/s})^2},
-
- where :math:`\\mu` = location and :math:`s` = scale.
-
- The Logistic distribution is used in Extreme Value problems where it
- can act as a mixture of Gumbel distributions, in Epidemiology, and by
- the World Chess Federation (FIDE) where it is used in the Elo ranking
- system, assuming the performance of each player is a logistically
- distributed random variable.
-
- References
- ----------
- .. [1] Reiss, R.-D. and Thomas M. (2001), "Statistical Analysis of
- Extreme Values, from Insurance, Finance, Hydrology and Other
- Fields," Birkhauser Verlag, Basel, pp 132-133.
- .. [2] Weisstein, Eric W. "Logistic Distribution." From
- MathWorld--A Wolfram Web Resource.
- http://mathworld.wolfram.com/LogisticDistribution.html
- .. [3] Wikipedia, "Logistic-distribution",
- https://en.wikipedia.org/wiki/Logistic_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> loc, scale = 10, 1
- >>> s = np.random.default_rng().logistic(loc, scale, 10000)
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, bins=50)
-
- Plot against the distribution:
-
- >>> def logist(x, loc, scale):
- ... return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2)
- >>> lgst_val = logist(bins, loc, scale)
- >>> plt.plot(bins, lgst_val * count.max() / lgst_val.max())
- >>> plt.show()
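-
- The variance of the logistic distribution is :math:`s^2\\pi^2/3`,
- about 3.29 for ``scale = 1``, which offers a quick check of the
- samples (illustrative only):
-
- >>> s.var()
- 3.29 # random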
-
- """
- return cont(&random_logistic, &self._bitgen, size, self.lock, 2,
- loc, 'loc', CONS_NONE,
- scale, 'scale', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE, None)
-
- def lognormal(self, mean=0.0, sigma=1.0, size=None):
- """
- lognormal(mean=0.0, sigma=1.0, size=None)
-
- Draw samples from a log-normal distribution.
-
- Draw samples from a log-normal distribution with specified mean,
- standard deviation, and array shape. Note that the mean and standard
- deviation are not the values for the distribution itself, but of the
- underlying normal distribution it is derived from.
-
- Parameters
- ----------
- mean : float or array_like of floats, optional
- Mean value of the underlying normal distribution. Default is 0.
- sigma : float or array_like of floats, optional
- Standard deviation of the underlying normal distribution. Must be
- non-negative. Default is 1.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``mean`` and ``sigma`` are both scalars.
- Otherwise, ``np.broadcast(mean, sigma).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized log-normal distribution.
-
- See Also
- --------
- scipy.stats.lognorm : probability density function, distribution,
- cumulative distribution function, etc.
-
- Notes
- -----
- A variable `x` has a log-normal distribution if `log(x)` is normally
- distributed. The probability density function for the log-normal
- distribution is:
-
- .. math:: p(x) = \\frac{1}{\\sigma x \\sqrt{2\\pi}}
- e^{(-\\frac{(\\ln(x)-\\mu)^2}{2\\sigma^2})}
-
- where :math:`\\mu` is the mean and :math:`\\sigma` is the standard
- deviation of the normally distributed logarithm of the variable.
- A log-normal distribution results if a random variable is the *product*
- of a large number of independent, identically-distributed variables in
- the same way that a normal distribution results if the variable is the
- *sum* of a large number of independent, identically-distributed
- variables.
-
- References
- ----------
- .. [1] Limpert, E., Stahel, W. A., and Abbt, M., "Log-normal
- Distributions across the Sciences: Keys and Clues,"
- BioScience, Vol. 51, No. 5, May, 2001.
- https://stat.ethz.ch/~stahel/lognormal/bioscience.pdf
- .. [2] Reiss, R.D. and Thomas, M., "Statistical Analysis of Extreme
- Values," Basel: Birkhauser Verlag, 2001, pp. 31-32.
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> rng = np.random.default_rng()
- >>> mu, sigma = 3., 1. # mean and standard deviation
- >>> s = rng.lognormal(mu, sigma, 1000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 100, density=True, align='mid')
-
- >>> x = np.linspace(min(bins), max(bins), 10000)
- >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
- ... / (x * sigma * np.sqrt(2 * np.pi)))
-
- >>> plt.plot(x, pdf, linewidth=2, color='r')
- >>> plt.axis('tight')
- >>> plt.show()
-
- Demonstrate that taking the products of random samples from a normal
- distribution can be fit well by a log-normal probability density
- function.
-
- >>> # Generate a thousand samples: each is the product of 100 random
- >>> # values, drawn from a normal distribution.
- >>> b = []
- >>> for i in range(1000):
- ... a = 10. + rng.standard_normal(100)
- ... b.append(np.prod(a))
-
- >>> b = np.array(b) / np.min(b) # scale values to be positive
- >>> count, bins, ignored = plt.hist(b, 100, density=True, align='mid')
- >>> sigma = np.std(np.log(b))
- >>> mu = np.mean(np.log(b))
-
- >>> x = np.linspace(min(bins), max(bins), 10000)
- >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
- ... / (x * sigma * np.sqrt(2 * np.pi)))
-
- >>> plt.plot(x, pdf, color='r', linewidth=2)
- >>> plt.show()
-
- """
- return cont(&random_lognormal, &self._bitgen, size, self.lock, 2,
- mean, 'mean', CONS_NONE,
- sigma, 'sigma', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE, None)
-
- def rayleigh(self, scale=1.0, size=None):
- """
- rayleigh(scale=1.0, size=None)
-
- Draw samples from a Rayleigh distribution.
-
- The :math:`\\chi` and Weibull distributions are generalizations of the
- Rayleigh.
-
- Parameters
- ----------
- scale : float or array_like of floats, optional
- Scale, also equals the mode. Must be non-negative. Default is 1.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``scale`` is a scalar. Otherwise,
- ``np.array(scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Rayleigh distribution.
-
- Notes
- -----
- The probability density function for the Rayleigh distribution is
-
- .. math:: P(x;scale) = \\frac{x}{scale^2}e^{\\frac{-x^2}{2 \\cdotp scale^2}}
-
- The Rayleigh distribution would arise, for example, if the East
- and North components of the wind velocity had identical zero-mean
- Gaussian distributions. Then the wind speed would have a Rayleigh
- distribution.
-
- References
- ----------
- .. [1] Brighton Webs Ltd., "Rayleigh Distribution,"
- https://web.archive.org/web/20090514091424/http://brighton-webs.co.uk:80/distributions/rayleigh.asp
- .. [2] Wikipedia, "Rayleigh distribution"
- https://en.wikipedia.org/wiki/Rayleigh_distribution
-
- Examples
- --------
- Draw values from the distribution and plot the histogram
-
- >>> from matplotlib.pyplot import hist
- >>> rng = np.random.default_rng()
- >>> values = hist(rng.rayleigh(3, 100000), bins=200, density=True)
-
- Wave heights tend to follow a Rayleigh distribution. If the mean wave
- height is 1 meter, what fraction of waves are likely to be larger than 3
- meters?
-
- >>> meanvalue = 1
- >>> modevalue = np.sqrt(2 / np.pi) * meanvalue
- >>> s = rng.rayleigh(modevalue, 1000000)
-
- The percentage of waves larger than 3 meters is:
-
- >>> 100.*sum(s>3)/1000000.
- 0.087300000000000003 # random
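-
- This agrees with the exact survival function of the Rayleigh
- distribution, ``100 * np.exp(-x**2 / (2 * modevalue**2))`` percent at
- ``x = 3`` (an illustrative check):
-
- >>> round(100. * np.exp(-9 / (2 * modevalue**2)), 4)
- 0.0851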
-
- """
- return cont(&random_rayleigh, &self._bitgen, size, self.lock, 1,
- scale, 'scale', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE, None)
-
- def wald(self, mean, scale, size=None):
- """
- wald(mean, scale, size=None)
-
- Draw samples from a Wald, or inverse Gaussian, distribution.
-
- As the scale approaches infinity, the distribution becomes more like a
- Gaussian. Some references claim that the Wald is an inverse Gaussian
- with mean equal to 1, but this is by no means universal.
-
- The inverse Gaussian distribution was first studied in relationship to
- Brownian motion. In 1956 M.C.K. Tweedie used the name inverse Gaussian
- because there is an inverse relationship between the time to cover a
- unit distance and distance covered in unit time.
-
- Parameters
- ----------
- mean : float or array_like of floats
- Distribution mean, must be > 0.
- scale : float or array_like of floats
- Scale parameter, must be > 0.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``mean`` and ``scale`` are both scalars.
- Otherwise, ``np.broadcast(mean, scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Wald distribution.
-
- Notes
- -----
- The probability density function for the Wald distribution is
-
- .. math:: P(x;mean,scale) = \\sqrt{\\frac{scale}{2\\pi x^3}}e^{
- \\frac{-scale(x-mean)^2}{2\\cdotp mean^2x}}
-
- As noted above, the inverse Gaussian distribution first arose
- from attempts to model Brownian motion. It is also a
- competitor to the Weibull for use in reliability modeling and
- in modeling stock returns and interest rate processes.
-
- References
- ----------
- .. [1] Brighton Webs Ltd., Wald Distribution,
- https://web.archive.org/web/20090423014010/http://www.brighton-webs.co.uk:80/distributions/wald.asp
- .. [2] Chhikara, Raj S., and Folks, J. Leroy, "The Inverse Gaussian
- Distribution: Theory : Methodology, and Applications", CRC Press,
- 1988.
- .. [3] Wikipedia, "Inverse Gaussian distribution"
- https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution
-
- Examples
- --------
- Draw values from the distribution and plot the histogram:
-
- >>> import matplotlib.pyplot as plt
- >>> h = plt.hist(np.random.default_rng().wald(3, 2, 100000), bins=200, density=True)
- >>> plt.show()
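-
- The distribution has mean ``mean`` and variance ``mean**3 / scale``
- (3 and 13.5 for the parameters above), which gives a quick check
- (illustrative only):
-
- >>> s = np.random.default_rng().wald(3, 2, 100000)
- >>> s.mean(), s.var()
- (3.001, 13.49) # random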
-
- """
- return cont(&random_wald, &self._bitgen, size, self.lock, 2,
- mean, 'mean', CONS_POSITIVE,
- scale, 'scale', CONS_POSITIVE,
- 0.0, '', CONS_NONE, None)
-
- def triangular(self, left, mode, right, size=None):
- """
- triangular(left, mode, right, size=None)
-
- Draw samples from the triangular distribution over the
- interval ``[left, right]``.
-
- The triangular distribution is a continuous probability
- distribution with lower limit left, peak at mode, and upper
- limit right. Unlike the other distributions, these parameters
- directly define the shape of the pdf.
-
- Parameters
- ----------
- left : float or array_like of floats
- Lower limit.
- mode : float or array_like of floats
- The value where the peak of the distribution occurs.
- The value must fulfill the condition ``left <= mode <= right``.
- right : float or array_like of floats
- Upper limit, must be larger than `left`.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``left``, ``mode``, and ``right``
- are all scalars. Otherwise, ``np.broadcast(left, mode, right).size``
- samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized triangular distribution.
-
- Notes
- -----
- The probability density function for the triangular distribution is
-
- .. math:: P(x;l, m, r) = \\begin{cases}
- \\frac{2(x-l)}{(r-l)(m-l)}& \\text{for $l \\leq x \\leq m$},\\\\
- \\frac{2(r-x)}{(r-l)(r-m)}& \\text{for $m \\leq x \\leq r$},\\\\
- 0& \\text{otherwise}.
- \\end{cases}
-
- The triangular distribution is often used in ill-defined
- problems where the underlying distribution is not known, but
- some knowledge of the limits and mode exists. Often it is used
- in simulations.
-
- References
- ----------
- .. [1] Wikipedia, "Triangular distribution"
- https://en.wikipedia.org/wiki/Triangular_distribution
-
- Examples
- --------
- Draw values from the distribution and plot the histogram:
-
- >>> import matplotlib.pyplot as plt
- >>> h = plt.hist(np.random.default_rng().triangular(-3, 0, 8, 100000), bins=200,
- ... density=True)
- >>> plt.show()
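-
- The mean of the triangular distribution is
- ``(left + mode + right) / 3``, about 1.67 for the parameters above
- (an illustrative check):
-
- >>> np.random.default_rng().triangular(-3, 0, 8, 100000).mean()
- 1.667 # random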
-
- """
- cdef bint is_scalar = True
- cdef double fleft, fmode, fright
- cdef np.ndarray oleft, omode, oright
-
- oleft = <np.ndarray>np.PyArray_FROM_OTF(left, np.NPY_DOUBLE, np.NPY_ALIGNED)
- omode = <np.ndarray>np.PyArray_FROM_OTF(mode, np.NPY_DOUBLE, np.NPY_ALIGNED)
- oright = <np.ndarray>np.PyArray_FROM_OTF(right, np.NPY_DOUBLE, np.NPY_ALIGNED)
-
- if np.PyArray_NDIM(oleft) == np.PyArray_NDIM(omode) == np.PyArray_NDIM(oright) == 0:
- fleft = PyFloat_AsDouble(left)
- fright = PyFloat_AsDouble(right)
- fmode = PyFloat_AsDouble(mode)
-
- if fleft > fmode:
- raise ValueError("left > mode")
- if fmode > fright:
- raise ValueError("mode > right")
- if fleft == fright:
- raise ValueError("left == right")
- return cont(&random_triangular, &self._bitgen, size, self.lock, 3,
- fleft, '', CONS_NONE,
- fmode, '', CONS_NONE,
- fright, '', CONS_NONE, None)
-
- if np.any(np.greater(oleft, omode)):
- raise ValueError("left > mode")
- if np.any(np.greater(omode, oright)):
- raise ValueError("mode > right")
- if np.any(np.equal(oleft, oright)):
- raise ValueError("left == right")
-
- return cont_broadcast_3(&random_triangular, &self._bitgen, size, self.lock,
- oleft, '', CONS_NONE,
- omode, '', CONS_NONE,
- oright, '', CONS_NONE)
-
- # Complicated, discrete distributions:
- def binomial(self, n, p, size=None):
- """
- binomial(n, p, size=None)
-
- Draw samples from a binomial distribution.
-
- Samples are drawn from a binomial distribution with specified
- parameters, n trials and p probability of success where
- n is an integer >= 0 and p is in the interval [0, 1]. (n may be
- input as a float, but it is truncated to an integer in use.)
-
- Parameters
- ----------
- n : int or array_like of ints
- Parameter of the distribution, >= 0. Floats are also accepted,
- but they will be truncated to integers.
- p : float or array_like of floats
- Parameter of the distribution, >= 0 and <=1.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``n`` and ``p`` are both scalars.
- Otherwise, ``np.broadcast(n, p).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized binomial distribution, where
- each sample is equal to the number of successes over the n trials.
-
- See Also
- --------
- scipy.stats.binom : probability density function, distribution or
- cumulative distribution function, etc.
-
- Notes
- -----
- The probability density for the binomial distribution is
-
- .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N},
-
- where :math:`n` is the number of trials, :math:`p` is the probability
- of success, and :math:`N` is the number of successes.
-
- When estimating the standard error of a proportion in a population by
- using a random sample, the normal distribution works well unless the
- product p*n <= 5, where p = population proportion estimate and n =
- number of samples, in which case the binomial distribution is used
- instead. For example, a sample of 15 people shows 4 who are
- left-handed and 11 who are right-handed. Then p = 4/15 = 27%.
- 0.27*15 = 4, so the binomial distribution should be used in this case.
-
- References
- ----------
- .. [1] Dalgaard, Peter, "Introductory Statistics with R",
- Springer-Verlag, 2002.
- .. [2] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
- Fifth Edition, 2002.
- .. [3] Lentner, Marvin, "Elementary Applied Statistics", Bogden
- and Quigley, 1972.
- .. [4] Weisstein, Eric W. "Binomial Distribution." From MathWorld--A
- Wolfram Web Resource.
- http://mathworld.wolfram.com/BinomialDistribution.html
- .. [5] Wikipedia, "Binomial distribution",
- https://en.wikipedia.org/wiki/Binomial_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> rng = np.random.default_rng()
- >>> n, p = 10, .5 # number of trials, probability of each trial
- >>> s = rng.binomial(n, p, 1000)
- # result of flipping a coin 10 times, tested 1000 times.
-
- A real world example. A company drills 9 wild-cat oil exploration
- wells, each with an estimated probability of success of 0.1. All nine
- wells fail. What is the probability of that happening?
-
- Let's do 20,000 trials of the model, and count the number that
- generate zero positive results.
-
- >>> sum(rng.binomial(9, 0.1, 20000) == 0)/20000.
- # answer = 0.38885, or about 39%.
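-
- For comparison, the exact answer is ``0.9**9``, since all nine wells
- must independently fail:
-
- >>> round(0.9**9, 9)
- 0.387420489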
-
- """
-
- # Uses a custom implementation since self._binomial is required
- cdef double _dp = 0
- cdef int64_t _in = 0
- cdef bint is_scalar = True
- cdef np.npy_intp i, cnt
- cdef np.ndarray randoms
- cdef np.int64_t *randoms_data
- cdef np.broadcast it
-
- p_arr = <np.ndarray>np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ALIGNED)
- is_scalar = is_scalar and np.PyArray_NDIM(p_arr) == 0
- n_arr = <np.ndarray>np.PyArray_FROM_OTF(n, np.NPY_INT64, np.NPY_ALIGNED)
- is_scalar = is_scalar and np.PyArray_NDIM(n_arr) == 0
-
- if not is_scalar:
- check_array_constraint(p_arr, 'p', CONS_BOUNDED_0_1)
- check_array_constraint(n_arr, 'n', CONS_NON_NEGATIVE)
- if size is not None:
- randoms = <np.ndarray>np.empty(size, np.int64)
- else:
- it = np.PyArray_MultiIterNew2(p_arr, n_arr)
- randoms = <np.ndarray>np.empty(it.shape, np.int64)
-
- randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
- cnt = np.PyArray_SIZE(randoms)
-
- it = np.PyArray_MultiIterNew3(randoms, p_arr, n_arr)
- with self.lock, nogil:
- for i in range(cnt):
- _dp = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
- _in = (<int64_t*>np.PyArray_MultiIter_DATA(it, 2))[0]
- (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0] = random_binomial(&self._bitgen, _dp, _in, &self._binomial)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return randoms
-
- _dp = PyFloat_AsDouble(p)
- _in = <int64_t>n
- check_constraint(_dp, 'p', CONS_BOUNDED_0_1)
- check_constraint(<double>_in, 'n', CONS_NON_NEGATIVE)
-
- if size is None:
- with self.lock:
- return random_binomial(&self._bitgen, _dp, _in, &self._binomial)
-
- randoms = <np.ndarray>np.empty(size, np.int64)
- cnt = np.PyArray_SIZE(randoms)
- randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
-
- with self.lock, nogil:
- for i in range(cnt):
- randoms_data[i] = random_binomial(&self._bitgen, _dp, _in,
- &self._binomial)
-
- return randoms
-
- def negative_binomial(self, n, p, size=None):
- """
- negative_binomial(n, p, size=None)
-
- Draw samples from a negative binomial distribution.
-
- Samples are drawn from a negative binomial distribution with specified
- parameters, `n` successes and `p` probability of success where `n`
- is > 0 and `p` is in the interval [0, 1].
-
- Parameters
- ----------
- n : float or array_like of floats
- Parameter of the distribution, > 0.
- p : float or array_like of floats
- Parameter of the distribution, >= 0 and <=1.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``n`` and ``p`` are both scalars.
- Otherwise, ``np.broadcast(n, p).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized negative binomial distribution,
- where each sample is equal to N, the number of failures that
- occurred before a total of n successes was reached.
-
- Notes
- -----
- The probability mass function of the negative binomial distribution is
-
- .. math:: P(N;n,p) = \\frac{\\Gamma(N+n)}{N!\\Gamma(n)}p^{n}(1-p)^{N},
-
- where :math:`n` is the number of successes, :math:`p` is the
- probability of success, :math:`N+n` is the number of trials, and
- :math:`\\Gamma` is the gamma function. When :math:`n` is an integer,
- :math:`\\frac{\\Gamma(N+n)}{N!\\Gamma(n)} = \\binom{N+n-1}{N}`, which is
- the more common form of this term in the pmf. The negative
- binomial distribution gives the probability of N failures given n
- successes, with a success on the last trial.
-
- If one throws a die repeatedly until the third time a "1" appears,
- then the probability distribution of the number of non-"1"s that
- appear before the third "1" is a negative binomial distribution.
-
- References
- ----------
- .. [1] Weisstein, Eric W. "Negative Binomial Distribution." From
- MathWorld--A Wolfram Web Resource.
- http://mathworld.wolfram.com/NegativeBinomialDistribution.html
- .. [2] Wikipedia, "Negative binomial distribution",
- https://en.wikipedia.org/wiki/Negative_binomial_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- A real world example. A company drills wild-cat oil
- exploration wells, each with an estimated probability of
- success of 0.1. What is the probability of having one success
- for each successive well, that is what is the probability of a
- single success after drilling 5 wells, after 6 wells, etc.?
-
- >>> s = np.random.default_rng().negative_binomial(1, 0.1, 100000)
- >>> for i in range(1, 11): # doctest: +SKIP
- ... probability = sum(s<i) / 100000.
- ... print(i, "wells drilled, probability of one success =", probability)
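-
- For comparison, the exact probability of at least one success within
- the first ``i`` wells is ``1 - 0.9**i``, about 0.41 for ``i = 5``:
-
- >>> round(1 - 0.9**5, 5)
- 0.40951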
-
- """
- return disc(&random_negative_binomial, &self._bitgen, size, self.lock, 2, 0,
- n, 'n', CONS_POSITIVE_NOT_NAN,
- p, 'p', CONS_BOUNDED_0_1,
- 0.0, '', CONS_NONE)
-
- def poisson(self, lam=1.0, size=None):
- """
- poisson(lam=1.0, size=None)
-
- Draw samples from a Poisson distribution.
-
- The Poisson distribution is the limit of the binomial distribution
- for large N.
-
- Parameters
- ----------
- lam : float or array_like of floats
- Expectation of interval, must be >= 0. A sequence of expectation
- intervals must be broadcastable over the requested size.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``lam`` is a scalar. Otherwise,
- ``np.array(lam).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Poisson distribution.
-
- Notes
- -----
- The Poisson distribution
-
- .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!}
-
- For events with an expected rate of :math:`\\lambda` occurrences
- per interval, the Poisson distribution :math:`f(k; \\lambda)`
- describes the probability of exactly :math:`k` events occurring
- within one such interval.
-
- Because the output is limited to the range of the C int64 type, a
- ValueError is raised when `lam` is within 10 sigma of the maximum
- representable value.
-
- References
- ----------
- .. [1] Weisstein, Eric W. "Poisson Distribution."
- From MathWorld--A Wolfram Web Resource.
- http://mathworld.wolfram.com/PoissonDistribution.html
- .. [2] Wikipedia, "Poisson distribution",
- https://en.wikipedia.org/wiki/Poisson_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> import numpy as np
- >>> rng = np.random.default_rng()
- >>> s = rng.poisson(5, 10000)
-
- Display histogram of the sample:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 14, density=True)
- >>> plt.show()
-
- Draw 100 values each for lambda 100 and 500:
-
- >>> s = rng.poisson(lam=(100., 500.), size=(100, 2))
-
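- The Poisson distribution is the large-``n``, small-``p`` limit of the
- binomial distribution with ``n * p = lam`` held fixed, so the two
- generators agree closely for many trials (a statistical sketch; the
- tolerance is illustrative only):
-
- >>> s = rng.poisson(5, 10000)
- >>> b = rng.binomial(1000, 5/1000., 10000)
- >>> abs(s.mean() - b.mean()) < 0.2 # doctest: +SKIP
- True
-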
- """
- return disc(&random_poisson, &self._bitgen, size, self.lock, 1, 0,
- lam, 'lam', CONS_POISSON,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE)
-
- def zipf(self, a, size=None):
- """
- zipf(a, size=None)
-
- Draw samples from a Zipf distribution.
-
- Samples are drawn from a Zipf distribution with specified parameter
- `a` > 1.
-
- The Zipf distribution (also known as the zeta distribution) is a
- discrete probability distribution that satisfies Zipf's law: the
- frequency of an item is inversely proportional to its rank in a
- frequency table.
-
- Parameters
- ----------
- a : float or array_like of floats
- Distribution parameter. Must be greater than 1.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``a`` is a scalar. Otherwise,
- ``np.array(a).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Zipf distribution.
-
- See Also
- --------
- scipy.stats.zipf : probability density function, distribution, or
- cumulative distribution function, etc.
-
- Notes
- -----
- The probability density for the Zipf distribution is
-
- .. math:: p(x) = \\frac{x^{-a}}{\\zeta(a)},
-
- where :math:`\\zeta` is the Riemann Zeta function.
-
- It is named for the American linguist George Kingsley Zipf, who noted
- that the frequency of any word in a sample of a language is inversely
- proportional to its rank in the frequency table.
-
- References
- ----------
- .. [1] Zipf, G. K., "Selected Studies of the Principle of Relative
- Frequency in Language," Cambridge, MA: Harvard Univ. Press,
- 1932.
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> a = 2. # parameter
- >>> s = np.random.default_rng().zipf(a, 1000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> from scipy import special # doctest: +SKIP
-
- Truncate s values at 50 so plot is interesting:
-
- >>> count, bins, ignored = plt.hist(s[s<50],
- ... 50, density=True)
- >>> x = np.arange(1., 50.)
- >>> y = x**(-a) / special.zetac(a) # doctest: +SKIP
- >>> plt.plot(x, y/max(y), linewidth=2, color='r') # doctest: +SKIP
- >>> plt.show()
-
- """
- return disc(&random_zipf, &self._bitgen, size, self.lock, 1, 0,
- a, 'a', CONS_GT_1,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE)
-
- def geometric(self, p, size=None):
- """
- geometric(p, size=None)
-
- Draw samples from the geometric distribution.
-
- Bernoulli trials are experiments with one of two outcomes:
- success or failure (an example of such an experiment is flipping
- a coin). The geometric distribution models the number of trials
- that must be run in order to achieve success. It is therefore
- supported on the positive integers, ``k = 1, 2, ...``.
-
- The probability mass function of the geometric distribution is
-
- .. math:: f(k) = (1 - p)^{k - 1} p
-
- where `p` is the probability of success of an individual trial.
-
- Parameters
- ----------
- p : float or array_like of floats
- The probability of success of an individual trial.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``p`` is a scalar. Otherwise,
- ``np.array(p).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized geometric distribution.
-
- Examples
- --------
- Draw ten thousand values from the geometric distribution,
- with the probability of an individual success equal to 0.35:
-
- >>> z = np.random.default_rng().geometric(p=0.35, size=10000)
-
- What proportion of trials succeeded after a single run?
-
- >>> (z == 1).sum() / 10000.
- 0.34889999999999999 #random
-
- """
- return disc(&random_geometric, &self._bitgen, size, self.lock, 1, 0,
- p, 'p', CONS_BOUNDED_GT_0_1,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE)
-
- def hypergeometric(self, ngood, nbad, nsample, size=None):
- """
- hypergeometric(ngood, nbad, nsample, size=None)
-
- Draw samples from a Hypergeometric distribution.
-
- Samples are drawn from a hypergeometric distribution with specified
- parameters, `ngood` (ways to make a good selection), `nbad` (ways to make
- a bad selection), and `nsample` (number of items sampled, which is less
- than or equal to the sum ``ngood + nbad``).
-
- Parameters
- ----------
- ngood : int or array_like of ints
- Number of ways to make a good selection. Must be nonnegative and
- less than 10**9.
- nbad : int or array_like of ints
- Number of ways to make a bad selection. Must be nonnegative and
- less than 10**9.
- nsample : int or array_like of ints
- Number of items sampled. Must be nonnegative and less than or
- equal to ``ngood + nbad``.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if `ngood`, `nbad`, and `nsample`
- are all scalars. Otherwise, ``np.broadcast(ngood, nbad, nsample).size``
- samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized hypergeometric distribution. Each
- sample is the number of good items within a randomly selected subset of
- size `nsample` taken from a set of `ngood` good items and `nbad` bad items.
-
- See Also
- --------
- scipy.stats.hypergeom : probability density function, distribution or
- cumulative distribution function, etc.
-
- Notes
- -----
- The probability density for the Hypergeometric distribution is
-
- .. math:: P(x) = \\frac{\\binom{g}{x}\\binom{b}{n-x}}{\\binom{g+b}{n}},
-
- where :math:`0 \\le x \\le n` and :math:`n-b \\le x \\le g`
-
- for P(x) the probability of ``x`` good results in the drawn sample,
- g = `ngood`, b = `nbad`, and n = `nsample`.
-
- Consider an urn with black and white marbles in it, `ngood` of them
- are black and `nbad` are white. If you draw `nsample` balls without
- replacement, then the hypergeometric distribution describes the
- distribution of black balls in the drawn sample.
-
- Note that this distribution is very similar to the binomial
- distribution, except that in this case, samples are drawn without
- replacement, whereas in the Binomial case samples are drawn with
- replacement (or the sample space is infinite). As the sample space
- becomes large, this distribution approaches the binomial.
-
- The arguments `ngood` and `nbad` each must be less than `10**9`. For
- extremely large arguments, the algorithm that is used to compute the
- samples [4]_ breaks down because of loss of precision in floating point
- calculations. For such large values, if `nsample` is not also large,
- the distribution can be approximated with the binomial distribution,
- `binomial(n=nsample, p=ngood/(ngood + nbad))`.
-
- References
- ----------
- .. [1] Lentner, Marvin, "Elementary Applied Statistics", Bogden
- and Quigley, 1972.
- .. [2] Weisstein, Eric W. "Hypergeometric Distribution." From
- MathWorld--A Wolfram Web Resource.
- http://mathworld.wolfram.com/HypergeometricDistribution.html
- .. [3] Wikipedia, "Hypergeometric distribution",
- https://en.wikipedia.org/wiki/Hypergeometric_distribution
- .. [4] Stadlober, Ernst, "The ratio of uniforms approach for generating
- discrete random variates", Journal of Computational and Applied
- Mathematics, 31, pp. 181-189 (1990).
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> rng = np.random.default_rng()
- >>> # number of good, number of bad, and number of samples
- >>> ngood, nbad, nsamp = 100, 2, 10
- >>> s = rng.hypergeometric(ngood, nbad, nsamp, 1000)
- >>> from matplotlib.pyplot import hist
- >>> _ = hist(s)  # note that it is very unlikely to grab both bad items
-
- Suppose you have an urn with 15 white and 15 black marbles.
- If you pull 15 marbles at random, how likely is it that
- 12 or more of them are one color?
-
- >>> s = rng.hypergeometric(15, 15, 15, 100000)
- >>> sum(s>=12)/100000. + sum(s<=3)/100000. # doctest: +SKIP
- 0.003 # random ... pretty unlikely!
-
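- For very large `ngood` and `nbad`, the binomial approximation from the
- Notes can be checked empirically (a sketch; the agreement is
- statistical and the tolerance illustrative):
-
- >>> ngood, nbad, nsamp = 10**8, 10**8, 10
- >>> h = rng.hypergeometric(ngood, nbad, nsamp, 100000)
- >>> b = rng.binomial(nsamp, ngood/(ngood + nbad), 100000)
- >>> abs(h.mean() - b.mean()) < 0.1 # doctest: +SKIP
- True
-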
- """
- DEF HYPERGEOM_MAX = 10**9
- cdef bint is_scalar = True
- cdef np.ndarray ongood, onbad, onsample
- cdef int64_t lngood, lnbad, lnsample
-
- ongood = <np.ndarray>np.PyArray_FROM_OTF(ngood, np.NPY_INT64, np.NPY_ALIGNED)
- onbad = <np.ndarray>np.PyArray_FROM_OTF(nbad, np.NPY_INT64, np.NPY_ALIGNED)
- onsample = <np.ndarray>np.PyArray_FROM_OTF(nsample, np.NPY_INT64, np.NPY_ALIGNED)
-
- if np.PyArray_NDIM(ongood) == np.PyArray_NDIM(onbad) == np.PyArray_NDIM(onsample) == 0:
-
- lngood = <int64_t>ngood
- lnbad = <int64_t>nbad
- lnsample = <int64_t>nsample
-
- if lngood >= HYPERGEOM_MAX or lnbad >= HYPERGEOM_MAX:
- raise ValueError("both ngood and nbad must be less than %d" %
- HYPERGEOM_MAX)
- if lngood + lnbad < lnsample:
- raise ValueError("ngood + nbad < nsample")
- return disc(&random_hypergeometric, &self._bitgen, size, self.lock, 0, 3,
- lngood, 'ngood', CONS_NON_NEGATIVE,
- lnbad, 'nbad', CONS_NON_NEGATIVE,
- lnsample, 'nsample', CONS_NON_NEGATIVE)
-
- if np.any(ongood >= HYPERGEOM_MAX) or np.any(onbad >= HYPERGEOM_MAX):
- raise ValueError("both ngood and nbad must be less than %d" %
- HYPERGEOM_MAX)
-
- if np.any(np.less(np.add(ongood, onbad), onsample)):
- raise ValueError("ngood + nbad < nsample")
-
- return discrete_broadcast_iii(&random_hypergeometric, &self._bitgen, size, self.lock,
- ongood, 'ngood', CONS_NON_NEGATIVE,
- onbad, 'nbad', CONS_NON_NEGATIVE,
- onsample, 'nsample', CONS_NON_NEGATIVE)
-
- def logseries(self, p, size=None):
- """
- logseries(p, size=None)
-
- Draw samples from a logarithmic series distribution.
-
- Samples are drawn from a log series distribution with specified
- shape parameter, 0 < ``p`` < 1.
-
- Parameters
- ----------
- p : float or array_like of floats
- Shape parameter for the distribution. Must be in the range (0, 1).
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``p`` is a scalar. Otherwise,
- ``np.array(p).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized logarithmic series distribution.
-
- See Also
- --------
- scipy.stats.logser : probability density function, distribution or
- cumulative distribution function, etc.
-
- Notes
- -----
- The probability mass function for the Log Series distribution is
-
- .. math:: P(k) = \\frac{-p^k}{k \\ln(1-p)},
-
- where p = probability.
-
- The log series distribution is frequently used to represent species
- richness and occurrence, first proposed by Fisher, Corbet, and
- Williams in 1943 [2]_. It may also be used to model the numbers of
- occupants seen in cars [3]_.
-
- References
- ----------
- .. [1] Buzas, Martin A.; Culver, Stephen J., Understanding regional
- species diversity through the log series distribution of
- occurrences: BIODIVERSITY RESEARCH Diversity & Distributions,
- Volume 5, Number 5, September 1999 , pp. 187-195(9).
- .. [2] Fisher, R.A., A.S. Corbet, and C.B. Williams. 1943. The
- relation between the number of species and the number of
- individuals in a random sample of an animal population.
- Journal of Animal Ecology, 12:42-58.
- .. [3] D. J. Hand, F. Daly, D. Lunn, E. Ostrowski, A Handbook of Small
- Data Sets, CRC Press, 1994.
- .. [4] Wikipedia, "Logarithmic distribution",
- https://en.wikipedia.org/wiki/Logarithmic_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> a = .6
- >>> s = np.random.default_rng().logseries(a, 10000)
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s)
-
- Plot the sample against the probability mass function:
-
- >>> def logseries(k, p):
- ... return -p**k/(k*np.log(1-p))
- >>> plt.plot(bins, logseries(bins, a) * count.max()/
- ... logseries(bins, a).max(), 'r')
- >>> plt.show()
-
- """
- return disc(&random_logseries, &self._bitgen, size, self.lock, 1, 0,
- p, 'p', CONS_BOUNDED_0_1,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE)
-
- # Multivariate distributions:
- def multivariate_normal(self, mean, cov, size=None, check_valid='warn',
- tol=1e-8):
- """
- multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8)
-
- Draw random samples from a multivariate normal distribution.
-
- The multivariate normal, multinormal or Gaussian distribution is a
- generalization of the one-dimensional normal distribution to higher
- dimensions. Such a distribution is specified by its mean and
- covariance matrix. These parameters are analogous to the mean
- (average or "center") and variance (standard deviation, or "width,"
- squared) of the one-dimensional normal distribution.
-
- Parameters
- ----------
- mean : 1-D array_like, of length N
- Mean of the N-dimensional distribution.
- cov : 2-D array_like, of shape (N, N)
- Covariance matrix of the distribution. It must be symmetric and
- positive-semidefinite for proper sampling.
- size : int or tuple of ints, optional
- Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are
- generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because
- each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.
- If no shape is specified, a single (`N`-D) sample is returned.
- check_valid : { 'warn', 'raise', 'ignore' }, optional
- Behavior when the covariance matrix is not positive semidefinite.
- tol : float, optional
- Tolerance when checking the singular values in covariance matrix.
- cov is cast to double before the check.
-
- Returns
- -------
- out : ndarray
- The drawn samples, of shape *size*, if that was provided. If not,
- the shape is ``(N,)``.
-
- In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
- value drawn from the distribution.
-
- Notes
- -----
- The mean is a coordinate in N-dimensional space, which represents the
- location where samples are most likely to be generated. This is
- analogous to the peak of the bell curve for the one-dimensional or
- univariate normal distribution.
-
- Covariance indicates the level to which two variables vary together.
- From the multivariate normal distribution, we draw N-dimensional
- samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix
- element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`.
- The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its
- "spread").
-
- Instead of specifying the full covariance matrix, popular
- approximations include:
-
- - Spherical covariance (`cov` is a multiple of the identity matrix)
- - Diagonal covariance (`cov` has non-negative elements, and only on
- the diagonal)
-
- This geometrical property can be seen in two dimensions by plotting
- generated data-points:
-
- >>> mean = [0, 0]
- >>> cov = [[1, 0], [0, 100]] # diagonal covariance
-
- Diagonal covariance means that points are oriented along the x- or y-axis:
-
- >>> import matplotlib.pyplot as plt
- >>> x, y = np.random.default_rng().multivariate_normal(mean, cov, 5000).T
- >>> plt.plot(x, y, 'x')
- >>> plt.axis('equal')
- >>> plt.show()
-
- Note that the covariance matrix must be positive semidefinite (a.k.a.
- nonnegative-definite). Otherwise, the behavior of this method is
- undefined and backwards compatibility is not guaranteed.
-
- References
- ----------
- .. [1] Papoulis, A., "Probability, Random Variables, and Stochastic
- Processes," 3rd ed., New York: McGraw-Hill, 1991.
- .. [2] Duda, R. O., Hart, P. E., and Stork, D. G., "Pattern
- Classification," 2nd ed., New York: Wiley, 2001.
-
- Examples
- --------
- >>> mean = (1, 2)
- >>> cov = [[1, 0], [0, 1]]
- >>> x = np.random.default_rng().multivariate_normal(mean, cov, (3, 3))
- >>> x.shape
- (3, 3, 2)
-
- The following is probably true, given that each component is a
- standard normal and so falls below 0.6 about 73% of the time:
-
- >>> list((x[0,0,:] - mean) < 0.6)
- [True, True] # random
-
- """
- from numpy.dual import svd
-
- # Check preconditions on arguments
- mean = np.array(mean)
- cov = np.array(cov)
- if size is None:
- shape = []
- elif isinstance(size, (int, long, np.integer)):
- shape = [size]
- else:
- shape = size
-
- if len(mean.shape) != 1:
- raise ValueError("mean must be 1 dimensional")
- if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
- raise ValueError("cov must be 2 dimensional and square")
- if mean.shape[0] != cov.shape[0]:
- raise ValueError("mean and cov must have same length")
-
- # Compute shape of output and create a matrix of independent
- # standard normally distributed random numbers. The matrix has rows
- # with the same length as mean and as many rows as are necessary to
- # form a matrix of shape final_shape.
- final_shape = list(shape[:])
- final_shape.append(mean.shape[0])
- x = self.standard_normal(final_shape).reshape(-1, mean.shape[0])
-
- # Transform matrix of standard normals into matrix where each row
- # contains multivariate normals with the desired covariance.
- # Compute A such that dot(transpose(A),A) == cov.
- # Then the matrix products of the rows of x and A have the desired
- # covariance. Note that sqrt(s)*v where (u,s,v) is the singular value
- # decomposition of cov is such an A.
- #
- # Also check that cov is positive-semidefinite. If so, the u.T and v
- # matrices should be equal up to roundoff error if cov is
- # symmetric and the singular value of the corresponding row is
- # not zero. We continue to use the SVD rather than Cholesky in
- # order to preserve current outputs. Note that symmetry has not
- # been checked.
-
- # GH10839, ensure double to make tol meaningful
- cov = cov.astype(np.double)
- (u, s, v) = svd(cov)
-
- if check_valid != 'ignore':
- if check_valid != 'warn' and check_valid != 'raise':
- raise ValueError("check_valid must equal 'warn', 'raise', or 'ignore'")
-
- psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol)
- if not psd:
- if check_valid == 'warn':
- warnings.warn("covariance is not positive-semidefinite.",
- RuntimeWarning)
- else:
- raise ValueError("covariance is not positive-semidefinite.")
-
- x = np.dot(x, np.sqrt(s)[:, None] * v)
- x += mean
- x.shape = tuple(final_shape)
- return x
-
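- # Sanity check for the SVD-based transform above (a sketch, not part
- # of the sampler): with A = np.sqrt(s)[:, None] * v, the identity
- # A.T @ A == v.T @ np.diag(s) @ v == cov holds up to roundoff for a
- # symmetric positive-semidefinite cov, so the rows of x @ A have the
- # requested covariance:
- #
- # u, s, v = np.linalg.svd(cov)
- # A = np.sqrt(s)[:, None] * v
- # assert np.allclose(A.T @ A, cov)
-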
- def multinomial(self, object n, object pvals, size=None):
- """
- multinomial(n, pvals, size=None)
-
- Draw samples from a multinomial distribution.
-
- The multinomial distribution is a multivariate generalization of the
- binomial distribution. Take an experiment with one of ``p``
- possible outcomes. An example of such an experiment is throwing a die,
- where the outcome can be 1 through 6. Each sample drawn from the
- distribution represents `n` such experiments. Its values,
- ``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the
- outcome was ``i``.
-
- Parameters
- ----------
- n : int or array-like of ints
- Number of experiments.
- pvals : sequence of floats, length p
- Probabilities of each of the ``p`` different outcomes. These
- must sum to 1 (however, the last element is always assumed to
- account for the remaining probability, as long as
- ``sum(pvals[:-1]) <= 1``).
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
-
- Returns
- -------
- out : ndarray
- The drawn samples, of shape *size*, if that was provided. If not,
- the shape is ``(N,)``.
-
- In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
- value drawn from the distribution.
-
- Examples
- --------
- Throw a die 20 times:
-
- >>> rng = np.random.default_rng()
- >>> rng.multinomial(20, [1/6.]*6, size=1)
- array([[4, 1, 7, 5, 2, 1]]) # random
-
- It landed 4 times on 1, once on 2, etc.
-
- Now, throw the die 20 times, and 20 times again:
-
- >>> rng.multinomial(20, [1/6.]*6, size=2)
- array([[3, 4, 3, 3, 4, 3],
- [2, 4, 3, 4, 0, 7]]) # random
-
- For the first run, we threw 3 times 1, 4 times 2, etc. For the second,
- we threw 2 times 1, 4 times 2, etc.
-
- Now, do one experiment throwing the die 10 times, and 10 times again,
- and another throwing the die 20 times, and 20 times again:
-
- >>> rng.multinomial([[10], [20]], [1/6.]*6, size=2)
- array([[[2, 4, 0, 1, 2, 1],
- [1, 3, 0, 3, 1, 2]],
- [[1, 4, 4, 4, 4, 3],
- [3, 3, 2, 5, 5, 2]]]) # random
-
- The first array shows the outcomes of throwing the die 10 times, and
- the second shows the outcomes from throwing the die 20 times.
-
- A loaded die is more likely to land on number 6:
-
- >>> rng.multinomial(100, [1/7.]*5 + [2/7.])
- array([11, 16, 14, 17, 16, 26]) # random
-
- The probability inputs should be normalized. As an implementation
- detail, the value of the last entry is ignored and assumed to take
- up any leftover probability mass, but this should not be relied on.
- A biased coin which has twice as much weight on one side as on the
- other should be sampled like so:
-
- >>> rng.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT
- array([38, 62]) # random
-
- not like:
-
- >>> rng.multinomial(100, [1.0, 2.0]) # WRONG
- Traceback (most recent call last):
- ValueError: pvals < 0, pvals > 1 or pvals contains NaNs
-
- """
-
- cdef np.npy_intp d, i, sz, offset
- cdef np.ndarray parr, mnarr, on, temp_arr
- cdef double *pix
- cdef int64_t *mnix
- cdef int64_t ni
- cdef np.broadcast it
-
- d = len(pvals)
- on = <np.ndarray>np.PyArray_FROM_OTF(n, np.NPY_INT64, np.NPY_ALIGNED)
- parr = <np.ndarray>np.PyArray_FROM_OTF(
- pvals, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
- pix = <double*>np.PyArray_DATA(parr)
- check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1)
- if kahan_sum(pix, d-1) > (1.0 + 1e-12):
- raise ValueError("sum(pvals[:-1]) > 1.0")
-
- if np.PyArray_NDIM(on) != 0: # vector
- check_array_constraint(on, 'n', CONS_NON_NEGATIVE)
- if size is None:
- it = np.PyArray_MultiIterNew1(on)
- else:
- temp = np.empty(size, dtype=np.int8)
- temp_arr = <np.ndarray>temp
- it = np.PyArray_MultiIterNew2(on, temp_arr)
- shape = it.shape + (d,)
- multin = np.zeros(shape, dtype=np.int64)
- mnarr = <np.ndarray>multin
- mnix = <int64_t*>np.PyArray_DATA(mnarr)
- offset = 0
- sz = it.size
- with self.lock, nogil:
- for i in range(sz):
- ni = (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- random_multinomial(&self._bitgen, ni, &mnix[offset], pix, d, &self._binomial)
- offset += d
- np.PyArray_MultiIter_NEXT(it)
- return multin
-
- if size is None:
- shape = (d,)
- else:
- try:
- shape = (operator.index(size), d)
- except:
- shape = tuple(size) + (d,)
-
- multin = np.zeros(shape, dtype=np.int64)
- mnarr = <np.ndarray>multin
- mnix = <int64_t*>np.PyArray_DATA(mnarr)
- sz = np.PyArray_SIZE(mnarr)
- ni = n
- check_constraint(ni, 'n', CONS_NON_NEGATIVE)
- offset = 0
- with self.lock, nogil:
- for i in range(sz // d):
- random_multinomial(&self._bitgen, ni, &mnix[offset], pix, d, &self._binomial)
- offset += d
-
- return multin
-
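- # kahan_sum above compensates for floating-point rounding while adding
- # the first d-1 probabilities. A minimal pure-Python equivalent
- # (illustrative only; the real routine is implemented in C):
- #
- # def kahan_sum(values):
- #     total = comp = 0.0
- #     for x in values:
- #         y = x - comp            # apply the running compensation
- #         t = total + y           # low-order bits of y may be lost here
- #         comp = (t - total) - y  # recover what was lost
- #         total = t
- #     return total
-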
- def dirichlet(self, object alpha, size=None):
- """
- dirichlet(alpha, size=None)
-
- Draw samples from the Dirichlet distribution.
-
- Draw `size` samples of dimension k from a Dirichlet distribution. A
- Dirichlet-distributed random variable can be seen as a multivariate
- generalization of a Beta distribution. The Dirichlet distribution
- is a conjugate prior of a multinomial distribution in Bayesian
- inference.
-
- Parameters
- ----------
- alpha : array
- Parameter of the distribution (k dimension for sample of
- dimension k).
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
-
- Returns
- -------
- samples : ndarray
- The drawn samples, of shape ``(size, k)``.
-
- Raises
- ------
- ValueError
- If any value in alpha is less than or equal to zero.
-
- Notes
- -----
- The Dirichlet distribution is a distribution over vectors
- :math:`x` that fulfil the conditions :math:`x_i>0` and
- :math:`\\sum_{i=1}^k x_i = 1`.
-
- The probability density function :math:`p` of a
- Dirichlet-distributed random vector :math:`X` is
- proportional to
-
- .. math:: p(x) \\propto \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i},
-
- where :math:`\\alpha` is a vector containing the positive
- concentration parameters.
-
- The method uses the following property for computation: let :math:`Y`
- be a random vector which has components that follow a standard gamma
- distribution, then :math:`X = \\frac{1}{\\sum_{i=1}^k{Y_i}} Y`
- is Dirichlet-distributed.
-
- References
- ----------
- .. [1] David MacKay, "Information Theory, Inference and Learning
- Algorithms," chapter 23,
- http://www.inference.org.uk/mackay/itila/
- .. [2] Wikipedia, "Dirichlet distribution",
- https://en.wikipedia.org/wiki/Dirichlet_distribution
-
- Examples
- --------
- Taking an example cited in Wikipedia, this distribution can be used if
- one wanted to cut strings (each of initial length 1.0) into K pieces
- with different lengths, where each piece had a designated average
- length, but allowing some variation in the relative sizes of the
- pieces.
-
- >>> s = np.random.default_rng().dirichlet((10, 5, 3), 20).transpose()
-
- >>> import matplotlib.pyplot as plt
- >>> plt.barh(range(20), s[0])
- >>> plt.barh(range(20), s[1], left=s[0], color='g')
- >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')
- >>> plt.title("Lengths of Strings")
-
- """
-
- # =================
- # Pure python algo
- # =================
- # alpha = N.atleast_1d(alpha)
- # k = alpha.size
-
- # if n == 1:
- # val = N.zeros(k)
- # for i in range(k):
- # val[i] = sgamma(alpha[i], n)
- # val /= N.sum(val)
- # else:
- # val = N.zeros((k, n))
- # for i in range(k):
- # val[i] = sgamma(alpha[i], n)
- # val /= N.sum(val, axis = 0)
- # val = val.T
- # return val
-
- cdef np.npy_intp k, totsize, i, j
- cdef np.ndarray alpha_arr, val_arr
- cdef double *alpha_data
- cdef double *val_data
- cdef double acc, invacc
-
- k = len(alpha)
- alpha_arr = <np.ndarray>np.PyArray_FROM_OTF(
- alpha, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
- if np.any(np.less_equal(alpha_arr, 0)):
- raise ValueError('alpha <= 0')
- alpha_data = <double*>np.PyArray_DATA(alpha_arr)
-
- if size is None:
- shape = (k,)
- else:
- try:
- shape = (operator.index(size), k)
- except:
- shape = tuple(size) + (k,)
-
- diric = np.zeros(shape, np.float64)
- val_arr = <np.ndarray>diric
- val_data= <double*>np.PyArray_DATA(val_arr)
-
- i = 0
- totsize = np.PyArray_SIZE(val_arr)
- with self.lock, nogil:
- while i < totsize:
- acc = 0.0
- for j in range(k):
- val_data[i+j] = random_standard_gamma_zig(&self._bitgen,
- alpha_data[j])
- acc = acc + val_data[i + j]
- invacc = 1/acc
- for j in range(k):
- val_data[i + j] = val_data[i + j] * invacc
- i = i + k
-
- return diric
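-
- # The loop above implements the gamma-normalization recipe from the
- # Notes. An equivalent vectorized sketch, where ``rng`` is a Generator
- # and ``num`` is an illustrative sample count (the Cython loop avoids
- # the temporaries):
- #
- # y = rng.standard_gamma(alpha, size=(num, len(alpha)))
- # x = y / y.sum(axis=1, keepdims=True)  # each row ~ Dirichlet(alpha)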
-
- # Shuffling and permutations:
- def shuffle(self, object x):
- """
- shuffle(x)
-
- Modify a sequence in-place by shuffling its contents.
-
- This function only shuffles the array along the first axis of a
- multi-dimensional array. The order of sub-arrays is changed but
- their contents remain the same.
-
- Parameters
- ----------
- x : array_like
- The array or list to be shuffled.
-
- Returns
- -------
- None
-
- Examples
- --------
- >>> rng = np.random.default_rng()
- >>> arr = np.arange(10)
- >>> rng.shuffle(arr)
- >>> arr
- [1 7 5 2 9 4 3 6 0 8] # random
-
- Multi-dimensional arrays are only shuffled along the first axis:
-
- >>> arr = np.arange(9).reshape((3, 3))
- >>> rng.shuffle(arr)
- >>> arr
- array([[3, 4, 5], # random
- [6, 7, 8],
- [0, 1, 2]])
-
- """
- cdef:
- np.npy_intp i, j, n = len(x), stride, itemsize
- char* x_ptr
- char* buf_ptr
-
- if type(x) is np.ndarray and x.ndim == 1 and x.size:
- # Fast, statically typed path: shuffle the underlying buffer.
- # Only for non-empty, 1d objects of class ndarray (subclasses such
- # as MaskedArrays may not support this approach).
- x_ptr = <char*><size_t>x.ctypes.data
- stride = x.strides[0]
- itemsize = x.dtype.itemsize
- # As the array x could contain python objects we use a buffer
- # of bytes for the swaps to avoid leaving one of the objects
- within the buffer and erroneously decrementing its refcount
- # when the function exits.
- buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
- buf_ptr = <char*><size_t>buf.ctypes.data
- with self.lock:
- # We trick gcc into providing a specialized implementation for
- # the most common case, yielding a ~33% performance improvement.
- # Note that apparently, only one branch can ever be specialized.
- if itemsize == sizeof(np.npy_intp):
- self._shuffle_raw(n, 1, sizeof(np.npy_intp), stride, x_ptr, buf_ptr)
- else:
- self._shuffle_raw(n, 1, itemsize, stride, x_ptr, buf_ptr)
- elif isinstance(x, np.ndarray) and x.ndim and x.size:
- buf = np.empty_like(x[0, ...])
- with self.lock:
- for i in reversed(range(1, n)):
- j = random_interval(&self._bitgen, i)
- if i == j:
- # i == j is not needed and memcpy is undefined.
- continue
- buf[...] = x[j]
- x[j] = x[i]
- x[i] = buf
- else:
- # Untyped path.
- with self.lock:
- for i in reversed(range(1, n)):
- j = random_interval(&self._bitgen, i)
- x[i], x[j] = x[j], x[i]
-
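- # All three branches above perform the same Fisher-Yates shuffle; a
- # pure-Python sketch of the swap loop (illustrative only, with ``rng``
- # a Generator):
- #
- # for i in reversed(range(1, n)):
- #     j = rng.integers(0, i + 1)   # j drawn uniformly from [0, i]
- #     x[i], x[j] = x[j], x[i]
-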
- cdef inline _shuffle_raw(self, np.npy_intp n, np.npy_intp first,
- np.npy_intp itemsize, np.npy_intp stride,
- char* data, char* buf):
- """
- Parameters
- ----------
- n
- Number of elements in data
- first
- First observation to shuffle. Shuffles n-1,
- n-2, ..., first, so that when first=1 the entire
- array is shuffled
- itemsize
- Size in bytes of item
- stride
- Array stride
- data
- Location of data
- buf
- Location of buffer (itemsize)
- """
- cdef np.npy_intp i, j
- for i in reversed(range(first, n)):
- j = random_interval(&self._bitgen, i)
- string.memcpy(buf, data + j * stride, itemsize)
- string.memcpy(data + j * stride, data + i * stride, itemsize)
- string.memcpy(data + i * stride, buf, itemsize)
-
- cdef inline void _shuffle_int(self, np.npy_intp n, np.npy_intp first,
- int64_t* data) nogil:
- """
- Parameters
- ----------
- n
- Number of elements in data
- first
- First observation to shuffle. Shuffles n-1,
- n-2, ..., first, so that when first=1 the entire
- array is shuffled
- data
- Location of data
- """
- cdef np.npy_intp i, j
- cdef int64_t temp
- for i in reversed(range(first, n)):
- j = random_bounded_uint64(&self._bitgen, 0, i, 0, 0)
- temp = data[j]
- data[j] = data[i]
- data[i] = temp
-
- def permutation(self, object x):
- """
- permutation(x)
-
- Randomly permute a sequence, or return a permuted range.
- If `x` is a multi-dimensional array, it is only shuffled along its
- first index.
-
- Parameters
- ----------
- x : int or array_like
- If `x` is an integer, randomly permute ``np.arange(x)``.
- If `x` is an array, make a copy and shuffle the elements
- randomly.
-
- Returns
- -------
- out : ndarray
- Permuted sequence or array range.
-
- Examples
- --------
- >>> rng = np.random.default_rng()
- >>> rng.permutation(10)
- array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6]) # random
-
- >>> rng.permutation([1, 4, 9, 12, 15])
- array([15, 1, 9, 4, 12]) # random
-
- >>> arr = np.arange(9).reshape((3, 3))
- >>> rng.permutation(arr)
- array([[6, 7, 8], # random
- [0, 1, 2],
- [3, 4, 5]])
-
- >>> rng.permutation("abc")
- Traceback (most recent call last):
- ...
- numpy.AxisError: x must be an integer or at least 1-dimensional
- """
-
- if isinstance(x, (int, np.integer)):
- arr = np.arange(x)
- self.shuffle(arr)
- return arr
-
- arr = np.asarray(x)
- if arr.ndim < 1:
- raise np.AxisError("x must be an integer or at least 1-dimensional")
-
- # shuffle has fast-path for 1-d
- if arr.ndim == 1:
- # Return a copy if same memory
- if np.may_share_memory(arr, x):
- arr = np.array(arr)
- self.shuffle(arr)
- return arr
-
- # Shuffle index array, dtype to ensure fast path
- idx = np.arange(arr.shape[0], dtype=np.intp)
- self.shuffle(idx)
- return arr[idx]
-
-
-def default_rng(seed=None):
- """Construct a new Generator with the default BitGenerator (PCG64).
-
- Parameters
- ----------
- seed : {None, int, array_like[ints], ISeedSequence, BitGenerator, Generator}, optional
- A seed to initialize the `BitGenerator`. If None, then fresh,
- unpredictable entropy will be pulled from the OS. If an ``int`` or
- ``array_like[ints]`` is passed, then it will be passed to
- `SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
- Additionally, when passed a `BitGenerator`, it will be wrapped by
- `Generator`. If passed a `Generator`, it will be returned unaltered.
-
- Notes
- -----
- When `seed` is omitted or ``None``, a new `BitGenerator` and `Generator` will
- be instantiated each time. This function does not manage a default global
- instance.
- """
- if _check_bit_generator(seed):
- # We were passed a BitGenerator, so just wrap it up.
- return Generator(seed)
- elif isinstance(seed, Generator):
- # Pass through a Generator.
- return seed
- # Otherwise we need to instantiate a new BitGenerator and Generator as
- # normal.
- return Generator(PCG64(seed))
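-
-
-# Usage sketch: each call below yields a Generator backed by PCG64, and a
-# Generator argument is passed through unchanged:
-#
-# rng = default_rng()       # fresh, unpredictable entropy from the OS
-# rng = default_rng(12345)  # reproducible stream derived via SeedSequence
-# assert default_rng(rng) is rng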
--- /dev/null
+#ifndef _RANDOMDGEN__ALIGNED_MALLOC_H_
+#define _RANDOMDGEN__ALIGNED_MALLOC_H_
+
+#include "Python.h"
+#include "numpy/npy_common.h"
+
+#define NPY_MEMALIGN 16 /* 16 for SSE2, 32 for AVX, 64 for Xeon Phi */
+
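+/*
+ * Scheme used below: over-allocate with PyMem_*, round the result up to the
+ * next NPY_MEMALIGN boundary, and stash the base pointer in the void* slot
+ * immediately before the aligned address so that realloc/free can recover
+ * it later:
+ *
+ *   [padding][base pointer][aligned data ...]
+ *                          ^-- address returned to callers
+ */
+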
+static NPY_INLINE void *PyArray_realloc_aligned(void *p, size_t n)
+{
+ void *p1, **p2, *base;
+ size_t old_offs, offs = NPY_MEMALIGN - 1 + sizeof(void *);
+ if (NPY_UNLIKELY(p != NULL))
+ {
+ base = *(((void **)p) - 1);
+ if (NPY_UNLIKELY((p1 = PyMem_Realloc(base, n + offs)) == NULL))
+ return NULL;
+ if (NPY_LIKELY(p1 == base))
+ return p;
+ p2 = (void **)(((Py_uintptr_t)(p1) + offs) & ~(NPY_MEMALIGN - 1));
+ old_offs = (size_t)((Py_uintptr_t)p - (Py_uintptr_t)base);
+ memmove((void *)p2, ((char *)p1) + old_offs, n);
+ }
+ else
+ {
+ if (NPY_UNLIKELY((p1 = PyMem_Malloc(n + offs)) == NULL))
+ return NULL;
+ p2 = (void **)(((Py_uintptr_t)(p1) + offs) & ~(NPY_MEMALIGN - 1));
+ }
+ *(p2 - 1) = p1;
+ return (void *)p2;
+}
+
+static NPY_INLINE void *PyArray_malloc_aligned(size_t n)
+{
+ return PyArray_realloc_aligned(NULL, n);
+}
+
+static NPY_INLINE void *PyArray_calloc_aligned(size_t n, size_t s)
+{
+ void *p;
+ if (NPY_UNLIKELY((p = PyArray_realloc_aligned(NULL, n * s)) == NULL))
+ return NULL;
+ memset(p, 0, n * s);
+ return p;
+}
+
+static NPY_INLINE void PyArray_free_aligned(void *p)
+{
+ void *base = *(((void **)p) - 1);
+ PyMem_Free(base);
+}
+
+#endif
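+
+/*
+ * Usage sketch (error handling elided; names as defined above):
+ *
+ *   double *buf = (double *)PyArray_malloc_aligned(n * sizeof(double));
+ *   buf = (double *)PyArray_realloc_aligned(buf, 2 * n * sizeof(double));
+ *   PyArray_free_aligned(buf);
+ */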
--- /dev/null
+#ifndef _RANDOMDGEN__DISTRIBUTIONS_LEGACY_H_
+#define _RANDOMDGEN__DISTRIBUTIONS_LEGACY_H_
+
+
+#include "numpy/random/distributions.h"
+
+typedef struct aug_bitgen {
+ bitgen_t *bit_generator;
+ int has_gauss;
+ double gauss;
+} aug_bitgen_t;
+
+extern double legacy_gauss(aug_bitgen_t *aug_state);
+extern double legacy_standard_exponential(aug_bitgen_t *aug_state);
+extern double legacy_pareto(aug_bitgen_t *aug_state, double a);
+extern double legacy_weibull(aug_bitgen_t *aug_state, double a);
+extern double legacy_power(aug_bitgen_t *aug_state, double a);
+extern double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale);
+extern double legacy_chisquare(aug_bitgen_t *aug_state, double df);
+extern double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df,
+ double nonc);
+extern double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum,
+ double dfden, double nonc);
+extern double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale);
+extern double legacy_lognormal(aug_bitgen_t *aug_state, double mean,
+ double sigma);
+extern double legacy_standard_t(aug_bitgen_t *aug_state, double df);
+extern double legacy_standard_cauchy(aug_bitgen_t *state);
+extern double legacy_beta(aug_bitgen_t *aug_state, double a, double b);
+extern double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden);
+extern double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale);
+extern double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape);
+extern double legacy_exponential(aug_bitgen_t *aug_state, double scale);
+extern int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial);
+extern int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n,
+ double p);
+extern int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state,
+ int64_t good, int64_t bad,
+ int64_t sample);
+extern int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p);
+extern int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam);
+extern int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a);
+extern int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p);
+void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n,
+ RAND_INT_TYPE *mnix, double *pix, npy_intp d,
+ binomial_t *binomial);
+
+#endif
+++ /dev/null
-from __future__ import division, absolute_import, print_function
-
-from .. import __doc__
-
-depends = ['core']
+++ /dev/null
-#cython: language_level=3
-
-from libc.stdint cimport int64_t
-
-import numpy as np
-cimport numpy as np
-
-from .distributions cimport bitgen_t, binomial_t
-
-cdef extern from "legacy-distributions.h":
-
- struct aug_bitgen:
- bitgen_t *bit_generator
- int has_gauss
- double gauss
-
- ctypedef aug_bitgen aug_bitgen_t
-
- double legacy_gauss(aug_bitgen_t *aug_state) nogil
- double legacy_pareto(aug_bitgen_t *aug_state, double a) nogil
- double legacy_weibull(aug_bitgen_t *aug_state, double a) nogil
- double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape) nogil
- double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale) nogil
- double legacy_standard_t(aug_bitgen_t *aug_state, double df) nogil
-
- double legacy_standard_exponential(aug_bitgen_t *aug_state) nogil
- double legacy_power(aug_bitgen_t *aug_state, double a) nogil
- double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale) nogil
- double legacy_chisquare(aug_bitgen_t *aug_state, double df) nogil
- double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df,
- double nonc) nogil
- double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum, double dfden,
- double nonc) nogil
- double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale) nogil
- double legacy_lognormal(aug_bitgen_t *aug_state, double mean, double sigma) nogil
- int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
- int64_t n, binomial_t *binomial) nogil
- int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, double p) nogil
- int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample) nogil
- int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) nogil
- int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) nogil
- int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) nogil
- int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) nogil
- void legacy_random_multinomial(bitgen_t *bitgen_state, long n, long *mnix, double *pix, np.npy_intp d, binomial_t *binomial) nogil
- double legacy_standard_cauchy(aug_bitgen_t *state) nogil
- double legacy_beta(aug_bitgen_t *aug_state, double a, double b) nogil
- double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden) nogil
- double legacy_exponential(aug_bitgen_t *aug_state, double scale) nogil
+++ /dev/null
-import operator
-
-import numpy as np
-cimport numpy as np
-
-from .common cimport *
-from .bit_generator cimport BitGenerator, SeedSequence
-
-__all__ = ['MT19937']
-
-np.import_array()
-
-cdef extern from "src/mt19937/mt19937.h":
-
- struct s_mt19937_state:
- uint32_t key[624]
- int pos
-
- ctypedef s_mt19937_state mt19937_state
-
- uint64_t mt19937_next64(mt19937_state *state) nogil
- uint32_t mt19937_next32(mt19937_state *state) nogil
- double mt19937_next_double(mt19937_state *state) nogil
- void mt19937_init_by_array(mt19937_state *state, uint32_t *init_key, int key_length)
- void mt19937_seed(mt19937_state *state, uint32_t seed)
- void mt19937_jump(mt19937_state *state)
-
- enum:
- RK_STATE_LEN
-
-cdef uint64_t mt19937_uint64(void *st) nogil:
- return mt19937_next64(<mt19937_state *> st)
-
-cdef uint32_t mt19937_uint32(void *st) nogil:
- return mt19937_next32(<mt19937_state *> st)
-
-cdef double mt19937_double(void *st) nogil:
- return mt19937_next_double(<mt19937_state *> st)
-
-cdef uint64_t mt19937_raw(void *st) nogil:
- return <uint64_t>mt19937_next32(<mt19937_state *> st)
-
-cdef class MT19937(BitGenerator):
- """
- MT19937(seed=None)
-
- Container for the Mersenne Twister pseudo-random number generator.
-
- Parameters
- ----------
- seed : {None, int, array_like[ints], ISeedSequence}, optional
- A seed to initialize the `BitGenerator`. If None, then fresh,
- unpredictable entropy will be pulled from the OS. If an ``int`` or
- ``array_like[ints]`` is passed, then it will be passed to
- `SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
-
- Attributes
- ----------
- lock: threading.Lock
- Lock instance that is shared so that the same bit generator can
- be used in multiple Generators without corrupting the state. Code that
- generates values from a bit generator should hold the bit generator's
- lock.
-
- Notes
- -----
- ``MT19937`` provides a capsule containing function pointers that produce
- doubles, and unsigned 32- and 64-bit integers [1]_. These are not
- directly consumable in Python and must be consumed by a ``Generator``
- or similar object that supports low-level access.
-
- The Python stdlib module "random" also contains a Mersenne Twister
- pseudo-random number generator.
-
- **State and Seeding**
-
- The ``MT19937`` state vector consists of a 624-element array of
- 32-bit unsigned integers plus a single integer value between 0 and 624
- that indexes the current position within the main array.
-
- The input seed is processed by `SeedSequence` to fill the whole state. The
- first element is reset such that only its most significant bit is set.
-
- **Parallel Features**
-
- The preferred way to use a BitGenerator in parallel applications is to use
- the `SeedSequence.spawn` method to obtain entropy values, and to use these
- to generate new BitGenerators:
-
- >>> from numpy.random import Generator, MT19937, SeedSequence
- >>> sg = SeedSequence(1234)
- >>> rg = [Generator(MT19937(s)) for s in sg.spawn(10)]
-
- Another method is to use `MT19937.jumped` which advances the state as if
- :math:`2^{128}` random numbers have been generated ([1]_, [2]_). This
- allows the original sequence to be split so that distinct segments can be
- used in each worker process. All generators should be chained to ensure
- that the segments come from the same sequence.
-
- >>> from numpy.random import Generator, MT19937, SeedSequence
- >>> sg = SeedSequence(1234)
- >>> bit_generator = MT19937(sg)
- >>> rg = []
- >>> for _ in range(10):
- ... rg.append(Generator(bit_generator))
- ... # Chain the BitGenerators
- ... bit_generator = bit_generator.jumped()
-
- **Compatibility Guarantee**
-
- ``MT19937`` guarantees that a fixed seed will always produce the
- same random integer stream.
-
- References
- ----------
- .. [1] Hiroshi Haramoto, Makoto Matsumoto, and Pierre L\'Ecuyer, "A Fast
- Jump Ahead Algorithm for Linear Recurrences in a Polynomial Space",
- Sequences and Their Applications - SETA, 290--298, 2008.
- .. [2] Hiroshi Haramoto, Makoto Matsumoto, Takuji Nishimura, François
- Panneton, Pierre L\'Ecuyer, "Efficient Jump Ahead for F2-Linear
- Random Number Generators", INFORMS JOURNAL ON COMPUTING, Vol. 20,
- No. 3, Summer 2008, pp. 385-390.
-
- """
- cdef mt19937_state rng_state
-
- def __init__(self, seed=None):
- BitGenerator.__init__(self, seed)
- val = self._seed_seq.generate_state(RK_STATE_LEN, np.uint32)
- # MSB is 1; assuring non-zero initial array
- self.rng_state.key[0] = 0x80000000UL
- for i in range(1, RK_STATE_LEN):
- self.rng_state.key[i] = val[i]
- self.rng_state.pos = i
-
- self._bitgen.state = &self.rng_state
- self._bitgen.next_uint64 = &mt19937_uint64
- self._bitgen.next_uint32 = &mt19937_uint32
- self._bitgen.next_double = &mt19937_double
- self._bitgen.next_raw = &mt19937_raw
-
- def _legacy_seeding(self, seed):
- """
- _legacy_seeding(seed)
-
- Seed the generator in a backward compatible way. For modern
- applications, creating a new instance is preferable. Calling this
- overrides self._seed_seq
-
- Parameters
- ----------
- seed : {None, int, array_like}
- Random seed initializing the pseudo-random number generator.
- Can be an integer in [0, 2**32-1], array of integers in
- [0, 2**32-1], a `SeedSequence`, or ``None``. If `seed`
- is ``None``, then fresh, unpredictable entropy will be pulled from
- the OS.
-
- Raises
- ------
- ValueError
- If seed values are out of range for the PRNG.
- """
- cdef np.ndarray obj
- with self.lock:
- try:
- if seed is None:
- seed = SeedSequence()
- val = seed.generate_state(RK_STATE_LEN)
- # MSB is 1; assuring non-zero initial array
- self.rng_state.key[0] = 0x80000000UL
- for i in range(1, RK_STATE_LEN):
- self.rng_state.key[i] = val[i]
- else:
- if hasattr(seed, 'squeeze'):
- seed = seed.squeeze()
- idx = operator.index(seed)
- if idx > int(2**32 - 1) or idx < 0:
- raise ValueError("Seed must be between 0 and 2**32 - 1")
- mt19937_seed(&self.rng_state, seed)
- except TypeError:
- obj = np.asarray(seed)
- if obj.size == 0:
- raise ValueError("Seed must be non-empty")
- obj = obj.astype(np.int64, casting='safe')
- if obj.ndim != 1:
- raise ValueError("Seed array must be 1-d")
- if ((obj > int(2**32 - 1)) | (obj < 0)).any():
- raise ValueError("Seed must be between 0 and 2**32 - 1")
- obj = obj.astype(np.uint32, casting='unsafe', order='C')
- mt19937_init_by_array(&self.rng_state, <uint32_t*> obj.data, np.PyArray_DIM(obj, 0))
- self._seed_seq = None
-
- cdef jump_inplace(self, iter):
- """
- Jump state in-place
-
- Not part of public API
-
- Parameters
- ----------
- iter : integer, positive
- Number of times to jump the state of the rng.
- """
- cdef np.npy_intp i
- for i in range(iter):
- mt19937_jump(&self.rng_state)
-
-
- def jumped(self, np.npy_intp jumps=1):
- """
- jumped(jumps=1)
-
- Returns a new bit generator with the state jumped
-
- The state of the returned bit generator is jumped as if
- 2**(128 * jumps) random numbers have been generated.
-
- Parameters
- ----------
- jumps : integer, positive
- Number of times to jump the state of the bit generator returned
-
- Returns
- -------
- bit_generator : MT19937
- New instance of generator jumped `jumps` times
- """
- cdef MT19937 bit_generator
-
- bit_generator = self.__class__()
- bit_generator.state = self.state
- bit_generator.jump_inplace(jumps)
-
- return bit_generator
-
- @property
- def state(self):
- """
- Get or set the PRNG state
-
- Returns
- -------
- state : dict
- Dictionary containing the information required to describe the
- state of the PRNG
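-
- Examples
- --------
- Round-tripping the dict saves and restores the stream (a sketch):
-
- >>> from numpy.random import MT19937
- >>> bg = MT19937(1234)
- >>> saved = bg.state
- >>> bg.state = saved
-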
- """
- key = np.zeros(624, dtype=np.uint32)
- for i in range(624):
- key[i] = self.rng_state.key[i]
-
- return {'bit_generator': self.__class__.__name__,
- 'state': {'key': key, 'pos': self.rng_state.pos}}
-
- @state.setter
- def state(self, value):
- if isinstance(value, tuple):
- if value[0] != 'MT19937' or len(value) not in (3, 5):
- raise ValueError('state is not a legacy MT19937 state')
- value = {'bit_generator': 'MT19937',
- 'state': {'key': value[1], 'pos': value[2]}}
-
- if not isinstance(value, dict):
- raise TypeError('state must be a dict')
- bitgen = value.get('bit_generator', '')
- if bitgen != self.__class__.__name__:
- raise ValueError('state must be for a {0} '
- 'PRNG'.format(self.__class__.__name__))
- key = value['state']['key']
- for i in range(624):
- self.rng_state.key[i] = key[i]
- self.rng_state.pos = value['state']['pos']
import numpy as np
-from .bounded_integers import _integers_types
-from .mt19937 import MT19937 as _MT19937
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
from cpython cimport (Py_INCREF, PyFloat_AsDouble)
-from libc cimport string
-
cimport cython
cimport numpy as np
-from .bounded_integers cimport *
-from .common cimport *
-from .distributions cimport *
-from .legacy_distributions cimport *
+from libc cimport string
+from libc.stdint cimport int64_t, uint64_t
+from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64,
+ _rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16,
+ _rand_uint8,)
+from ._bounded_integers import _integers_types
+from ._mt19937 import MT19937 as _MT19937
+from numpy.random cimport bitgen_t
+from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
+ CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1, CONS_GTE_1,
+ CONS_GT_1, LEGACY_CONS_POISSON,
+ double_fill, cont, kahan_sum, cont_broadcast_3,
+ check_array_constraint, check_constraint, disc, discrete_broadcast_iii,
+ )
+
+cdef extern from "numpy/random/distributions.h":
+ struct s_binomial_t:
+ int has_binomial
+ double psave
+ int64_t nsave
+ double r
+ double q
+ double fm
+ int64_t m
+ double p1
+ double xm
+ double xl
+ double xr
+ double c
+ double laml
+ double lamr
+ double p2
+ double p3
+ double p4
+
+ ctypedef s_binomial_t binomial_t
+
+ void random_standard_uniform_fill(bitgen_t* bitgen_state, np.npy_intp cnt, double *out) nogil
+ int64_t random_positive_int(bitgen_t *bitgen_state) nogil
+ double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
+ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
+ double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
+ double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+ double right) nogil
+ uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
+
+cdef extern from "include/legacy-distributions.h":
+ struct aug_bitgen:
+ bitgen_t *bit_generator
+ int has_gauss
+ double gauss
+
+ ctypedef aug_bitgen aug_bitgen_t
+
+ double legacy_gauss(aug_bitgen_t *aug_state) nogil
+ double legacy_pareto(aug_bitgen_t *aug_state, double a) nogil
+ double legacy_weibull(aug_bitgen_t *aug_state, double a) nogil
+ double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape) nogil
+ double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale) nogil
+ double legacy_standard_t(aug_bitgen_t *aug_state, double df) nogil
+
+ double legacy_standard_exponential(aug_bitgen_t *aug_state) nogil
+ double legacy_power(aug_bitgen_t *aug_state, double a) nogil
+ double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale) nogil
+ double legacy_chisquare(aug_bitgen_t *aug_state, double df) nogil
+ double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df,
+ double nonc) nogil
+ double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum, double dfden,
+ double nonc) nogil
+ double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale) nogil
+ double legacy_lognormal(aug_bitgen_t *aug_state, double mean, double sigma) nogil
+ int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial) nogil
+ int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, double p) nogil
+ int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample) nogil
+ int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) nogil
+ int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) nogil
+ int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) nogil
+ int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) nogil
+ void legacy_random_multinomial(bitgen_t *bitgen_state, long n, long *mnix, double *pix, np.npy_intp d, binomial_t *binomial) nogil
+ double legacy_standard_cauchy(aug_bitgen_t *state) nogil
+ double legacy_beta(aug_bitgen_t *aug_state, double a, double b) nogil
+ double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden) nogil
+ double legacy_exponential(aug_bitgen_t *aug_state, double scale) nogil
np.import_array()
--------
Generator
MT19937
- :ref:`bit_generator`
+ numpy.random.BitGenerator
"""
cdef public object _bit_generator
(b - a) * random_sample() + a
+ .. note::
+ New code should use the ``random`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
Array of random floats of shape `size` (unless ``size=None``, in which
case a single float is returned).
+ See Also
+ --------
+ Generator.random: which should be used for new code.
+
Examples
--------
>>> np.random.random_sample()
"""
cdef double temp
- return double_fill(&random_double_fill, &self._bitgen, size, self.lock, None)
+ return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, None)
def random(self, size=None):
"""
It is often seen in Bayesian inference and order statistics.
+ .. note::
+ New code should use the ``beta`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
out : ndarray or scalar
Drawn samples from the parameterized beta distribution.
+ See Also
+ --------
+ Generator.beta: which should be used for new code.
"""
return cont(&legacy_beta, &self._aug_state, size, self.lock, 2,
a, 'a', CONS_POSITIVE,
the size of raindrops measured over many rainstorms [1]_, or the time
between page requests to Wikipedia [2]_.
+ .. note::
+ New code should use the ``exponential`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
scale : float or array_like of floats
out : ndarray or scalar
Drawn samples from the parameterized exponential distribution.
+ See Also
+ --------
+ Generator.exponential: which should be used for new code.
+
References
----------
.. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
`standard_exponential` is identical to the exponential distribution
with a scale parameter of 1.
+ .. note::
+ New code should use the ``standard_exponential`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
out : float or ndarray
Drawn samples.
+ See Also
+ --------
+ Generator.standard_exponential: which should be used for new code.
+
Examples
--------
Output a 3x8000 array:
tomaxint(size=None)
Return a sample of uniformly distributed random integers in the interval
- [0, ``np.iinfo(np.int).max``]. The np.int type translates to the C long
+ [0, ``np.iinfo(np.int_).max``]. The `np.int_` type translates to the C long
integer type and its precision is platform dependent.
Parameters
[ 739731006, 1947757578]],
[[1871712945, 752307660],
[1601631370, 1479324245]]])
- >>> rs.tomaxint((2,2,2)) < np.iinfo(np.int).max
+ >>> rs.tomaxint((2,2,2)) < np.iinfo(np.int_).max
array([[[ True, True],
[ True, True]],
[[ True, True],
the specified dtype in the "half-open" interval [`low`, `high`). If
`high` is None (the default), then results are from [0, `low`).
+ .. note::
+ New code should use the ``integers`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
low : int or array-like of ints
Desired dtype of the result. All dtypes are determined by their
name, i.e., 'int64', 'int', etc., so byteorder is not available
and a specific precision may have different C types depending
- on the platform. The default value is 'np.int'.
+ on the platform. The default value is `np.int_`.
.. versionadded:: 1.11.0
See Also
--------
- random.random_integers : similar to `randint`, only for the closed
+ random_integers : similar to `randint`, only for the closed
interval [`low`, `high`], and 1 is the lowest value if `high` is
omitted.
+ Generator.integers: which should be used for new code.
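+
+ As a quick, illustrative sketch of the replacement call (values are
+ random; note that ``Generator.integers`` also accepts an ``endpoint``
+ keyword for sampling from the closed interval):
+
+ >>> rng = np.random.default_rng()
+ >>> rng.integers(5, size=10)
+ array([4, 0, 2, 1, 4, 2, 0, 3, 1, 3]) # random
+ >>> rng.integers(1, 7, size=3, endpoint=True)
+ array([6, 2, 5]) # random
+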
Examples
--------
elif key == 'bool':
ret = _rand_bool(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- if size is None and dtype in (np.bool, np.int, np.long):
+ if size is None and dtype in (bool, int, np.compat.long):
if np.array(ret).shape == ():
return dtype(ret)
return ret
Return random bytes.
+ .. note::
+ New code should use the ``bytes`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
length : int
out : str
String of length `length`.
+ See Also
+ --------
+ Generator.bytes: which should be used for new code.
+
Examples
--------
>>> np.random.bytes(10)
' eh\\x85\\x022SZ\\xbf\\xa4' #random
-
"""
cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
# Interpret the uint32s as little-endian to convert them to bytes
.. versionadded:: 1.7.0
+ .. note::
+ New code should use the ``choice`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : 1-D array-like or int
See Also
--------
randint, shuffle, permutation
+ Generator.choice: which should be used for new code
Examples
--------
any value within the given interval is equally likely to be drawn
by `uniform`.
+ .. note::
+ New code should use the ``uniform`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
low : float or array_like of floats, optional
rand : Convenience function that accepts dimensions as input, e.g.,
``rand(2,2)`` would generate a 2-by-2 array of floats,
uniformly distributed over ``[0, 1)``.
+ Generator.uniform: which should be used for new code.
Notes
-----
.. note::
This is a convenience function for users porting code from Matlab,
- and wraps `numpy.random.random_sample`. That function takes a
+ and wraps `random_sample`. That function takes a
tuple to specify the size of the output, which is consistent with
other NumPy functions like `numpy.zeros` and `numpy.ones`.
.. note::
This is a convenience function for users porting code from Matlab,
- and wraps `numpy.random.standard_normal`. That function takes a
+ and wraps `standard_normal`. That function takes a
tuple to specify the size of the output, which is consistent with
other NumPy functions like `numpy.zeros` and `numpy.ones`.
+ .. note::
+ New code should use the ``standard_normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
If positive int_like arguments are provided, `randn` generates an array
of shape ``(d0, d1, ..., dn)``, filled
with random floats sampled from a univariate "normal" (Gaussian)
--------
standard_normal : Similar, but takes a tuple as its argument.
normal : Also accepts mu and sigma arguments.
+ Generator.standard_normal: which should be used for new code.
Notes
-----
"""
random_integers(low, high=None, size=None)
- Random integers of type np.int between `low` and `high`, inclusive.
+ Random integers of type `np.int_` between `low` and `high`, inclusive.
- Return random integers of type np.int from the "discrete uniform"
+ Return random integers of type `np.int_` from the "discrete uniform"
distribution in the closed interval [`low`, `high`]. If `high` is
- None (the default), then results are from [1, `low`]. The np.int
+ None (the default), then results are from [1, `low`]. The `np.int_`
type translates to the C long integer type and its precision
is platform dependent.
Draw samples from a standard Normal distribution (mean=0, stdev=1).
+ .. note::
+ New code should use the ``standard_normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
A floating-point array of shape ``size`` of drawn samples, or a
single sample if ``size`` was not specified.
+ See Also
+ --------
+ normal :
+ Equivalent function with additional ``loc`` and ``scale`` arguments
+ for setting the mean and standard deviation.
+ Generator.standard_normal: which should be used for new code.
+
Notes
-----
For random samples from :math:`N(\\mu, \\sigma^2)`, use one of::
mu + sigma * np.random.standard_normal(size=...)
np.random.normal(mu, sigma, size=...)
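+
+ or, equivalently, with a ``Generator`` instance ``rng`` (a new-style
+ sketch)::
+
+ mu + sigma * rng.standard_normal(size=...)
+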
- See Also
- --------
- normal :
- Equivalent function with additional ``loc`` and ``scale`` arguments
- for setting the mean and standard deviation.
-
Examples
--------
>>> np.random.standard_normal()
by a large number of tiny, random disturbances, each with its own
unique distribution [2]_.
+ .. note::
+ New code should use the ``normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats
--------
scipy.stats.norm : probability density function, distribution or
cumulative density function, etc.
+ Generator.normal: which should be used for new code.
Notes
-----
The function has its peak at the mean, and its "spread" increases with
the standard deviation (the function reaches 0.607 times its maximum at
:math:`x + \\sigma` and :math:`x - \\sigma` [2]_). This implies that
- `numpy.random.normal` is more likely to return samples lying close to
- the mean, rather than those far away.
+ normal is more likely to return samples lying close to the mean, rather
+ than those far away.
References
----------
Samples are drawn from a Gamma distribution with specified parameters,
shape (sometimes designated "k") and scale=1.
+ .. note::
+ New code should use the ``standard_gamma`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
shape : float or array_like of floats
--------
scipy.stats.gamma : probability density function, distribution or
cumulative density function, etc.
+ Generator.standard_gamma: which should be used for new code.
Notes
-----
`shape` (sometimes designated "k") and `scale` (sometimes designated
"theta"), where both parameters are > 0.
+ .. note::
+ New code should use the ``gamma`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
shape : float or array_like of floats
--------
scipy.stats.gamma : probability density function, distribution or
cumulative density function, etc.
+ Generator.gamma: which should be used for new code.
Notes
-----
that arises in ANOVA tests, and is the ratio of two chi-square
variates.
+ .. note::
+ New code should use the ``f`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
dfnum : float or array_like of floats
--------
scipy.stats.f : probability density function, distribution or
cumulative density function, etc.
+ Generator.f: which should be used for new code.
Notes
-----
freedom in denominator), where both parameters > 1.
`nonc` is the non-centrality parameter.
+ .. note::
+ New code should use the ``noncentral_f`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
dfnum : float or array_like of floats
out : ndarray or scalar
Drawn samples from the parameterized noncentral Fisher distribution.
+ See Also
+ --------
+ Generator.noncentral_f: which should be used for new code.
+
Notes
-----
When calculating the power of an experiment (power = probability of
resulting distribution is chi-square (see Notes). This distribution
is often used in hypothesis testing.
+ .. note::
+ New code should use the ``chisquare`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
df : float or array_like of floats
When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)
is given.
+ See Also
+ --------
+ Generator.chisquare: which should be used for new code.
+
Notes
-----
The variable obtained by summing the squares of `df` independent,
--------
>>> np.random.chisquare(2,4)
array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
-
"""
return cont(&legacy_chisquare, &self._aug_state, size, self.lock, 1,
df, 'df', CONS_POSITIVE,
The noncentral :math:`\\chi^2` distribution is a generalization of
the :math:`\\chi^2` distribution.
+ .. note::
+ New code should use the ``noncentral_chisquare`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
df : float or array_like of floats
out : ndarray or scalar
Drawn samples from the parameterized noncentral chi-square distribution.
+ See Also
+ --------
+ Generator.noncentral_chisquare: which should be used for new code.
+
Notes
-----
The probability density function for the noncentral Chi-square
Also known as the Lorentz distribution.
+ .. note::
+ New code should use the ``standard_cauchy`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
samples : ndarray or scalar
The drawn samples.
+ See Also
+ --------
+ Generator.standard_cauchy: which should be used for new code.
+
Notes
-----
The probability density function for the full Cauchy distribution is
large, the result resembles that of the standard normal
distribution (`standard_normal`).
+ .. note::
+ New code should use the ``standard_t`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
df : float or array_like of floats
out : ndarray or scalar
Drawn samples from the parameterized standard Student's t distribution.
+ See Also
+ --------
+ Generator.standard_t: which should be used for new code.
+
Notes
-----
The probability density function for the t distribution is
circle. It may be thought of as the circular analogue of the normal
distribution.
+ .. note::
+ New code should use the ``vonmises`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mu : float or array_like of floats
--------
scipy.stats.vonmises : probability density function, distribution, or
cumulative density function, etc.
+ Generator.vonmises: which should be used for new code.
Notes
-----
20 percent of the range, while the other 20 percent fill the
remaining 80 percent of the range.
+ .. note::
+ New code should use the ``pareto`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
cumulative density function, etc.
scipy.stats.genpareto : probability density function, distribution or
cumulative density function, etc.
+ Generator.pareto: which should be used for new code.
Notes
-----
The more common 2-parameter Weibull, including a scale parameter
:math:`\\lambda`, is just :math:`X = \\lambda(-\\ln(U))^{1/a}`.
+ .. note::
+ New code should use the ``weibull`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
scipy.stats.weibull_min
scipy.stats.genextreme
gumbel
+ Generator.weibull: which should be used for new code.
Notes
-----
Also known as the power function distribution.
+ .. note::
+ New code should use the ``power`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
ValueError
If a < 1.
+ See Also
+ --------
+ Generator.power: which should be used for new code.
+
Notes
-----
The probability density function is
difference between two independent, identically distributed exponential
random variables.
+ .. note::
+ New code should use the ``laplace`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats, optional
out : ndarray or scalar
Drawn samples from the parameterized Laplace distribution.
+ See Also
+ --------
+ Generator.laplace: which should be used for new code.
+
Notes
-----
It has the probability density function
scale. For more information on the Gumbel distribution, see
Notes and References below.
+ .. note::
+ New code should use the ``gumbel`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats, optional
scipy.stats.gumbel_r
scipy.stats.genextreme
weibull
+ Generator.gumbel: which should be used for new code.
Notes
-----
Samples are drawn from a logistic distribution with specified
parameters, loc (location or mean, also median), and scale (>0).
+ .. note::
+ New code should use the ``logistic`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats, optional
--------
scipy.stats.logistic : probability density function, distribution or
cumulative density function, etc.
+ Generator.logistic: which should be used for new code.
Notes
-----
deviation are not the values for the distribution itself, but of the
underlying normal distribution it is derived from.
+ .. note::
+ New code should use the ``lognormal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mean : float or array_like of floats, optional
--------
scipy.stats.lognorm : probability density function, distribution,
cumulative density function, etc.
+ Generator.lognormal: which should be used for new code.
Notes
-----
The :math:`\\chi` and Weibull distributions are generalizations of the
Rayleigh.
+ .. note::
+ New code should use the ``rayleigh`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
scale : float or array_like of floats, optional
out : ndarray or scalar
Drawn samples from the parameterized Rayleigh distribution.
+ See Also
+ --------
+ Generator.rayleigh: which should be used for new code.
+
Notes
-----
The probability density function for the Rayleigh distribution is
because there is an inverse relationship between the time to cover a
unit distance and distance covered in unit time.
+ .. note::
+ New code should use the ``wald`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mean : float or array_like of floats
out : ndarray or scalar
Drawn samples from the parameterized Wald distribution.
+ See Also
+ --------
+ Generator.wald: which should be used for new code.
+
Notes
-----
The probability density function for the Wald distribution is
limit right. Unlike the other distributions, these parameters
directly define the shape of the pdf.
+ .. note::
+ New code should use the ``triangular`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
left : float or array_like of floats
out : ndarray or scalar
Drawn samples from the parameterized triangular distribution.
+ See Also
+ --------
+ Generator.triangular: which should be used for new code.
+
Notes
-----
The probability density function for the triangular distribution is
n an integer >= 0 and p is in the interval [0,1]. (n may be
input as a float, but it is truncated to an integer in use)
+ .. note::
+ New code should use the ``binomial`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
n : int or array_like of ints
--------
scipy.stats.binom : probability density function, distribution or
cumulative density function, etc.
+ Generator.binomial: which should be used for new code.
Notes
-----
parameters, `n` successes and `p` probability of success where `n`
is > 0 and `p` is in the interval [0, 1].
+ .. note::
+ New code should use the ``negative_binomial`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
n : float or array_like of floats
where each sample is equal to N, the number of failures that
occurred before a total of n successes was reached.
+ See Also
+ --------
+ Generator.negative_binomial: which should be used for new code.
+
Notes
-----
The probability mass function of the negative binomial distribution is
The Poisson distribution is the limit of the binomial distribution
for large N.
+ .. note::
+ New code should use the ``poisson`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
lam : float or array_like of floats
out : ndarray or scalar
Drawn samples from the parameterized Poisson distribution.
+ See Also
+ --------
+ Generator.poisson: which should be used for new code.
+
Notes
-----
The Poisson distribution
frequency of an item is inversely proportional to its rank in a
frequency table.
+ .. note::
+ New code should use the ``zipf`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
--------
scipy.stats.zipf : probability density function, distribution, or
cumulative density function, etc.
+ Generator.zipf: which should be used for new code.
Notes
-----
where `p` is the probability of success of an individual trial.
+ .. note::
+ New code should use the ``geometric`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
p : float or array_like of floats
out : ndarray or scalar
Drawn samples from the parameterized geometric distribution.
+ See Also
+ --------
+ Generator.geometric: which should be used for new code.
+
Examples
--------
Draw ten thousand values from the geometric distribution,
a bad selection), and `nsample` (number of items sampled, which is less
than or equal to the sum ``ngood + nbad``).
+ .. note::
+ New code should use the ``hypergeometric`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
ngood : int or array_like of ints
--------
scipy.stats.hypergeom : probability density function, distribution or
cumulative density function, etc.
+ Generator.hypergeometric: which should be used for new code.
Notes
-----
Samples are drawn from a log series distribution with specified
shape parameter, 0 < ``p`` < 1.
+ .. note::
+ New code should use the ``logseries`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
p : float or array_like of floats
--------
scipy.stats.logser : probability density function, distribution or
cumulative density function, etc.
+ Generator.logseries: which should be used for new code.
Notes
-----
(average or "center") and variance (standard deviation, or "width,"
squared) of the one-dimensional normal distribution.
+ .. note::
+ New code should use the ``multivariate_normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mean : 1-D array_like, of length N
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
+ See Also
+ --------
+ Generator.multivariate_normal: which should be used for new code.
+
Notes
-----
The mean is a coordinate in N-dimensional space, which represents the
``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the
outcome was ``i``.
+ .. note::
+ New code should use the ``multinomial`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
n : int
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
+ See Also
+ --------
+ Generator.multinomial: which should be used for new code.
+
Examples
--------
Throw a die 20 times:
is a conjugate prior of a multinomial distribution in Bayesian
inference.
+ .. note::
+ New code should use the ``dirichlet`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
alpha : array
ValueError
If any value in alpha is less than or equal to zero
+ See Also
+ --------
+ Generator.dirichlet: which should be used for new code.
+
Notes
-----
The Dirichlet distribution is a distribution over vectors
multi-dimensional array. The order of sub-arrays is changed but
their contents remain the same.
+ .. note::
+ New code should use the ``shuffle`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
x : array_like
-------
None
+ See Also
+ --------
+ Generator.shuffle: which should be used for new code.
+
Examples
--------
>>> arr = np.arange(10)
# Fast, statically typed path: shuffle the underlying buffer.
# Only for non-empty, 1d objects of class ndarray (subclasses such
# as MaskedArrays may not support this approach).
- x_ptr = <char*><size_t>x.ctypes.data
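+ # np.PyArray_DATA fetches the raw buffer pointer through NumPy's
+ # C API, avoiding the Python-level ctypes attribute machinery that
+ # x.ctypes.data goes through on every access.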
+ x_ptr = <char*><size_t>np.PyArray_DATA(x)
stride = x.strides[0]
itemsize = x.dtype.itemsize
# As the array x could contain python objects we use a buffer
# of bytes for the swaps to avoid leaving one of the objects
# within the buffer and erroneously decrementing its refcount
# when the function exits.
buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
- buf_ptr = <char*><size_t>buf.ctypes.data
+ buf_ptr = <char*><size_t>np.PyArray_DATA(buf)
with self.lock:
# We trick gcc into providing a specialized implementation for
# the most common case, yielding a ~33% performance improvement.
If `x` is a multi-dimensional array, it is only shuffled along its
first index.
+ .. note::
+ New code should use the ``permutation`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
x : int or array_like
out : ndarray
Permuted sequence or array range.
+ See Also
+ --------
+ Generator.permutation: which should be used for new code.
Examples
--------
+++ /dev/null
-import numpy as np
-cimport numpy as np
-
-from .common cimport *
-from .bit_generator cimport BitGenerator
-
-__all__ = ['PCG64']
-
-cdef extern from "src/pcg64/pcg64.h":
- # Use int as generic type, actual type read from pcg64.h and is platform dependent
- ctypedef int pcg64_random_t
-
- struct s_pcg64_state:
- pcg64_random_t *pcg_state
- int has_uint32
- uint32_t uinteger
-
- ctypedef s_pcg64_state pcg64_state
-
- uint64_t pcg64_next64(pcg64_state *state) nogil
- uint32_t pcg64_next32(pcg64_state *state) nogil
- void pcg64_jump(pcg64_state *state)
- void pcg64_advance(pcg64_state *state, uint64_t *step)
- void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc)
- void pcg64_get_state(pcg64_state *state, uint64_t *state_arr, int *has_uint32, uint32_t *uinteger)
- void pcg64_set_state(pcg64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger)
-
-cdef uint64_t pcg64_uint64(void* st) nogil:
- return pcg64_next64(<pcg64_state *>st)
-
-cdef uint32_t pcg64_uint32(void *st) nogil:
- return pcg64_next32(<pcg64_state *> st)
-
-cdef double pcg64_double(void* st) nogil:
- return uint64_to_double(pcg64_next64(<pcg64_state *>st))
-
-
-cdef class PCG64(BitGenerator):
- """
- PCG64(seed_seq=None)
-
- BitGenerator for the PCG-64 pseudo-random number generator.
-
- Parameters
- ----------
- seed : {None, int, array_like[ints], ISeedSequence}, optional
- A seed to initialize the `BitGenerator`. If None, then fresh,
- unpredictable entropy will be pulled from the OS. If an ``int`` or
- ``array_like[ints]`` is passed, then it will be passed to
- `SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
-
- Notes
- -----
- PCG-64 is a 128-bit implementation of O'Neill's permutation congruential
- generator ([1]_, [2]_). PCG-64 has a period of :math:`2^{128}` and supports
- advancing an arbitrary number of steps as well as :math:`2^{127}` streams.
- The specific member of the PCG family that we use is PCG XSL RR 128/64
- as described in the paper ([2]_).
-
- ``PCG64`` provides a capsule containing function pointers that produce
- doubles, and unsigned 32- and 64-bit integers. These are not
- directly consumable in Python and must be consumed by a ``Generator``
- or similar object that supports low-level access.
-
- Supports the method :meth:`advance` to advance the RNG an arbitrary number of
- steps. The state of the PCG-64 RNG is represented by 2 128-bit unsigned
- integers.
-
- **State and Seeding**
-
- The ``PCG64`` state vector consists of 2 unsigned 128-bit values,
- which are represented externally as Python ints. One is the state of the
- PRNG, which is advanced by a linear congruential generator (LCG). The
- second is a fixed odd increment used in the LCG.
-
- The input seed is processed by `SeedSequence` to generate both values. The
- increment is not independently settable.
-
- **Parallel Features**
-
- The preferred way to use a BitGenerator in parallel applications is to use
- the `SeedSequence.spawn` method to obtain entropy values, and to use these
- to generate new BitGenerators:
-
- >>> from numpy.random import Generator, PCG64, SeedSequence
- >>> sg = SeedSequence(1234)
- >>> rg = [Generator(PCG64(s)) for s in sg.spawn(10)]
-
- **Compatibility Guarantee**
-
- ``PCG64`` makes a guarantee that a fixed seed will always produce
- the same random integer stream.
-
- References
- ----------
- .. [1] `"PCG, A Family of Better Random Number Generators"
- <http://www.pcg-random.org/>`_
- .. [2] O'Neill, Melissa E. `"PCG: A Family of Simple Fast Space-Efficient
- Statistically Good Algorithms for Random Number Generation"
- <https://www.cs.hmc.edu/tr/hmc-cs-2014-0905.pdf>`_
- """
-
- cdef pcg64_state rng_state
- cdef pcg64_random_t pcg64_random_state
-
- def __init__(self, seed=None):
- BitGenerator.__init__(self, seed)
- self.rng_state.pcg_state = &self.pcg64_random_state
-
- self._bitgen.state = <void *>&self.rng_state
- self._bitgen.next_uint64 = &pcg64_uint64
- self._bitgen.next_uint32 = &pcg64_uint32
- self._bitgen.next_double = &pcg64_double
- self._bitgen.next_raw = &pcg64_uint64
- # Seed the _bitgen
- val = self._seed_seq.generate_state(4, np.uint64)
- pcg64_set_seed(&self.rng_state,
- <uint64_t *>np.PyArray_DATA(val),
- (<uint64_t *>np.PyArray_DATA(val) + 2))
- self._reset_state_variables()
-
- cdef _reset_state_variables(self):
- self.rng_state.has_uint32 = 0
- self.rng_state.uinteger = 0
-
- cdef jump_inplace(self, jumps):
- """
- Jump state in-place
- Not part of public API
-
- Parameters
- ----------
- jumps : integer, positive
- Number of times to jump the state of the rng.
-
- Notes
- -----
- The step size is phi-1 when multiplied by 2**128 where phi is the
- golden ratio.
- """
- step = 0x9e3779b97f4a7c15f39cc0605cedc835
- self.advance(step * int(jumps))
-
- def jumped(self, jumps=1):
- """
- jumped(jumps=1)
-
- Returns a new bit generator with the state jumped.
-
- Jumps the state as-if jumps * 210306068529402873165736369884012333109
- random numbers have been generated.
-
- Parameters
- ----------
- jumps : integer, positive
- Number of times to jump the state of the bit generator returned
-
- Returns
- -------
- bit_generator : PCG64
- New instance of generator jumped iter times
-
- Notes
- -----
- The step size is phi-1 when multiplied by 2**128 where phi is the
- golden ratio.
- """
- cdef PCG64 bit_generator
-
- bit_generator = self.__class__()
- bit_generator.state = self.state
- bit_generator.jump_inplace(jumps)
-
- return bit_generator
-
- @property
- def state(self):
- """
- Get or set the PRNG state
-
- Returns
- -------
- state : dict
- Dictionary containing the information required to describe the
- state of the PRNG
- """
- cdef np.ndarray state_vec
- cdef int has_uint32
- cdef uint32_t uinteger
-
- # state_vec is state.high, state.low, inc.high, inc.low
- state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
- pcg64_get_state(&self.rng_state,
- <uint64_t *>np.PyArray_DATA(state_vec),
- &has_uint32, &uinteger)
- state = int(state_vec[0]) * 2**64 + int(state_vec[1])
- inc = int(state_vec[2]) * 2**64 + int(state_vec[3])
- return {'bit_generator': self.__class__.__name__,
- 'state': {'state': state, 'inc': inc},
- 'has_uint32': has_uint32,
- 'uinteger': uinteger}
-
- @state.setter
- def state(self, value):
- cdef np.ndarray state_vec
- cdef int has_uint32
- cdef uint32_t uinteger
- if not isinstance(value, dict):
- raise TypeError('state must be a dict')
- bitgen = value.get('bit_generator', '')
- if bitgen != self.__class__.__name__:
- raise ValueError('state must be for a {0} '
- 'RNG'.format(self.__class__.__name__))
- state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
- state_vec[0] = value['state']['state'] // 2 ** 64
- state_vec[1] = value['state']['state'] % 2 ** 64
- state_vec[2] = value['state']['inc'] // 2 ** 64
- state_vec[3] = value['state']['inc'] % 2 ** 64
- has_uint32 = value['has_uint32']
- uinteger = value['uinteger']
- pcg64_set_state(&self.rng_state,
- <uint64_t *>np.PyArray_DATA(state_vec),
- has_uint32, uinteger)
-
- def advance(self, delta):
- """
- advance(delta)
-
- Advance the underlying RNG as-if delta draws have occurred.
-
- Parameters
- ----------
- delta : integer, positive
- Number of draws to advance the RNG. Must be less than the
- size state variable in the underlying RNG.
-
- Returns
- -------
- self : PCG64
- RNG advanced delta steps
-
- Notes
- -----
- Advancing a RNG updates the underlying RNG state as-if a given
- number of calls to the underlying RNG have been made. In general
- there is not a one-to-one relationship between the number of output
- random values from a particular distribution and the number of
- draws from the core RNG. This occurs for two reasons:
-
- * The random values are simulated using a rejection-based method
- and so, on average, more than one value from the underlying
- RNG is required to generate a single draw.
- * The number of bits required to generate a simulated value
- differs from the number of bits generated by the underlying
- RNG. For example, two 16-bit integer values can be simulated
- from a single draw of a 32-bit RNG.
-
- Advancing the RNG state resets any pre-computed random numbers.
- This is required to ensure exact reproducibility.
- """
- delta = wrap_int(delta, 128)
-
- cdef np.ndarray d = np.empty(2, dtype=np.uint64)
- d[0] = delta // 2**64
- d[1] = delta % 2**64
- pcg64_advance(&self.rng_state, <uint64_t *>np.PyArray_DATA(d))
- self._reset_state_variables()
- return self
+++ /dev/null
-from cpython.pycapsule cimport PyCapsule_New
-
-try:
- from threading import Lock
-except ImportError:
- from dummy_threading import Lock
-
-import numpy as np
-
-from .common cimport *
-from .bit_generator cimport BitGenerator
-
-__all__ = ['Philox']
-
-np.import_array()
-
-DEF PHILOX_BUFFER_SIZE=4
-
-cdef extern from 'src/philox/philox.h':
- struct s_r123array2x64:
- uint64_t v[2]
-
- struct s_r123array4x64:
- uint64_t v[4]
-
- ctypedef s_r123array4x64 r123array4x64
- ctypedef s_r123array2x64 r123array2x64
-
- ctypedef r123array4x64 philox4x64_ctr_t
- ctypedef r123array2x64 philox4x64_key_t
-
- struct s_philox_state:
- philox4x64_ctr_t *ctr
- philox4x64_key_t *key
- int buffer_pos
- uint64_t buffer[PHILOX_BUFFER_SIZE]
- int has_uint32
- uint32_t uinteger
-
- ctypedef s_philox_state philox_state
-
- uint64_t philox_next64(philox_state *state) nogil
- uint32_t philox_next32(philox_state *state) nogil
- void philox_jump(philox_state *state)
- void philox_advance(uint64_t *step, philox_state *state)
-
-
-cdef uint64_t philox_uint64(void*st) nogil:
- return philox_next64(<philox_state *> st)
-
-cdef uint32_t philox_uint32(void *st) nogil:
- return philox_next32(<philox_state *> st)
-
-cdef double philox_double(void*st) nogil:
- return uint64_to_double(philox_next64(<philox_state *> st))
-
-cdef class Philox(BitGenerator):
- """
- Philox(seed=None, counter=None, key=None)
-
- Container for the Philox (4x64) pseudo-random number generator.
-
- Parameters
- ----------
- seed : {None, int, array_like[ints], ISeedSequence}, optional
- A seed to initialize the `BitGenerator`. If None, then fresh,
- unpredictable entropy will be pulled from the OS. If an ``int`` or
- ``array_like[ints]`` is passed, then it will be passed to
- `SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
- counter : {None, int, array_like}, optional
- Counter to use in the Philox state. Can be either
- a Python int (long in 2.x) in [0, 2**256) or a 4-element uint64 array.
- If not provided, the RNG is initialized at 0.
- key : {None, int, array_like}, optional
- Key to use in the Philox state. Unlike seed, the value in key is
- directly set. Can be either a Python int in [0, 2**128) or a 2-element
- uint64 array. `key` and `seed` cannot both be used.
-
- Attributes
- ----------
- lock: threading.Lock
- Lock instance that is shared so that the same bit generator can
- be used in multiple Generators without corrupting the state. Code that
- generates values from a bit generator should hold the bit generator's
- lock.
-
- Notes
- -----
- Philox is a 64-bit PRNG that uses a counter-based design based on weaker
- (and faster) versions of cryptographic functions [1]_. Instances using
- different values of the key produce independent sequences. Philox has a
- period of :math:`2^{256} - 1` and supports arbitrary advancing and jumping
- the sequence in increments of :math:`2^{128}`. These features allow
- multiple non-overlapping sequences to be generated.
-
- ``Philox`` provides a capsule containing function pointers that produce
- doubles, and unsigned 32- and 64-bit integers. These are not
- directly consumable in Python and must be consumed by a ``Generator``
- or similar object that supports low-level access.
-
- **State and Seeding**
-
- The ``Philox`` state vector consists of a 256-bit value encoded as
- a 4-element uint64 array and a 128-bit value encoded as a 2-element uint64
- array. The former is a counter which is incremented by 1 for every 4 64-bit
- randoms produced. The second is a key which determines the sequence
- produced. Using different keys produces independent sequences.
-
- The input seed is processed by `SeedSequence` to generate the key. The
- counter is set to 0.
-
- Alternately, one can omit the seed parameter and set the ``key`` and
- ``counter`` directly.
-
- **Parallel Features**
-
- The preferred way to use a BitGenerator in parallel applications is to use
- the `SeedSequence.spawn` method to obtain entropy values, and to use these
- to generate new BitGenerators:
-
- >>> from numpy.random import Generator, Philox, SeedSequence
- >>> sg = SeedSequence(1234)
- >>> rg = [Generator(Philox(s)) for s in sg.spawn(10)]
-
- ``Philox`` can be used in parallel applications by calling the ``jumped``
- method to advance the state as-if :math:`2^{128}` random numbers have
- been generated. Alternatively, ``advance`` can be used to advance the
- counter for any positive step in [0, 2**256). When using ``jumped``, all
- generators should be chained to ensure that the segments come from the same
- sequence.
-
- >>> from numpy.random import Generator, Philox
- >>> bit_generator = Philox(1234)
- >>> rg = []
- >>> for _ in range(10):
- ... rg.append(Generator(bit_generator))
- ... bit_generator = bit_generator.jumped()
-
- Alternatively, ``Philox`` can be used in parallel applications by using
- a sequence of distinct keys where each instance uses a different key.
-
- >>> key = 2**96 + 2**33 + 2**17 + 2**9
- >>> rg = [Generator(Philox(key=key+i)) for i in range(10)]
-
- **Compatibility Guarantee**
-
- ``Philox`` makes a guarantee that a fixed seed will always produce
- the same random integer stream.
-
- Examples
- --------
- >>> from numpy.random import Generator, Philox
- >>> rg = Generator(Philox(1234))
- >>> rg.standard_normal()
- 0.123 # random
-
- References
- ----------
- .. [1] John K. Salmon, Mark A. Moraes, Ron O. Dror, and David E. Shaw,
- "Parallel Random Numbers: As Easy as 1, 2, 3," Proceedings of
- the International Conference for High Performance Computing,
- Networking, Storage and Analysis (SC11), New York, NY: ACM, 2011.
- """
- cdef philox_state rng_state
- cdef philox4x64_key_t philox_key
- cdef philox4x64_ctr_t philox_ctr
-
- def __init__(self, seed=None, counter=None, key=None):
- if seed is not None and key is not None:
- raise ValueError('seed and key cannot be both used')
- BitGenerator.__init__(self, seed)
- self.rng_state.ctr = &self.philox_ctr
- self.rng_state.key = &self.philox_key
- if key is not None:
- key = int_to_array(key, 'key', 128, 64)
- for i in range(2):
- self.rng_state.key.v[i] = key[i]
- # The seed sequence is invalid.
- self._seed_seq = None
- else:
- key = self._seed_seq.generate_state(2, np.uint64)
- for i in range(2):
- self.rng_state.key.v[i] = key[i]
- counter = 0 if counter is None else counter
- counter = int_to_array(counter, 'counter', 256, 64)
- for i in range(4):
- self.rng_state.ctr.v[i] = counter[i]
-
- self._reset_state_variables()
-
- self._bitgen.state = <void *>&self.rng_state
- self._bitgen.next_uint64 = &philox_uint64
- self._bitgen.next_uint32 = &philox_uint32
- self._bitgen.next_double = &philox_double
- self._bitgen.next_raw = &philox_uint64
-
- cdef _reset_state_variables(self):
- self.rng_state.has_uint32 = 0
- self.rng_state.uinteger = 0
- self.rng_state.buffer_pos = PHILOX_BUFFER_SIZE
- for i in range(PHILOX_BUFFER_SIZE):
- self.rng_state.buffer[i] = 0
-
- @property
- def state(self):
- """
- Get or set the PRNG state
-
- Returns
- -------
- state : dict
- Dictionary containing the information required to describe the
- state of the PRNG
- """
- ctr = np.empty(4, dtype=np.uint64)
- key = np.empty(2, dtype=np.uint64)
- buffer = np.empty(PHILOX_BUFFER_SIZE, dtype=np.uint64)
- for i in range(4):
- ctr[i] = self.rng_state.ctr.v[i]
- if i < 2:
- key[i] = self.rng_state.key.v[i]
- for i in range(PHILOX_BUFFER_SIZE):
- buffer[i] = self.rng_state.buffer[i]
-
- state = {'counter': ctr, 'key': key}
- return {'bit_generator': self.__class__.__name__,
- 'state': state,
- 'buffer': buffer,
- 'buffer_pos': self.rng_state.buffer_pos,
- 'has_uint32': self.rng_state.has_uint32,
- 'uinteger': self.rng_state.uinteger}
-
- @state.setter
- def state(self, value):
- if not isinstance(value, dict):
- raise TypeError('state must be a dict')
- bitgen = value.get('bit_generator', '')
- if bitgen != self.__class__.__name__:
- raise ValueError('state must be for a {0} '
- 'PRNG'.format(self.__class__.__name__))
- for i in range(4):
- self.rng_state.ctr.v[i] = <uint64_t> value['state']['counter'][i]
- if i < 2:
- self.rng_state.key.v[i] = <uint64_t> value['state']['key'][i]
- for i in range(PHILOX_BUFFER_SIZE):
- self.rng_state.buffer[i] = <uint64_t> value['buffer'][i]
-
- self.rng_state.has_uint32 = value['has_uint32']
- self.rng_state.uinteger = value['uinteger']
- self.rng_state.buffer_pos = value['buffer_pos']
-
- cdef jump_inplace(self, iter):
- """
- Jump state in-place
-
- Not part of public API
-
- Parameters
- ----------
- iter : integer, positive
- Number of times to jump the state of the rng.
- """
- self.advance(iter * int(2 ** 128))
-
- def jumped(self, jumps=1):
- """
- jumped(jumps=1)
-
- Returns a new bit generator with the state jumped
-
- The state of the returned bit generator is jumped as-if
- 2**(128 * jumps) random numbers have been generated.
-
- Parameters
- ----------
- jumps : integer, positive
- Number of times to jump the state of the bit generator returned
-
- Returns
- -------
- bit_generator : Philox
- New instance of generator jumped iter times
- """
- cdef Philox bit_generator
-
- bit_generator = self.__class__()
- bit_generator.state = self.state
- bit_generator.jump_inplace(jumps)
-
- return bit_generator
-
- def advance(self, delta):
- """
- advance(delta)
-
- Advance the underlying RNG as-if delta draws have occurred.
-
- Parameters
- ----------
- delta : integer, positive
- Number of draws to advance the RNG. Must be less than the
- size state variable in the underlying RNG.
-
- Returns
- -------
- self : Philox
- RNG advanced delta steps
-
- Notes
- -----
- Advancing a RNG updates the underlying RNG state as-if a given
- number of calls to the underlying RNG have been made. In general
- there is not a one-to-one relationship between the number of output
- random values from a particular distribution and the number of
- draws from the core RNG. This occurs for two reasons:
-
- * The random values are simulated using a rejection-based method
- and so, on average, more than one value from the underlying
- RNG is required to generate a single draw.
- * The number of bits required to generate a simulated value
- differs from the number of bits generated by the underlying
- RNG. For example, two 16-bit integer values can be simulated
- from a single draw of a 32-bit RNG.
-
- Advancing the RNG state resets any pre-computed random numbers.
- This is required to ensure exact reproducibility.
- """
- delta = wrap_int(delta, 256)
-
- cdef np.ndarray delta_a
- delta_a = int_to_array(delta, 'step', 256, 64)
- philox_advance(<uint64_t *> delta_a.data, &self.rng_state)
- self._reset_state_variables()
- return self
defs.append(('NPY_NO_DEPRECATED_API', 0))
config.add_data_dir('tests')
+ config.add_data_dir('_examples')
EXTRA_LINK_ARGS = []
# Math lib
for gen in ['mt19937']:
# gen.pyx, src/gen/gen.c, src/gen/gen-jump.c
- config.add_extension(gen,
- sources=['{0}.c'.format(gen),
+ config.add_extension('_{0}'.format(gen),
+ sources=['_{0}.c'.format(gen),
'src/{0}/{0}.c'.format(gen),
'src/{0}/{0}-jump.c'.format(gen)],
include_dirs=['.', 'src', join('src', gen)],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
- depends=['%s.pyx' % gen],
+ depends=['_%s.pyx' % gen],
define_macros=defs,
)
for gen in ['philox', 'pcg64', 'sfc64']:
# gen.pyx, src/gen/gen.c
_defs = defs + PCG64_DEFS if gen == 'pcg64' else defs
- config.add_extension(gen,
- sources=['{0}.c'.format(gen),
+ config.add_extension('_{0}'.format(gen),
+ sources=['_{0}.c'.format(gen),
'src/{0}/{0}.c'.format(gen)],
include_dirs=['.', 'src', join('src', gen)],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
- depends=['%s.pyx' % gen, 'bit_generator.pyx',
- 'bit_generator.pxd'],
+ depends=['_%s.pyx' % gen, '_bit_generator.pyx',
+ '_bit_generator.pxd'],
define_macros=_defs,
)
- for gen in ['common', 'bit_generator']:
+ for gen in ['_common', '_bit_generator']:
# gen.pyx
config.add_extension(gen,
sources=['{0}.c'.format(gen)],
depends=['%s.pyx' % gen, '%s.pxd' % gen,],
define_macros=defs,
)
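+ # Ship the .pxd files with the build so that downstream Cython
+ # code can still cimport these (now underscore-prefixed) modules.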
+ config.add_data_files('{0}.pxd'.format(gen))
other_srcs = [
'src/distributions/logfactorial.c',
'src/distributions/distributions.c',
+ 'src/distributions/random_mvhg_count.c',
+ 'src/distributions/random_mvhg_marginals.c',
'src/distributions/random_hypergeometric.c',
]
- for gen in ['generator', 'bounded_integers']:
+ for gen in ['_generator', '_bounded_integers']:
# gen.pyx, src/distributions/distributions.c
config.add_extension(gen,
sources=['{0}.c'.format(gen)] + other_srcs,
depends=['%s.pyx' % gen],
define_macros=defs,
)
+ config.add_data_files('_bounded_integers.pxd')
config.add_extension('mtrand',
- # mtrand does not depend on random_hypergeometric.c.
sources=['mtrand.c',
'src/legacy/legacy-distributions.c',
'src/distributions/logfactorial.c',
depends=['mtrand.pyx'],
define_macros=defs + LEGACY_DEFS,
)
+ config.add_data_files('__init__.pxd')
return config
+++ /dev/null
-import numpy as np
-cimport numpy as np
-
-from .common cimport *
-from .bit_generator cimport BitGenerator
-
-__all__ = ['SFC64']
-
-cdef extern from "src/sfc64/sfc64.h":
- struct s_sfc64_state:
- uint64_t s[4]
- int has_uint32
- uint32_t uinteger
-
- ctypedef s_sfc64_state sfc64_state
- uint64_t sfc64_next64(sfc64_state *state) nogil
- uint32_t sfc64_next32(sfc64_state *state) nogil
- void sfc64_set_seed(sfc64_state *state, uint64_t *seed)
- void sfc64_get_state(sfc64_state *state, uint64_t *state_arr, int *has_uint32, uint32_t *uinteger)
- void sfc64_set_state(sfc64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger)
-
-
-cdef uint64_t sfc64_uint64(void* st) nogil:
- return sfc64_next64(<sfc64_state *>st)
-
-cdef uint32_t sfc64_uint32(void *st) nogil:
- return sfc64_next32(<sfc64_state *> st)
-
-cdef double sfc64_double(void* st) nogil:
- return uint64_to_double(sfc64_next64(<sfc64_state *>st))
-
-
-cdef class SFC64(BitGenerator):
- """
- SFC64(seed=None)
-
- BitGenerator for Chris Doty-Humphrey's Small Fast Chaotic PRNG.
-
- Parameters
- ----------
- seed : {None, int, array_like[ints], ISeedSequence}, optional
- A seed to initialize the `BitGenerator`. If None, then fresh,
- unpredictable entropy will be pulled from the OS. If an ``int`` or
- ``array_like[ints]`` is passed, then it will be passed to
- `SeedSequence` to derive the initial `BitGenerator` state. One may also
- pass in an implementor of the `ISeedSequence` interface like
- `SeedSequence`.
-
- Notes
- -----
- ``SFC64`` is a 256-bit implementation of Chris Doty-Humphrey's Small Fast
- Chaotic PRNG ([1]_). ``SFC64`` has a few different cycles that one might be
- on, depending on the seed; the expected period will be about
- :math:`2^{255}` ([2]_). ``SFC64`` incorporates a 64-bit counter which means
- that the absolute minimum cycle length is :math:`2^{64}` and that distinct
- seeds will not run into each other for at least :math:`2^{64}` iterations.
-
- ``SFC64`` provides a capsule containing function pointers that produce
- doubles, and unsigned 32- and 64-bit integers. These are not
- directly consumable in Python and must be consumed by a ``Generator``
- or similar object that supports low-level access.
-
- **State and Seeding**
-
- The ``SFC64`` state vector consists of 4 unsigned 64-bit values. The last
- is a 64-bit counter that increments by 1 each iteration.
-
- The input seed is processed by `SeedSequence` to generate the first
- 3 values, then the ``SFC64`` algorithm is iterated a small number of times
- to mix.
-
- **Compatibility Guarantee**
-
- ``SFC64`` makes a guarantee that a fixed seed will always produce the same
- random integer stream.
-
- References
- ----------
- .. [1] `"PractRand"
- <http://pracrand.sourceforge.net/RNG_engines.txt>`_
- .. [2] `"Random Invertible Mapping Statistics"
- <http://www.pcg-random.org/posts/random-invertible-mapping-statistics.html>`_
- """
-
- cdef sfc64_state rng_state
-
- def __init__(self, seed=None):
- BitGenerator.__init__(self, seed)
- self._bitgen.state = <void *>&self.rng_state
- self._bitgen.next_uint64 = &sfc64_uint64
- self._bitgen.next_uint32 = &sfc64_uint32
- self._bitgen.next_double = &sfc64_double
- self._bitgen.next_raw = &sfc64_uint64
- # Seed the _bitgen
- val = self._seed_seq.generate_state(3, np.uint64)
- sfc64_set_seed(&self.rng_state, <uint64_t*>np.PyArray_DATA(val))
- self._reset_state_variables()
-
- cdef _reset_state_variables(self):
- self.rng_state.has_uint32 = 0
- self.rng_state.uinteger = 0
-
- @property
- def state(self):
- """
- Get or set the PRNG state
-
- Returns
- -------
- state : dict
- Dictionary containing the information required to describe the
- state of the PRNG
- """
- cdef np.ndarray state_vec
- cdef int has_uint32
- cdef uint32_t uinteger
-
- state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
- sfc64_get_state(&self.rng_state,
- <uint64_t *>np.PyArray_DATA(state_vec),
- &has_uint32, &uinteger)
- return {'bit_generator': self.__class__.__name__,
- 'state': {'state': state_vec},
- 'has_uint32': has_uint32,
- 'uinteger': uinteger}
-
- @state.setter
- def state(self, value):
- cdef np.ndarray state_vec
- cdef int has_uint32
- cdef uint32_t uinteger
- if not isinstance(value, dict):
- raise TypeError('state must be a dict')
- bitgen = value.get('bit_generator', '')
- if bitgen != self.__class__.__name__:
- raise ValueError('state must be for a {0} '
- 'RNG'.format(self.__class__.__name__))
- state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
- state_vec[:] = value['state']['state']
- has_uint32 = value['has_uint32']
- uinteger = value['uinteger']
- sfc64_set_state(&self.rng_state,
- <uint64_t *>np.PyArray_DATA(state_vec),
- has_uint32, uinteger)
+++ /dev/null
-#include "aligned_malloc.h"
-
-static NPY_INLINE void *PyArray_realloc_aligned(void *p, size_t n);
-
-static NPY_INLINE void *PyArray_malloc_aligned(size_t n);
-
-static NPY_INLINE void *PyArray_calloc_aligned(size_t n, size_t s);
-
-static NPY_INLINE void PyArray_free_aligned(void *p);
\ No newline at end of file
+++ /dev/null
-#ifndef _RANDOMDGEN__ALIGNED_MALLOC_H_
-#define _RANDOMDGEN__ALIGNED_MALLOC_H_
-
-#include "Python.h"
-#include "numpy/npy_common.h"
-
-#define NPY_MEMALIGN 16 /* 16 for SSE2, 32 for AVX, 64 for Xeon Phi */
-
-static NPY_INLINE void *PyArray_realloc_aligned(void *p, size_t n)
-{
- void *p1, **p2, *base;
- size_t old_offs, offs = NPY_MEMALIGN - 1 + sizeof(void *);
- if (NPY_UNLIKELY(p != NULL))
- {
- base = *(((void **)p) - 1);
- if (NPY_UNLIKELY((p1 = PyMem_Realloc(base, n + offs)) == NULL))
- return NULL;
- if (NPY_LIKELY(p1 == base))
- return p;
- p2 = (void **)(((Py_uintptr_t)(p1) + offs) & ~(NPY_MEMALIGN - 1));
- old_offs = (size_t)((Py_uintptr_t)p - (Py_uintptr_t)base);
- memmove((void *)p2, ((char *)p1) + old_offs, n);
- }
- else
- {
- if (NPY_UNLIKELY((p1 = PyMem_Malloc(n + offs)) == NULL))
- return NULL;
- p2 = (void **)(((Py_uintptr_t)(p1) + offs) & ~(NPY_MEMALIGN - 1));
- }
- *(p2 - 1) = p1;
- return (void *)p2;
-}
-
-static NPY_INLINE void *PyArray_malloc_aligned(size_t n)
-{
- return PyArray_realloc_aligned(NULL, n);
-}
-
-static NPY_INLINE void *PyArray_calloc_aligned(size_t n, size_t s)
-{
- void *p;
- if (NPY_UNLIKELY((p = PyArray_realloc_aligned(NULL, n * s)) == NULL))
- return NULL;
- memset(p, 0, n * s);
- return p;
-}
-
-static NPY_INLINE void PyArray_free_aligned(void *p)
-{
- void *base = *(((void **)p) - 1);
- PyMem_Free(base);
-}
-
-#endif
+++ /dev/null
-#ifndef _RANDOM_BITGEN_H
-#define _RANDOM_BITGEN_H
-
-#pragma once
-#include <stddef.h>
-#include <stdbool.h>
-#include <stdint.h>
-
-/* Must match the declaration in numpy/random/common.pxd */
-
-typedef struct bitgen {
- void *state;
- uint64_t (*next_uint64)(void *st);
- uint32_t (*next_uint32)(void *st);
- double (*next_double)(void *st);
- uint64_t (*next_raw)(void *st);
-} bitgen_t;
-
-
-#endif
-#include "distributions.h"
+#include "numpy/random/distributions.h"
#include "ziggurat_constants.h"
#include "logfactorial.h"
#include <intrin.h>
#endif
-/* Random generators for external use */
-float random_float(bitgen_t *bitgen_state) { return next_float(bitgen_state); }
-
-double random_double(bitgen_t *bitgen_state) {
- return next_double(bitgen_state);
+/* Inline generators for internal use */
+static NPY_INLINE uint32_t next_uint32(bitgen_t *bitgen_state) {
+ return bitgen_state->next_uint32(bitgen_state->state);
}
-
-static NPY_INLINE double next_standard_exponential(bitgen_t *bitgen_state) {
- return -log(1.0 - next_double(bitgen_state));
+static NPY_INLINE uint64_t next_uint64(bitgen_t *bitgen_state) {
+ return bitgen_state->next_uint64(bitgen_state->state);
}
-double random_standard_exponential(bitgen_t *bitgen_state) {
- return next_standard_exponential(bitgen_state);
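+/* Take the top 23 bits of a uint32 (>> 9) and scale by 2^-23
+   (1.0f / 8388608.0f) to get a float uniformly distributed on [0, 1). */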
+static NPY_INLINE float next_float(bitgen_t *bitgen_state) {
+ return (next_uint32(bitgen_state) >> 9) * (1.0f / 8388608.0f);
}
-void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt,
- double *out) {
- npy_intp i;
- for (i = 0; i < cnt; i++) {
- out[i] = next_standard_exponential(bitgen_state);
- }
+/* Random generators for external use */
+float random_standard_uniform_f(bitgen_t *bitgen_state) {
+ return next_float(bitgen_state);
}
-float random_standard_exponential_f(bitgen_t *bitgen_state) {
- return -logf(1.0f - next_float(bitgen_state));
+double random_standard_uniform(bitgen_t *bitgen_state) {
+ return next_double(bitgen_state);
}
-void random_double_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) {
+void random_standard_uniform_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) {
npy_intp i;
for (i = 0; i < cnt; i++) {
out[i] = next_double(bitgen_state);
}
}
-#if 0
-double random_gauss(bitgen_t *bitgen_state) {
- if (bitgen_state->has_gauss) {
- const double temp = bitgen_state->gauss;
- bitgen_state->has_gauss = false;
- bitgen_state->gauss = 0.0;
- return temp;
- } else {
- double f, x1, x2, r2;
-
- do {
- x1 = 2.0 * next_double(bitgen_state) - 1.0;
- x2 = 2.0 * next_double(bitgen_state) - 1.0;
- r2 = x1 * x1 + x2 * x2;
- } while (r2 >= 1.0 || r2 == 0.0);
-
- /* Polar method, a more efficient version of the Box-Muller approach. */
- f = sqrt(-2.0 * log(r2) / r2);
- /* Keep for next call */
- bitgen_state->gauss = f * x1;
- bitgen_state->has_gauss = true;
- return f * x2;
- }
-}
-float random_gauss_f(bitgen_t *bitgen_state) {
- if (bitgen_state->has_gauss_f) {
- const float temp = bitgen_state->gauss_f;
- bitgen_state->has_gauss_f = false;
- bitgen_state->gauss_f = 0.0f;
- return temp;
- } else {
- float f, x1, x2, r2;
-
- do {
- x1 = 2.0f * next_float(bitgen_state) - 1.0f;
- x2 = 2.0f * next_float(bitgen_state) - 1.0f;
- r2 = x1 * x1 + x2 * x2;
- } while (r2 >= 1.0 || r2 == 0.0);
-
- /* Polar method, a more efficient version of the Box-Muller approach. */
- f = sqrtf(-2.0f * logf(r2) / r2);
- /* Keep for next call */
- bitgen_state->gauss_f = f * x1;
- bitgen_state->has_gauss_f = true;
- return f * x2;
+void random_standard_uniform_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) {
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = next_float(bitgen_state);
}
}
-#endif
-
-static NPY_INLINE double standard_exponential_zig(bitgen_t *bitgen_state);
-static double standard_exponential_zig_unlikely(bitgen_t *bitgen_state,
+static double standard_exponential_unlikely(bitgen_t *bitgen_state,
uint8_t idx, double x) {
if (idx == 0) {
/* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */
exp(-x)) {
return x;
} else {
- return standard_exponential_zig(bitgen_state);
+ return random_standard_exponential(bitgen_state);
}
}
-static NPY_INLINE double standard_exponential_zig(bitgen_t *bitgen_state) {
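+/* Ziggurat rejection sampler; formerly random_standard_exponential_zig,
+   renamed now that the ziggurat is the default exponential method. */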
+double random_standard_exponential(bitgen_t *bitgen_state) {
uint64_t ri;
uint8_t idx;
double x;
if (ri < ke_double[idx]) {
return x; /* 98.9% of the time we return here 1st try */
}
- return standard_exponential_zig_unlikely(bitgen_state, idx, x);
+ return standard_exponential_unlikely(bitgen_state, idx, x);
}
-double random_standard_exponential_zig(bitgen_t *bitgen_state) {
- return standard_exponential_zig(bitgen_state);
-}
-
-void random_standard_exponential_zig_fill(bitgen_t *bitgen_state, npy_intp cnt,
- double *out) {
+void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out)
+{
npy_intp i;
for (i = 0; i < cnt; i++) {
- out[i] = standard_exponential_zig(bitgen_state);
+ out[i] = random_standard_exponential(bitgen_state);
}
}
-static NPY_INLINE float standard_exponential_zig_f(bitgen_t *bitgen_state);
-
-static float standard_exponential_zig_unlikely_f(bitgen_t *bitgen_state,
+static float standard_exponential_unlikely_f(bitgen_t *bitgen_state,
uint8_t idx, float x) {
if (idx == 0) {
/* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */
expf(-x)) {
return x;
} else {
- return standard_exponential_zig_f(bitgen_state);
+ return random_standard_exponential_f(bitgen_state);
}
}
-static NPY_INLINE float standard_exponential_zig_f(bitgen_t *bitgen_state) {
+float random_standard_exponential_f(bitgen_t *bitgen_state) {
uint32_t ri;
uint8_t idx;
float x;
if (ri < ke_float[idx]) {
return x; /* 98.9% of the time we return here 1st try */
}
- return standard_exponential_zig_unlikely_f(bitgen_state, idx, x);
+ return standard_exponential_unlikely_f(bitgen_state, idx, x);
}
-float random_standard_exponential_zig_f(bitgen_t *bitgen_state) {
- return standard_exponential_zig_f(bitgen_state);
+void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out)
+{
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = random_standard_exponential_f(bitgen_state);
+ }
}
-static NPY_INLINE double next_gauss_zig(bitgen_t *bitgen_state) {
+void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out)
+{
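+  /* Inversion method: with U ~ Uniform[0, 1), -log(1 - U) is a draw
+     from the standard exponential; using 1 - U avoids log(0). */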
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = -log(1.0 - next_double(bitgen_state));
+ }
+}
+
+void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out)
+{
+  npy_intp i;
+  for (i = 0; i < cnt; i++) {
+    out[i] = -logf(1.0f - next_float(bitgen_state));
+  }
+}
+
+
+double random_standard_normal(bitgen_t *bitgen_state) {
uint64_t r;
int sign;
uint64_t rabs;
}
}
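+/*
+ * The ziggurat body of random_standard_normal is elided from this hunk; it
+ * follows the same pattern as the exponential version above with one twist:
+ * the single 64-bit draw is split into an 8-bit layer index, one sign bit,
+ * and the remaining high bits as the magnitude `rabs`, so positive and
+ * negative deviates come from the same draw.  (The exact bit split is
+ * stated from the usual form of this algorithm, not from this hunk.)
+ */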
-double random_gauss_zig(bitgen_t *bitgen_state) {
- return next_gauss_zig(bitgen_state);
-}
-
-void random_gauss_zig_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) {
+void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) {
npy_intp i;
for (i = 0; i < cnt; i++) {
- out[i] = next_gauss_zig(bitgen_state);
+ out[i] = random_standard_normal(bitgen_state);
}
}
-float random_gauss_zig_f(bitgen_t *bitgen_state) {
+float random_standard_normal_f(bitgen_t *bitgen_state) {
uint32_t r;
int sign;
uint32_t rabs;
}
}
-/*
-static NPY_INLINE double standard_gamma(bitgen_t *bitgen_state, double shape) {
- double b, c;
- double U, V, X, Y;
-
- if (shape == 1.0) {
- return random_standard_exponential(bitgen_state);
- } else if (shape < 1.0) {
- for (;;) {
- U = next_double(bitgen_state);
- V = random_standard_exponential(bitgen_state);
- if (U <= 1.0 - shape) {
- X = pow(U, 1. / shape);
- if (X <= V) {
- return X;
- }
- } else {
- Y = -log((1 - U) / shape);
- X = pow(1.0 - shape + shape * Y, 1. / shape);
- if (X <= (V + Y)) {
- return X;
- }
- }
- }
- } else {
- b = shape - 1. / 3.;
- c = 1. / sqrt(9 * b);
- for (;;) {
- do {
- X = random_gauss(bitgen_state);
- V = 1.0 + c * X;
- } while (V <= 0.0);
-
- V = V * V * V;
- U = next_double(bitgen_state);
- if (U < 1.0 - 0.0331 * (X * X) * (X * X))
- return (b * V);
- if (log(U) < 0.5 * X * X + b * (1. - V + log(V)))
- return (b * V);
- }
- }
-}
-
-static NPY_INLINE float standard_gamma_float(bitgen_t *bitgen_state, float
-shape) { float b, c; float U, V, X, Y;
-
- if (shape == 1.0f) {
- return random_standard_exponential_f(bitgen_state);
- } else if (shape < 1.0f) {
- for (;;) {
- U = next_float(bitgen_state);
- V = random_standard_exponential_f(bitgen_state);
- if (U <= 1.0f - shape) {
- X = powf(U, 1.0f / shape);
- if (X <= V) {
- return X;
- }
- } else {
- Y = -logf((1.0f - U) / shape);
- X = powf(1.0f - shape + shape * Y, 1.0f / shape);
- if (X <= (V + Y)) {
- return X;
- }
- }
- }
- } else {
- b = shape - 1.0f / 3.0f;
- c = 1.0f / sqrtf(9.0f * b);
- for (;;) {
- do {
- X = random_gauss_f(bitgen_state);
- V = 1.0f + c * X;
- } while (V <= 0.0f);
-
- V = V * V * V;
- U = next_float(bitgen_state);
- if (U < 1.0f - 0.0331f * (X * X) * (X * X))
- return (b * V);
- if (logf(U) < 0.5f * X * X + b * (1.0f - V + logf(V)))
- return (b * V);
- }
+void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) {
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = random_standard_normal_f(bitgen_state);
}
}
-
-double random_standard_gamma(bitgen_t *bitgen_state, double shape) {
- return standard_gamma(bitgen_state, shape);
-}
-
-float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) {
- return standard_gamma_float(bitgen_state, shape);
-}
-*/
-
-static NPY_INLINE double standard_gamma_zig(bitgen_t *bitgen_state,
+double random_standard_gamma(bitgen_t *bitgen_state,
double shape) {
double b, c;
double U, V, X, Y;
if (shape == 1.0) {
- return random_standard_exponential_zig(bitgen_state);
+ return random_standard_exponential(bitgen_state);
} else if (shape == 0.0) {
return 0.0;
} else if (shape < 1.0) {
for (;;) {
U = next_double(bitgen_state);
- V = random_standard_exponential_zig(bitgen_state);
+ V = random_standard_exponential(bitgen_state);
if (U <= 1.0 - shape) {
X = pow(U, 1. / shape);
if (X <= V) {
c = 1. / sqrt(9 * b);
for (;;) {
do {
- X = random_gauss_zig(bitgen_state);
+ X = random_standard_normal(bitgen_state);
V = 1.0 + c * X;
} while (V <= 0.0);
}
}
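+/*
+ * The shape > 1 branch above is the Marsaglia-Tsang (2000) method: with
+ * b = shape - 1/3 and c = 1/sqrt(9b), candidates b*(1 + c*X)^3 built from
+ * a standard normal X are accepted by a cheap squeeze and, failing that,
+ * an exact log-density test (both tests are elided from this hunk, but the
+ * deleted copy above shows them verbatim).
+ */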
-static NPY_INLINE float standard_gamma_zig_f(bitgen_t *bitgen_state,
+float random_standard_gamma_f(bitgen_t *bitgen_state,
float shape) {
float b, c;
float U, V, X, Y;
if (shape == 1.0f) {
- return random_standard_exponential_zig_f(bitgen_state);
+ return random_standard_exponential_f(bitgen_state);
} else if (shape == 0.0) {
return 0.0;
} else if (shape < 1.0f) {
for (;;) {
U = next_float(bitgen_state);
- V = random_standard_exponential_zig_f(bitgen_state);
+ V = random_standard_exponential_f(bitgen_state);
if (U <= 1.0f - shape) {
X = powf(U, 1.0f / shape);
if (X <= V) {
c = 1.0f / sqrtf(9.0f * b);
for (;;) {
do {
- X = random_gauss_zig_f(bitgen_state);
+ X = random_standard_normal_f(bitgen_state);
V = 1.0f + c * X;
} while (V <= 0.0f);
}
}
-double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape) {
- return standard_gamma_zig(bitgen_state, shape);
-}
-
-float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape) {
- return standard_gamma_zig_f(bitgen_state, shape);
-}
-
int64_t random_positive_int64(bitgen_t *bitgen_state) {
return next_uint64(bitgen_state) >> 1;
}
* algorithm comes from SPECFUN by Shanjie Zhang and Jianming Jin and their
* book "Computation of Special Functions", 1996, John Wiley & Sons, Inc.
*
- * If loggam(k+1) is being used to compute log(k!) for an integer k, consider
+ * If random_loggam(k+1) is being used to compute log(k!) for an integer k, consider
* using logfactorial(k) instead.
*/
-double loggam(double x) {
+double random_loggam(double x) {
double x0, x2, xp, gl, gl0;
RAND_INT_TYPE k, n;
}
*/
-double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale) {
- return loc + scale * random_gauss_zig(bitgen_state);
+double random_normal(bitgen_t *bitgen_state, double loc, double scale) {
+ return loc + scale * random_standard_normal(bitgen_state);
}
double random_exponential(bitgen_t *bitgen_state, double scale) {
- return scale * standard_exponential_zig(bitgen_state);
+ return scale * random_standard_exponential(bitgen_state);
}
double random_uniform(bitgen_t *bitgen_state, double lower, double range) {
}
double random_gamma(bitgen_t *bitgen_state, double shape, double scale) {
- return scale * random_standard_gamma_zig(bitgen_state, shape);
+ return scale * random_standard_gamma(bitgen_state, shape);
}
-float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale) {
- return scale * random_standard_gamma_zig_f(bitgen_state, shape);
+float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) {
+ return scale * random_standard_gamma_f(bitgen_state, shape);
}
double random_beta(bitgen_t *bitgen_state, double a, double b) {
}
}
} else {
- Ga = random_standard_gamma_zig(bitgen_state, a);
- Gb = random_standard_gamma_zig(bitgen_state, b);
+ Ga = random_standard_gamma(bitgen_state, a);
+ Gb = random_standard_gamma(bitgen_state, b);
return Ga / (Ga + Gb);
}
}
double random_chisquare(bitgen_t *bitgen_state, double df) {
- return 2.0 * random_standard_gamma_zig(bitgen_state, df / 2.0);
+ return 2.0 * random_standard_gamma(bitgen_state, df / 2.0);
}
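+/*
+ * Both reductions above rest on standard gamma identities: for independent
+ * Ga ~ Gamma(a) and Gb ~ Gamma(b), Ga / (Ga + Gb) ~ Beta(a, b), and
+ * 2 * Gamma(df/2) is exactly chi-square with df degrees of freedom.
+ */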
double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) {
}
double random_standard_cauchy(bitgen_t *bitgen_state) {
- return random_gauss_zig(bitgen_state) / random_gauss_zig(bitgen_state);
+ return random_standard_normal(bitgen_state) / random_standard_normal(bitgen_state);
}
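+/* The ratio of two independent standard normals is standard Cauchy. */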
double random_pareto(bitgen_t *bitgen_state, double a) {
- return exp(standard_exponential_zig(bitgen_state) / a) - 1;
+ return exp(random_standard_exponential(bitgen_state) / a) - 1;
}
double random_weibull(bitgen_t *bitgen_state, double a) {
if (a == 0.0) {
return 0.0;
}
- return pow(standard_exponential_zig(bitgen_state), 1. / a);
+ return pow(random_standard_exponential(bitgen_state), 1. / a);
}
double random_power(bitgen_t *bitgen_state, double a) {
- return pow(1 - exp(-standard_exponential_zig(bitgen_state)), 1. / a);
+ return pow(1 - exp(-random_standard_exponential(bitgen_state)), 1. / a);
}
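+/*
+ * The three generators above are inverse transforms of one Exp(1) draw E:
+ * exp(E/a) - 1 is Pareto(a) (P(exp(E/a) > t) = t^-a for t >= 1),
+ * E^(1/a) is Weibull(a) (P(E^(1/a) > t) = exp(-t^a)), and
+ * (1 - exp(-E))^(1/a) is the power distribution, since 1 - exp(-E) is
+ * itself uniform on (0, 1).
+ */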
double random_laplace(bitgen_t *bitgen_state, double loc, double scale) {
}
double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) {
- return exp(random_normal_zig(bitgen_state, mean, sigma));
+ return exp(random_normal(bitgen_state, mean, sigma));
}
double random_rayleigh(bitgen_t *bitgen_state, double mode) {
double random_standard_t(bitgen_t *bitgen_state, double df) {
double num, denom;
- num = random_gauss_zig(bitgen_state);
- denom = random_standard_gamma_zig(bitgen_state, df / 2);
+ num = random_standard_normal(bitgen_state);
+ denom = random_standard_gamma(bitgen_state, df / 2);
return sqrt(df / 2) * num / sqrt(denom);
}
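+/*
+ * Checking the algebra above: denom ~ Gamma(df/2) equals ChiSquare(df)/2,
+ * so sqrt(df/2) * num / sqrt(denom) = num / sqrt(ChiSquare(df)/df), the
+ * textbook Student-t construction.
+ */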
/* log(V) == log(0.0) ok here */
/* if U==0.0 so that us==0.0, log is ok since always returns */
if ((log(V) + log(invalpha) - log(a / (us * us) + b)) <=
- (-lam + k * loglam - loggam(k + 1))) {
+ (-lam + k * loglam - random_loggam(k + 1))) {
return k;
}
}
}
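+/*
+ * The right-hand side of the acceptance test above is the Poisson log-pmf,
+ * k*log(lam) - lam - log(k!), with log(k!) computed as random_loggam(k+1);
+ * this is the exact-density step of the transformed-rejection (PTRS)
+ * sampler.
+ */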
if (1 < df) {
const double Chi2 = random_chisquare(bitgen_state, df - 1);
- const double n = random_gauss_zig(bitgen_state) + sqrt(nonc);
+ const double n = random_standard_normal(bitgen_state) + sqrt(nonc);
return Chi2 + n * n;
} else {
const RAND_INT_TYPE i = random_poisson(bitgen_state, nonc / 2.0);
double mu_2l;
mu_2l = mean / (2 * scale);
- Y = random_gauss_zig(bitgen_state);
+ Y = random_standard_normal(bitgen_state);
Y = mean * Y * Y;
X = mean + mu_2l * (Y - sqrt(4 * scale * Y + Y * Y));
U = next_double(bitgen_state);
while (1) {
double T, U, V, X;
- U = 1.0 - random_double(bitgen_state);
- V = random_double(bitgen_state);
+ U = 1.0 - next_double(bitgen_state);
+ V = next_double(bitgen_state);
X = floor(pow(U, -1.0 / am1));
/*
* The real result may be above what can be represented in a signed
+++ /dev/null
-#ifndef _RANDOMDGEN__DISTRIBUTIONS_H_
-#define _RANDOMDGEN__DISTRIBUTIONS_H_
-
-#pragma once
-#include <stddef.h>
-#include <stdbool.h>
-#include <stdint.h>
-
-#include "Python.h"
-#include "numpy/npy_common.h"
-#include "numpy/npy_math.h"
-#include "src/bitgen.h"
-
-/*
- * RAND_INT_TYPE is used to share integer generators with RandomState which
- * used long in place of int64_t. If changing a distribution that uses
- * RAND_INT_TYPE, then the original unmodified copy must be retained for
- * use in RandomState by copying to the legacy distributions source file.
- */
-#ifdef NP_RANDOM_LEGACY
-#define RAND_INT_TYPE long
-#define RAND_INT_MAX LONG_MAX
-#else
-#define RAND_INT_TYPE int64_t
-#define RAND_INT_MAX INT64_MAX
-#endif
-
-#ifdef DLL_EXPORT
-#define DECLDIR __declspec(dllexport)
-#else
-#define DECLDIR extern
-#endif
-
-#ifndef MIN
-#define MIN(x, y) (((x) < (y)) ? x : y)
-#define MAX(x, y) (((x) > (y)) ? x : y)
-#endif
-
-#ifndef M_PI
-#define M_PI 3.14159265358979323846264338328
-#endif
-
-typedef struct s_binomial_t {
- int has_binomial; /* !=0: following parameters initialized for binomial */
- double psave;
- RAND_INT_TYPE nsave;
- double r;
- double q;
- double fm;
- RAND_INT_TYPE m;
- double p1;
- double xm;
- double xl;
- double xr;
- double c;
- double laml;
- double lamr;
- double p2;
- double p3;
- double p4;
-} binomial_t;
-
-/* Inline generators for internal use */
-static NPY_INLINE uint32_t next_uint32(bitgen_t *bitgen_state) {
- return bitgen_state->next_uint32(bitgen_state->state);
-}
-
-static NPY_INLINE uint64_t next_uint64(bitgen_t *bitgen_state) {
- return bitgen_state->next_uint64(bitgen_state->state);
-}
-
-static NPY_INLINE float next_float(bitgen_t *bitgen_state) {
- return (next_uint32(bitgen_state) >> 9) * (1.0f / 8388608.0f);
-}
-
-static NPY_INLINE double next_double(bitgen_t *bitgen_state) {
- return bitgen_state->next_double(bitgen_state->state);
-}
-
-DECLDIR double loggam(double x);
-
-DECLDIR float random_float(bitgen_t *bitgen_state);
-DECLDIR double random_double(bitgen_t *bitgen_state);
-DECLDIR void random_double_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out);
-
-DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
-DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
-DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
-DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
-
-DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
-DECLDIR void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt,
- double *out);
-DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
-DECLDIR double random_standard_exponential_zig(bitgen_t *bitgen_state);
-DECLDIR void random_standard_exponential_zig_fill(bitgen_t *bitgen_state,
- npy_intp cnt, double *out);
-DECLDIR float random_standard_exponential_zig_f(bitgen_t *bitgen_state);
-
-/*
-DECLDIR double random_gauss(bitgen_t *bitgen_state);
-DECLDIR float random_gauss_f(bitgen_t *bitgen_state);
-*/
-DECLDIR double random_gauss_zig(bitgen_t *bitgen_state);
-DECLDIR float random_gauss_zig_f(bitgen_t *bitgen_state);
-DECLDIR void random_gauss_zig_fill(bitgen_t *bitgen_state, npy_intp cnt,
- double *out);
-
-/*
-DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
-DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape);
-*/
-DECLDIR double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape);
-DECLDIR float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape);
-
-/*
-DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale);
-*/
-DECLDIR double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale);
-
-DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale);
-DECLDIR float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale);
-
-DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale);
-DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range);
-DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b);
-DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df);
-DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden);
-DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state);
-DECLDIR double random_pareto(bitgen_t *bitgen_state, double a);
-DECLDIR double random_weibull(bitgen_t *bitgen_state, double a);
-DECLDIR double random_power(bitgen_t *bitgen_state, double a);
-DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale);
-DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale);
-DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale);
-DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma);
-DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode);
-DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df);
-DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
- double nonc);
-DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
- double dfden, double nonc);
-DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale);
-DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa);
-DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode,
- double right);
-
-DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam);
-DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n,
- double p);
-
-DECLDIR RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
- RAND_INT_TYPE n,
- double p,
- binomial_t *binomial);
-DECLDIR RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
- RAND_INT_TYPE n,
- double p,
- binomial_t *binomial);
-DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p,
- int64_t n, binomial_t *binomial);
-
-DECLDIR RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p);
-DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p);
-DECLDIR RAND_INT_TYPE random_geometric_inversion(bitgen_t *bitgen_state, double p);
-DECLDIR RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p);
-DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a);
-DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state,
- int64_t good, int64_t bad, int64_t sample);
-
-DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max);
-
-/* Generate random uint64 numbers in closed interval [off, off + rng]. */
-DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off,
- uint64_t rng, uint64_t mask,
- bool use_masked);
-
-/* Generate random uint32 numbers in closed interval [off, off + rng]. */
-DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
- uint32_t off, uint32_t rng,
- uint32_t mask, bool use_masked,
- int *bcnt, uint32_t *buf);
-DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
- uint16_t off, uint16_t rng,
- uint16_t mask, bool use_masked,
- int *bcnt, uint32_t *buf);
-DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off,
- uint8_t rng, uint8_t mask,
- bool use_masked, int *bcnt,
- uint32_t *buf);
-DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off,
- npy_bool rng, npy_bool mask,
- bool use_masked, int *bcnt,
- uint32_t *buf);
-
-DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off,
- uint64_t rng, npy_intp cnt,
- bool use_masked, uint64_t *out);
-DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off,
- uint32_t rng, npy_intp cnt,
- bool use_masked, uint32_t *out);
-DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off,
- uint16_t rng, npy_intp cnt,
- bool use_masked, uint16_t *out);
-DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off,
- uint8_t rng, npy_intp cnt,
- bool use_masked, uint8_t *out);
-DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off,
- npy_bool rng, npy_intp cnt,
- bool use_masked, npy_bool *out);
-
-DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix,
- double *pix, npy_intp d, binomial_t *binomial);
-
-#endif
-#include <stdint.h>
-#include "distributions.h"
+#include "numpy/random/distributions.h"
#include "logfactorial.h"
+#include <stdint.h>
/*
* Generate a sample from the hypergeometric distribution.
while (1) {
double U, V, X, T;
double gp;
- U = random_double(bitgen_state);
- V = random_double(bitgen_state); // "U star" in Stadlober (1989)
+ U = next_double(bitgen_state);
+ V = next_double(bitgen_state); // "U star" in Stadlober (1989)
X = a + h*(V - 0.5) / U;
// fast rejection:
--- /dev/null
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+#include "numpy/random/distributions.h"
+
+/*
+ * random_multivariate_hypergeometric_count
+ *
+ * Draw variates from the multivariate hypergeometric distribution--
+ * the "count" algorithm.
+ *
+ * Parameters
+ * ----------
+ * bitgen_t *bitgen_state
+ * Pointer to a `bitgen_t` instance.
+ * int64_t total
+ * The sum of the values in the array `colors`. (This is redundant
+ * information, but we know the caller has already computed it, so
+ * we might as well use it.)
+ * size_t num_colors
+ * The length of the `colors` array.
+ * int64_t *colors
+ * The array of colors (i.e. the number of each type in the collection
+ * from which the random variate is drawn).
+ * int64_t nsample
+ * The number of objects drawn without replacement for each variate.
+ * `nsample` must not exceed sum(colors). This condition is not checked;
+ * it is assumed that the caller has already validated the value.
+ * size_t num_variates
+ * The number of variates to be produced and put in the array
+ * pointed to by `variates`. One variate is a vector of length
+ * `num_colors`, so the array pointed to by `variates` must have length
+ * `num_variates * num_colors`.
+ * int64_t *variates
+ * The array that will hold the result. It must have length
+ * `num_variates * num_colors`.
+ * The array is not initialized in the function; it is expected that the
+ * array has been initialized with zeros when the function is called.
+ *
+ * Notes
+ * -----
+ * The "count" algorithm for drawing one variate is roughly equivalent to the
+ * following numpy code:
+ *
+ * choices = np.repeat(np.arange(len(colors)), colors)
+ * selection = np.random.choice(choices, nsample, replace=False)
+ * variate = np.bincount(selection, minlength=len(colors))
+ *
+ * This function uses a temporary array with length sum(colors).
+ *
+ * Assumptions on the arguments (not checked in the function):
+ * * colors[k] >= 0 for k in range(num_colors)
+ * * total = sum(colors)
+ * * 0 <= nsample <= total
+ * * the product total * sizeof(size_t) does not exceed SIZE_MAX
+ * * the product num_variates * num_colors does not overflow
+ */
+
+int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates)
+{
+ size_t *choices;
+ bool more_than_half;
+
+ if ((total == 0) || (nsample == 0) || (num_variates == 0)) {
+ // Nothing to do.
+ return 0;
+ }
+
+ choices = malloc(total * (sizeof *choices));
+ if (choices == NULL) {
+ return -1;
+ }
+
+ /*
+ * If colors contains, for example, [3 2 5], then choices
+ * will contain [0 0 0 1 1 2 2 2 2 2].
+ */
+ for (size_t i = 0, k = 0; i < num_colors; ++i) {
+ for (int64_t j = 0; j < colors[i]; ++j) {
+ choices[k] = i;
+ ++k;
+ }
+ }
+
+ more_than_half = nsample > (total / 2);
+ if (more_than_half) {
+ nsample = total - nsample;
+ }
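+    /*
+     * Symmetry trick: drawing nsample items without replacement has the
+     * same distribution as drawing total - nsample items and reporting the
+     * complement colors[k] - variates[...] (done at the bottom of the main
+     * loop), so at most total/2 entries ever need to be sampled.
+     */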
+
+ for (size_t i = 0; i < num_variates * num_colors; i += num_colors) {
+ /*
+ * Fisher-Yates shuffle, but only loop through the first
+ * `nsample` entries of `choices`. After the loop,
+ * choices[:nsample] contains a random sample from the
+         * full array.
+ */
+ for (size_t j = 0; j < (size_t) nsample; ++j) {
+ size_t tmp, k;
+ // Note: nsample is not greater than total, so there is no danger
+ // of integer underflow in `(size_t) total - j - 1`.
+ k = j + (size_t) random_interval(bitgen_state,
+ (size_t) total - j - 1);
+ tmp = choices[k];
+ choices[k] = choices[j];
+ choices[j] = tmp;
+ }
+ /*
+ * Count the number of occurrences of each value in choices[:nsample].
+         * The result, stored in variates[i:i+num_colors], is the sample from
+ * the multivariate hypergeometric distribution.
+ */
+ for (size_t j = 0; j < (size_t) nsample; ++j) {
+ variates[i + choices[j]] += 1;
+ }
+
+ if (more_than_half) {
+ for (size_t k = 0; k < num_colors; ++k) {
+ variates[i + k] = colors[k] - variates[i + k];
+ }
+ }
+ }
+
+ free(choices);
+
+ return 0;
+}
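+/*
+ * Illustration (values hypothetical): with colors = {3, 2, 5}, total = 10
+ * and nsample = 4, a single variate written into variates[0..2] might be
+ * {1, 1, 2}; every variate satisfies 0 <= variates[i + k] <= colors[k] and
+ * its entries sum to nsample.
+ */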
--- /dev/null
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <math.h>
+
+#include "numpy/random/distributions.h"
+#include "logfactorial.h"
+
+
+/*
+ * random_multivariate_hypergeometric_marginals
+ *
+ * Draw samples from the multivariate hypergeometric distribution--
+ * the "marginals" algorithm.
+ *
+ * This version generates the sample by iteratively calling
+ * hypergeometric() (the univariate hypergeometric distribution).
+ *
+ * Parameters
+ * ----------
+ * bitgen_t *bitgen_state
+ * Pointer to a `bitgen_t` instance.
+ * int64_t total
+ * The sum of the values in the array `colors`. (This is redundant
+ * information, but we know the caller has already computed it, so
+ * we might as well use it.)
+ * size_t num_colors
+ *      The length of the `colors` array. The function assumes
+ * num_colors > 0.
+ * int64_t *colors
+ * The array of colors (i.e. the number of each type in the collection
+ * from which the random variate is drawn).
+ * int64_t nsample
+ * The number of objects drawn without replacement for each variate.
+ * `nsample` must not exceed sum(colors). This condition is not checked;
+ * it is assumed that the caller has already validated the value.
+ * size_t num_variates
+ * The number of variates to be produced and put in the array
+ * pointed to by `variates`. One variate is a vector of length
+ * `num_colors`, so the array pointed to by `variates` must have length
+ * `num_variates * num_colors`.
+ * int64_t *variates
+ * The array that will hold the result. It must have length
+ * `num_variates * num_colors`.
+ * The array is not initialized in the function; it is expected that the
+ * array has been initialized with zeros when the function is called.
+ *
+ * Notes
+ * -----
+ * Here's an example that demonstrates the idea of this algorithm.
+ *
+ * Suppose the urn contains red, green, blue and yellow marbles.
+ * Let nred be the number of red marbles, and define the quantities for
+ * the other colors similarly. The total number of marbles is
+ *
+ * total = nred + ngreen + nblue + nyellow.
+ *
+ *  To generate a sample using random_hypergeometric:
+ *
+ * red_sample = hypergeometric(ngood=nred, nbad=total - nred,
+ * nsample=nsample)
+ *
+ * This gives us the number of red marbles in the sample. The number of
+ * marbles in the sample that are *not* red is nsample - red_sample.
+ * To figure out the distribution of those marbles, we again use
+ *  random_hypergeometric:
+ *
+ * green_sample = hypergeometric(ngood=ngreen,
+ * nbad=total - nred - ngreen,
+ * nsample=nsample - red_sample)
+ *
+ * Similarly,
+ *
+ * blue_sample = hypergeometric(
+ * ngood=nblue,
+ * nbad=total - nred - ngreen - nblue,
+ * nsample=nsample - red_sample - green_sample)
+ *
+ * Finally,
+ *
+ *    yellow_sample = nsample - (red_sample + green_sample + blue_sample).
+ *
+ * The above sequence of steps is implemented as a loop for an arbitrary
+ * number of colors in the innermost loop in the code below. `remaining`
+ * is the value passed to `nbad`; it is `total - colors[0]` in the first
+ * call to random_hypergeometric(), and then decreases by `colors[j]` in
+ * each iteration. `num_to_sample` is the `nsample` argument. It
+ * starts at this function's `nsample` input, and is decreased by the
+ * result of the call to random_hypergeometric() in each iteration.
+ *
+ * Assumptions on the arguments (not checked in the function):
+ * * colors[k] >= 0 for k in range(num_colors)
+ * * total = sum(colors)
+ * * 0 <= nsample <= total
+ * * the product num_variates * num_colors does not overflow
+ */
+
+void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates)
+{
+ bool more_than_half;
+
+ if ((total == 0) || (nsample == 0) || (num_variates == 0)) {
+ // Nothing to do.
+ return;
+ }
+
+ more_than_half = nsample > (total / 2);
+ if (more_than_half) {
+ nsample = total - nsample;
+ }
+
+ for (size_t i = 0; i < num_variates * num_colors; i += num_colors) {
+ int64_t num_to_sample = nsample;
+ int64_t remaining = total;
+ for (size_t j = 0; (num_to_sample > 0) && (j + 1 < num_colors); ++j) {
+ int64_t r;
+ remaining -= colors[j];
+ r = random_hypergeometric(bitgen_state,
+ colors[j], remaining, num_to_sample);
+ variates[i + j] = r;
+ num_to_sample -= r;
+ }
+
+ if (num_to_sample > 0) {
+ variates[i + num_colors - 1] = num_to_sample;
+ }
+
+ if (more_than_half) {
+ for (size_t k = 0; k < num_colors; ++k) {
+ variates[i + k] = colors[k] - variates[i + k];
+ }
+ }
+ }
+}
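+/*
+ * Note on the inner loop bound above: only num_colors - 1 univariate
+ * hypergeometric draws are needed, because once the first num_colors - 1
+ * counts are fixed the last color must receive the remaining
+ * num_to_sample; the loop can also exit early once num_to_sample reaches
+ * zero.
+ */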
-#include "legacy-distributions.h"
+#include "include/legacy-distributions.h"
static NPY_INLINE double legacy_double(aug_bitgen_t *aug_state) {
d7 = sqrt((double)(popsize - m) * sample * d4 * d5 / (popsize - 1) + 0.5);
d8 = D1 * d7 + D2;
d9 = (RAND_INT_TYPE)floor((double)(m + 1) * (mingoodbad + 1) / (popsize + 2));
- d10 = (loggam(d9 + 1) + loggam(mingoodbad - d9 + 1) + loggam(m - d9 + 1) +
- loggam(maxgoodbad - m + d9 + 1));
+ d10 = (random_loggam(d9 + 1) + random_loggam(mingoodbad - d9 + 1) +
+ random_loggam(m - d9 + 1) + random_loggam(maxgoodbad - m + d9 + 1));
d11 = MIN(MIN(m, mingoodbad) + 1.0, floor(d6 + 16 * d7));
/* 16 for 16-decimal-digit precision in D1 and D2 */
continue;
Z = (RAND_INT_TYPE)floor(W);
- T = d10 - (loggam(Z + 1) + loggam(mingoodbad - Z + 1) + loggam(m - Z + 1) +
- loggam(maxgoodbad - m + Z + 1));
+ T = d10 - (random_loggam(Z + 1) + random_loggam(mingoodbad - Z + 1) +
+ random_loggam(m - Z + 1) + random_loggam(maxgoodbad - m + Z + 1));
/* fast acceptance: */
if ((X * (4.0 - X) - 3.0) <= T)
+++ /dev/null
-#ifndef _RANDOMDGEN__DISTRIBUTIONS_LEGACY_H_
-#define _RANDOMDGEN__DISTRIBUTIONS_LEGACY_H_
-
-
-#include "../distributions/distributions.h"
-
-typedef struct aug_bitgen {
- bitgen_t *bit_generator;
- int has_gauss;
- double gauss;
-} aug_bitgen_t;
-
-extern double legacy_gauss(aug_bitgen_t *aug_state);
-extern double legacy_standard_exponential(aug_bitgen_t *aug_state);
-extern double legacy_pareto(aug_bitgen_t *aug_state, double a);
-extern double legacy_weibull(aug_bitgen_t *aug_state, double a);
-extern double legacy_power(aug_bitgen_t *aug_state, double a);
-extern double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale);
-extern double legacy_pareto(aug_bitgen_t *aug_state, double a);
-extern double legacy_weibull(aug_bitgen_t *aug_state, double a);
-extern double legacy_chisquare(aug_bitgen_t *aug_state, double df);
-extern double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df,
- double nonc);
-
-extern double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum,
- double dfden, double nonc);
-extern double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale);
-extern double legacy_lognormal(aug_bitgen_t *aug_state, double mean,
- double sigma);
-extern double legacy_standard_t(aug_bitgen_t *aug_state, double df);
-extern int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n,
- double p);
-extern double legacy_standard_cauchy(aug_bitgen_t *state);
-extern double legacy_beta(aug_bitgen_t *aug_state, double a, double b);
-extern double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden);
-extern double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale);
-extern double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape);
-extern double legacy_exponential(aug_bitgen_t *aug_state, double scale);
-extern int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
- int64_t n, binomial_t *binomial);
-extern int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n,
- double p);
-extern int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state,
- int64_t good, int64_t bad,
- int64_t sample);
-extern int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p);
-extern int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam);
-extern int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a);
-extern int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p);
-void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n,
- RAND_INT_TYPE *mnix, double *pix, npy_intp d,
- binomial_t *binomial);
-
-#endif
#ifndef _RANDOMDGEN__PHILOX_H_
#define _RANDOMDGEN__PHILOX_H_
-#include <inttypes.h>
#include "numpy/npy_common.h"
+#include <inttypes.h>
#define PHILOX_BUFFER_SIZE 4L
#ifndef _RANDOMDGEN__SFC64_H_
#define _RANDOMDGEN__SFC64_H_
+#include "numpy/npy_common.h"
#include <inttypes.h>
#ifdef _WIN32
#include <stdlib.h>
#endif
-#include "numpy/npy_common.h"
typedef struct s_sfc64_state {
uint64_t s[4];
import os
from os.path import join
+import sys
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
Generator, MT19937, PCG64, Philox, RandomState, SeedSequence, SFC64,
default_rng
)
-from numpy.random.common import interface
+from numpy.random._common import interface
try:
import cffi # noqa: F401
except ImportError:
MISSING_CTYPES = False
+if sys.flags.optimize > 1:
+ # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
+ # cffi cannot succeed
+ MISSING_CFFI = True
+
+
pwd = os.path.dirname(os.path.abspath(__file__))
return gauss[:n]
def test_seedsequence():
- from numpy.random.bit_generator import (ISeedSequence,
+ from numpy.random._bit_generator import (ISeedSequence,
ISpawnableSeedSequence,
SeedlessSeedSequence)
--- /dev/null
+import os, sys
+import pytest
+import warnings
+
+try:
+ import cffi
+except ImportError:
+ cffi = None
+
+if sys.flags.optimize > 1:
+ # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
+ # cffi cannot succeed
+ cffi = None
+
+try:
+ with warnings.catch_warnings(record=True) as w:
+ # numba issue gh-4733
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ import numba
+except ImportError:
+ numba = None
+
+try:
+ import cython
+except ImportError:
+ cython = None
+
+@pytest.mark.skipif(cython is None, reason="requires cython")
+def test_cython():
+ curdir = os.getcwd()
+ argv = sys.argv
+ examples = (os.path.dirname(__file__), '..', '_examples')
+ try:
+ os.chdir(os.path.join(*examples))
+ sys.argv = argv[:1] + ['build']
+ with warnings.catch_warnings(record=True) as w:
+ # setuptools issue gh-1885
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ from numpy.random._examples.cython import setup
+ finally:
+ sys.argv = argv
+ os.chdir(curdir)
+
+@pytest.mark.skipif(numba is None or cffi is None,
+ reason="requires numba and cffi")
+def test_numba():
+ from numpy.random._examples.numba import extending
+
+@pytest.mark.skipif(cffi is None, reason="requires cffi")
+def test_cffi():
+ from numpy.random._examples.cffi import extending
import pytest
import numpy as np
+from numpy.dual import cholesky, eigh, svd
+from numpy.linalg import LinAlgError
from numpy.testing import (
- assert_, assert_raises, assert_equal,
+ assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
assert_array_equal(non_contig, contig)
+class TestMultivariateHypergeometric(object):
+
+ def setup(self):
+ self.seed = 8675309
+
+ def test_argument_validation(self):
+ # Error cases...
+
+ # `colors` must be a 1-d sequence
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ 10, 4)
+
+ # Negative nsample
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [2, 3, 4], -1)
+
+ # Negative color
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [-1, 2, 3], 2)
+
+ # nsample exceeds sum(colors)
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [2, 3, 4], 10)
+
+ # nsample exceeds sum(colors) (edge case of empty colors)
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [], 1)
+
+ # Validation errors associated with very large values in colors.
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [999999999, 101], 5, 1, 'marginals')
+
+ int64_info = np.iinfo(np.int64)
+ max_int64 = int64_info.max
+ max_int64_index = max_int64 // int64_info.dtype.itemsize
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [max_int64_index - 100, 101], 5, 1, 'count')
+
+ @pytest.mark.parametrize('method', ['count', 'marginals'])
+ def test_edge_cases(self, method):
+ # Set the seed, but in fact, all the results in this test are
+ # deterministic, so we don't really need this.
+ random = Generator(MT19937(self.seed))
+
+ x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
+ assert_array_equal(x, [0, 0, 0])
+
+ x = random.multivariate_hypergeometric([], 0, method=method)
+ assert_array_equal(x, [])
+
+ x = random.multivariate_hypergeometric([], 0, size=1, method=method)
+ assert_array_equal(x, np.empty((1, 0), dtype=np.int64))
+
+ x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
+ assert_array_equal(x, [0, 0, 0])
+
+ x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
+ assert_array_equal(x, [3, 0, 0])
+
+ colors = [1, 1, 0, 1, 1]
+ x = random.multivariate_hypergeometric(colors, sum(colors),
+ method=method)
+ assert_array_equal(x, colors)
+
+ x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
+ method=method)
+ assert_array_equal(x, [[3, 4, 5]]*3)
+
+ # Cases for nsample:
+ # nsample < 10
+ # 10 <= nsample < colors.sum()/2
+ # colors.sum()/2 < nsample < colors.sum() - 10
+ # colors.sum() - 10 < nsample < colors.sum()
+ @pytest.mark.parametrize('nsample', [8, 25, 45, 55])
+ @pytest.mark.parametrize('method', ['count', 'marginals'])
+ @pytest.mark.parametrize('size', [5, (2, 3), 150000])
+ def test_typical_cases(self, nsample, method, size):
+ random = Generator(MT19937(self.seed))
+
+ colors = np.array([10, 5, 20, 25])
+ sample = random.multivariate_hypergeometric(colors, nsample, size,
+ method=method)
+ if isinstance(size, int):
+ expected_shape = (size,) + colors.shape
+ else:
+ expected_shape = size + colors.shape
+ assert_equal(sample.shape, expected_shape)
+ assert_((sample >= 0).all())
+ assert_((sample <= colors).all())
+ assert_array_equal(sample.sum(axis=-1),
+ np.full(size, fill_value=nsample, dtype=int))
+ if isinstance(size, int) and size >= 100000:
+ # This sample is large enough to compare its mean to
+ # the expected values.
+ assert_allclose(sample.mean(axis=0),
+ nsample * colors / colors.sum(),
+ rtol=1e-3, atol=0.005)
+
+ def test_repeatability1(self):
+ random = Generator(MT19937(self.seed))
+ sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
+ method='count')
+ expected = np.array([[2, 1, 2],
+ [2, 1, 2],
+ [1, 1, 3],
+ [2, 0, 3],
+ [2, 1, 2]])
+ assert_array_equal(sample, expected)
+
+ def test_repeatability2(self):
+ random = Generator(MT19937(self.seed))
+ sample = random.multivariate_hypergeometric([20, 30, 50], 50,
+ size=5,
+ method='marginals')
+ expected = np.array([[ 9, 17, 24],
+ [ 7, 13, 30],
+ [ 9, 15, 26],
+ [ 9, 17, 24],
+ [12, 14, 24]])
+ assert_array_equal(sample, expected)
+
+ def test_repeatability3(self):
+ random = Generator(MT19937(self.seed))
+ sample = random.multivariate_hypergeometric([20, 30, 50], 12,
+ size=5,
+ method='marginals')
+ expected = np.array([[2, 3, 7],
+ [5, 3, 4],
+ [2, 5, 5],
+ [5, 3, 4],
+ [1, 5, 6]])
+ assert_array_equal(sample, expected)
+
+
class TestSetState(object):
def setup(self):
self.seed = 1234567890
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
- lbnd = 0 if dt in (np.bool, bool, np.bool_) else np.iinfo(dt).min
- ubnd = 2 if dt in (
- np.bool, bool, np.bool_) else np.iinfo(dt).max + 1
+ lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
+ ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
- low_o = np.array([[low]*10], dtype=np.object)
- high_o = np.array([high] * 10, dtype=np.object)
+ low_o = np.array([[low]*10], dtype=object)
+ high_o = np.array([high] * 10, dtype=object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
- for dt in (bool, int, np.long):
+ for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
assert_array_equal(actual, desired)
+ def test_shuffle_custom_axis(self):
+ random = Generator(MT19937(self.seed))
+ actual = np.arange(16).reshape((4, 4))
+ random.shuffle(actual, axis=1)
+ desired = np.array([[ 0, 3, 1, 2],
+ [ 4, 7, 5, 6],
+ [ 8, 11, 9, 10],
+ [12, 15, 13, 14]])
+ assert_array_equal(actual, desired)
+ random = Generator(MT19937(self.seed))
+ actual = np.arange(16).reshape((4, 4))
+ random.shuffle(actual, axis=-1)
+ assert_array_equal(actual, desired)
+
+ def test_shuffle_axis_nonsquare(self):
+ y1 = np.arange(20).reshape(2, 10)
+ y2 = y1.copy()
+ random = Generator(MT19937(self.seed))
+ random.shuffle(y1, axis=1)
+ random = Generator(MT19937(self.seed))
+ random.shuffle(y2.T)
+ assert_array_equal(y1, y2)
+
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
+ def test_shuffle_exceptions(self):
+ random = Generator(MT19937(self.seed))
+ arr = np.arange(10)
+ assert_raises(np.AxisError, random.shuffle, arr, 1)
+ arr = np.arange(9).reshape((3, 3))
+ assert_raises(np.AxisError, random.shuffle, arr, 3)
+ assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
+ arr = [[1, 2, 3], [4, 5, 6]]
+ assert_raises(NotImplementedError, random.shuffle, arr, 1)
+
def test_permutation(self):
random = Generator(MT19937(self.seed))
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
+ def test_permutation_custom_axis(self):
+ a = np.arange(16).reshape((4, 4))
+ desired = np.array([[ 0, 3, 1, 2],
+ [ 4, 7, 5, 6],
+ [ 8, 11, 9, 10],
+ [12, 15, 13, 14]])
+ random = Generator(MT19937(self.seed))
+ actual = random.permutation(a, axis=1)
+ assert_array_equal(actual, desired)
+ random = Generator(MT19937(self.seed))
+ actual = random.permutation(a, axis=-1)
+ assert_array_equal(actual, desired)
+
+ def test_permutation_exceptions(self):
+ random = Generator(MT19937(self.seed))
+ arr = np.arange(10)
+ assert_raises(np.AxisError, random.permutation, arr, 1)
+ arr = np.arange(9).reshape((3, 3))
+ assert_raises(np.AxisError, random.permutation, arr, 3)
+ assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
+
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
- def test_multivariate_normal(self):
+ @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
+ def test_multivariate_normal(self, method):
random = Generator(MT19937(self.seed))
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
- actual = random.multivariate_normal(mean, cov, size)
+ actual = random.multivariate_normal(mean, cov, size, method=method)
desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
[-0.9967333370066214, 10.342002097029821 ]],
[[ 0.7850019631242964, 11.181113712443013 ],
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
- actual = random.multivariate_normal(mean, cov)
+ actual = random.multivariate_normal(mean, cov, method=method)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
+ # Check that non symmetric covariance input raises exception when
+ # check_valid='raises' if using default svd method.
+ mean = [0, 0]
+ cov = [[1, 2], [1, 2]]
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise')
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
- mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+ assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
+ method='eigh')
+ assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
+ method='cholesky')
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise', method='eigh')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
- random.multivariate_normal(mean, cov)
+ random.multivariate_normal(mean, cov, method=method)
w = sup.record(RuntimeWarning)
assert len(w) == 0
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
- itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
+ itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
- for dt in (bool, int, np.long):
+ for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
suppress_warnings
)
-from numpy.random import MT19937, PCG64, mtrand as random
+from numpy.random import MT19937, PCG64
+from numpy import random
INT_FUNCS = {'binomial': (100.0, 0.6),
'geometric': (.5,),
new_state = ('Unknown', ) + state[1:]
assert_raises(ValueError, self.random_state.set_state, new_state)
assert_raises(TypeError, self.random_state.set_state,
- np.array(new_state, dtype=np.object))
+ np.array(new_state, dtype=object))
state = self.random_state.get_state(legacy=False)
del state['bit_generator']
assert_raises(ValueError, self.random_state.set_state, state)
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
- for dt in (bool, int, np.long):
+ for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
random.seed(self.seed)
rs = random.RandomState(self.seed)
actual = rs.tomaxint(size=(3, 2))
- if np.iinfo(np.int).max == 2147483647:
+ if np.iinfo(int).max == 2147483647:
desired = np.array([[1328851649, 731237375],
[1270502067, 320041495],
[1908433478, 499156889]], dtype=np.int64)
from numpy.compat import long
import numpy as np
-from numpy.random import mtrand as random
+from numpy import random
class TestRegression(object):
import numpy as np
from numpy.testing import assert_array_equal
-from numpy.random.bit_generator import SeedSequence
+from numpy.random import SeedSequence
def test_reference_data():
from numpy.random import (Generator, MT19937, PCG64, Philox, SFC64)
@pytest.fixture(scope='module',
- params=(np.bool, np.int8, np.int16, np.int32, np.int64,
+ params=(np.bool_, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64))
def dtype(request):
return request.param
rg.standard_gamma(1.0, out=existing[::3])
def test_integers_broadcast(self, dtype):
- if dtype == np.bool:
+ if dtype == np.bool_:
upper = 2
lower = 0
else:
assert_equal(a, c)
self._reset_state()
d = self.rg.integers(np.array(
- [lower] * 10), np.array([upper], dtype=np.object), size=10,
+ [lower] * 10), np.array([upper], dtype=object), size=10,
dtype=dtype)
assert_equal(a, d)
self._reset_state()
assert out.shape == (1,)
def test_integers_broadcast_errors(self, dtype):
- if dtype == np.bool:
+ if dtype == np.bool_:
upper = 2
lower = 0
else:
from unittest import TestCase
-PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
-if PY3:
+if PY2:
+ from types import InstanceType
+ lzip = zip
+ text_type = unicode
+ bytes_type = str
+ string_types = basestring,
+ def make_method(func, instance, type):
+ return MethodType(func, instance, type)
+else:
# Python 3 doesn't have an InstanceType, so just use a dummy type.
class InstanceType():
pass
if instance is None:
return func
return MethodType(func, instance)
-else:
- from types import InstanceType
- lzip = zip
- text_type = unicode
- bytes_type = str
- string_types = basestring,
- def make_method(func, instance, type):
- return MethodType(func, instance, type)
_param = namedtuple("param", "args kwargs")
from numpy.core import(
intp, float32, empty, arange, array_repr, ndarray, isnat, array)
-from numpy.lib.utils import deprecate
+import numpy.__config__
if sys.version_info[0] >= 3:
from io import StringIO
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
- 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
+ 'raises', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
'_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles',
- 'break_cycles',
+ 'break_cycles', 'HAS_LAPACK64'
]
IS_PYPY = platform.python_implementation() == 'PyPy'
HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
+HAS_LAPACK64 = hasattr(numpy.__config__, 'lapack_ilp64_opt_info')
def import_nose():
return st
-@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
- "Use numpy.random.rand instead.")
-def rand(*args):
- """Returns an array of random numbers with the given shape.
-
- This only uses the standard library, so it is useful for testing purposes.
- """
- import random
- from numpy.core import zeros, float64
- results = zeros(args, float64)
- f = results.flat
- for i in range(len(f)):
- f[i] = random.random()
- return results
-
-
if os.name == 'nt':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance=None,
gc.collect()
# one more, just to make sure
gc.collect()
+
+
+def requires_memory(free_bytes):
+ """Decorator to skip a test if not enough memory is available"""
+ import pytest
+
+ def decorator(func):
+ @wraps(func)
+ def wrapper(*a, **kw):
+ msg = check_free_memory(free_bytes)
+ if msg is not None:
+ pytest.skip(msg)
+
+ try:
+ return func(*a, **kw)
+ except MemoryError:
+ # Probably ran out of memory regardless: don't regard as failure
+ pytest.xfail("MemoryError raised")
+
+ return wrapper
+
+ return decorator
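+
+# Usage sketch (hypothetical test function, assuming pytest collection):
+#
+#     @requires_memory(free_bytes=8 * 10**9)
+#     def test_large_allocation():
+#         np.zeros(10**9, dtype=np.float64)
+#
+# The test is skipped unless roughly 8 GB appear to be available, and a
+# MemoryError raised regardless is reported as xfail rather than a failure.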
+
+
+def check_free_memory(free_bytes):
+ """
+ Check whether `free_bytes` amount of memory is currently free.
+ Returns: None if enough memory available, otherwise error message
+ """
+ env_var = 'NPY_AVAILABLE_MEM'
+ env_value = os.environ.get(env_var)
+ if env_value is not None:
+ try:
+ mem_free = _parse_size(env_value)
+ except ValueError as exc:
+ raise ValueError('Invalid environment variable {}: {!s}'.format(
+ env_var, exc))
+
+ msg = ('{0} GB memory required, but environment variable '
+ 'NPY_AVAILABLE_MEM={1} set'.format(
+ free_bytes/1e9, env_value))
+ else:
+ mem_free = _get_mem_available()
+
+ if mem_free is None:
+ msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
+ "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
+ "the test.")
+ mem_free = -1
+ else:
+ msg = '{0} GB memory required, but {1} GB available'.format(
+ free_bytes/1e9, mem_free/1e9)
+
+ return msg if mem_free < free_bytes else None
+
+
+def _parse_size(size_str):
+ """Convert memory size strings ('12 GB' etc.) to float"""
+ suffixes = {'': 1, 'b': 1,
+ 'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4,
+ 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4,
+ 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4}
+
+ size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format(
+ '|'.join(suffixes.keys())), re.I)
+
+ m = size_re.match(size_str.lower())
+ if not m or m.group(2) not in suffixes:
+ raise ValueError("value {!r} not a valid size".format(size_str))
+ return int(float(m.group(1)) * suffixes[m.group(2)])
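+
+# Examples of what _parse_size accepts (decimal vs binary suffixes):
+#   _parse_size('12 GB')  -> 12000000000
+#   _parse_size('64 KiB') -> 65536
+#   _parse_size('0.5mb')  -> 500000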
+
+
+def _get_mem_available():
+ """Return available memory in bytes, or None if unknown."""
+ try:
+ import psutil
+ return psutil.virtual_memory().available
+ except (ImportError, AttributeError):
+ pass
+
+ if sys.platform.startswith('linux'):
+ info = {}
+ with open('/proc/meminfo', 'r') as f:
+ for line in f:
+ p = line.split()
+ info[p[0].strip(':').lower()] = int(p[1]) * 1024
+
+ if 'memavailable' in info:
+ # Linux >= 3.14
+ return info['memavailable']
+ else:
+ return info['memfree'] + info['cached']
+
+ return None
+++ /dev/null
-"""
-Back compatibility decorators module. It will import the appropriate
-set of tools
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
-
-# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.decorators is deprecated "
- "since numpy 1.15.0, import from numpy.testing instead.",
- DeprecationWarning, stacklevel=2)
-
-from ._private.decorators import *
+++ /dev/null
-"""
-Back compatibility noseclasses module. It will import the appropriate
-set of tools
-"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
-
-# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.noseclasses is deprecated "
- "since 1.15.0, import from numpy.testing instead",
- DeprecationWarning, stacklevel=2)
-
-from ._private.noseclasses import *
+++ /dev/null
-"""
-Back compatibility nosetester module. It will import the appropriate
-set of tools
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
-
-# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.nosetester is deprecated "
- "since 1.15.0, import from numpy.testing instead.",
- DeprecationWarning, stacklevel=2)
-
-from ._private.nosetester import *
-
-__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
- '_numpy_tester', 'get_package_name', 'import_nose',
- 'suppress_warnings']
print(char, end=' ')
print()
-print("can cast")
-print_cancast_table(np.typecodes['All'])
-print()
-print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
-print()
-print("scalar + scalar")
-print_coercion_table(np.typecodes['All'], 0, 0, False)
-print()
-print("scalar + neg scalar")
-print_coercion_table(np.typecodes['All'], 0, -1, False)
-print()
-print("array + scalar")
-print_coercion_table(np.typecodes['All'], 0, 0, True)
-print()
-print("array + neg scalar")
-print_coercion_table(np.typecodes['All'], 0, -1, True)
-print()
-print("promote_types")
-print_coercion_table(np.typecodes['All'], 0, 0, False, True)
+
+if __name__ == '__main__':
+ print("can cast")
+ print_cancast_table(np.typecodes['All'])
+ print()
+ print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
+ print()
+ print("scalar + scalar")
+ print_coercion_table(np.typecodes['All'], 0, 0, False)
+ print()
+ print("scalar + neg scalar")
+ print_coercion_table(np.typecodes['All'], 0, -1, False)
+ print()
+ print("array + scalar")
+ print_coercion_table(np.typecodes['All'], 0, 0, True)
+ print()
+ print("array + neg scalar")
+ print_coercion_table(np.typecodes['All'], 0, -1, True)
+ print()
+ print("promote_types")
+ print_coercion_table(np.typecodes['All'], 0, 0, False, True)
for t in ['S1', 'U1']:
foo(t)
- def test_0_ndim_array(self):
- x = np.array(473963742225900817127911193656584771)
- y = np.array(18535119325151578301457182298393896)
- assert_raises(AssertionError, self._assert_func, x, y)
-
- y = x
- self._assert_func(x, y)
-
- x = np.array(43)
- y = np.array(10)
- assert_raises(AssertionError, self._assert_func, x, y)
-
- y = x
- self._assert_func(x, y)
-
def test_generic_rank3(self):
"""Test rank 3 array for all dtypes."""
def foo(t):
def setup(self):
self._assert_func = assert_approx_equal
- def test_simple_0d_arrays(self):
- x = np.array(1234.22)
- y = np.array(1234.23)
+ def test_simple_arrays(self):
+ x = np.array([1234.22])
+ y = np.array([1234.23])
self._assert_func(x, y, significant=5)
self._assert_func(x, y, significant=6)
import warnings
-# 2018-04-04, numpy 1.15.0
+# 2018-04-04, numpy 1.15.0 ImportWarning
+# 2019-09-18, numpy 1.18.0 DeprecationWarning (changed)
warnings.warn("Importing from numpy.testing.utils is deprecated "
"since 1.15.0, import from numpy.testing instead.",
- ImportWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=2)
from ._private.utils import *
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
- 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
+ 'raises', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
from __future__ import division, absolute_import, print_function
import sys
+import subprocess
+import pkgutil
+import types
+import importlib
+import warnings
import numpy as np
+import numpy
import pytest
+
try:
import ctypes
except ImportError:
ctypes = None
+
def check_dir(module, module_name=None):
"""Returns a mapping of all objects with the wrong __module__ attribute."""
if module_name is None:
sys.version_info[0] < 3,
reason="NumPy exposes slightly different functions on Python 2")
def test_numpy_namespace():
- # None of these objects are publicly documented.
+ # None of these objects are publicly documented to be part of the main
+ # NumPy namespace (some are useful though, others need to be cleaned up)
undocumented = {
'Tester': 'numpy.testing._private.nosetester.NoseTester',
'_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
assert bad_results == whitelist
+@pytest.mark.parametrize('name', ['testing', 'Tester'])
+def test_import_lazy_import(name):
+ """Make sure we can actually use the modules we lazy load.
+
+    While not exported as part of the public API, these modules were always
+    accessible.  With the use of __getattr__ and __dir__ that is no longer
+    guaranteed, and a badly implemented __getattr__ can even trigger
+    infinite recursion.
+
+    Importing in a subprocess is the only way I found that forces the
+    failure to appear with the badly implemented code.
+
+    We also test that the lazily imported modules show up in dir(numpy).
+
+ """
+ exe = (sys.executable, '-c', "import numpy; numpy." + name)
+ result = subprocess.check_output(exe)
+ assert not result
+
+ # Make sure they are still in the __dir__
+ assert name in dir(np)
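The failure mode this test guards against comes from implementing lazy loading
with a module-level ``__getattr__`` and ``__dir__`` (PEP 562, Python 3.7+). A
minimal sketch of the pattern, with hypothetical names, might look like::

    # mypackage/__init__.py -- hypothetical package with lazy submodules
    import importlib

    _LAZY_SUBMODULES = {'testing'}  # imported on first attribute access

    def __getattr__(name):
        if name in _LAZY_SUBMODULES:
            return importlib.import_module('.' + name, __name__)
        # Fall through to AttributeError here; touching attributes of this
        # module itself at this point is what triggers infinite recursion.
        raise AttributeError(
            "module {!r} has no attribute {!r}".format(__name__, name))

    def __dir__():
        return sorted(set(globals()) | _LAZY_SUBMODULES)

A recursing ``__getattr__`` makes the subprocess above exit non-zero, so
``check_output`` raises and the test fails, whereas an in-process check could
be masked by attributes that were already resolved.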
+
+
def test_numpy_linalg():
bad_results = check_dir(np.linalg)
assert bad_results == {}
bad_results = check_dir(np.fft)
assert bad_results == {}
+
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available in this python")
def test_NPY_NO_EXPORT():
f = getattr(cdll, 'test_not_exported', None)
assert f is None, ("'test_not_exported' is mistakenly exported, "
"NPY_NO_EXPORT does not work")
+
+
+# Historically NumPy has not used leading underscores for private submodules
+# much. This has resulted in lots of things that look like public modules
+# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`),
+# but were never intended to be public. The PUBLIC_MODULES list contains
+# modules that are either public because they were meant to be, or because they
+# contain public functions/objects that aren't present in any other namespace
+# for whatever reason and therefore should be treated as public.
+#
+# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
+# of underscores) but should not be used. For many of those modules the
+# current status is fine. For others it may make sense to work on making them
+# private, to clean up our public API and avoid confusion.
+PUBLIC_MODULES = ['numpy.' + s for s in [
+ "ctypeslib",
+ "distutils",
+ "distutils.cpuinfo",
+ "distutils.exec_command",
+ "distutils.misc_util",
+ "distutils.log",
+ "distutils.system_info",
+ "doc",
+ "doc.basics",
+ "doc.broadcasting",
+ "doc.byteswapping",
+ "doc.constants",
+ "doc.creation",
+ "doc.dispatch",
+ "doc.glossary",
+ "doc.indexing",
+ "doc.internals",
+ "doc.misc",
+ "doc.structured_arrays",
+ "doc.subclassing",
+ "doc.ufuncs",
+ "dual",
+ "f2py",
+ "fft",
+ "lib",
+ "lib.format", # was this meant to be public?
+ "lib.mixins",
+ "lib.recfunctions",
+ "lib.scimath",
+ "linalg",
+ "ma",
+ "ma.extras",
+ "ma.mrecords",
+ "matlib",
+ "polynomial",
+ "polynomial.chebyshev",
+ "polynomial.hermite",
+ "polynomial.hermite_e",
+ "polynomial.laguerre",
+ "polynomial.legendre",
+ "polynomial.polynomial",
+ "polynomial.polyutils",
+ "random",
+ "testing",
+ "version",
+]]
+
+
+PUBLIC_ALIASED_MODULES = [
+ "numpy.char",
+ "numpy.emath",
+ "numpy.rec",
+]
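These aliases get a separate list because, unlike the entries in
PUBLIC_MODULES, they cannot be imported under their public names: the
attribute exists but points at a module with a different ``__name__``. A
quick illustration, assuming the current NumPy layout::

    import importlib
    import numpy as np

    print(np.char.__name__)   # a differently named source module,
                              # e.g. 'numpy.core.defchararray'
    importlib.import_module('numpy.char')  # raises ModuleNotFoundError

This is also why ``test_api_importable`` further down has to ``eval`` these
names rather than go through ``importlib``.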
+
+
+PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
+ "compat",
+ "compat.py3k",
+ "conftest",
+ "core",
+ "core.arrayprint",
+ "core.defchararray",
+ "core.einsumfunc",
+ "core.fromnumeric",
+ "core.function_base",
+ "core.getlimits",
+ "core.machar",
+ "core.memmap",
+ "core.multiarray",
+ "core.numeric",
+ "core.numerictypes",
+ "core.overrides",
+ "core.records",
+ "core.shape_base",
+ "core.umath",
+ "core.umath_tests",
+ "distutils.ccompiler",
+ "distutils.command",
+ "distutils.command.autodist",
+ "distutils.command.bdist_rpm",
+ "distutils.command.build",
+ "distutils.command.build_clib",
+ "distutils.command.build_ext",
+ "distutils.command.build_py",
+ "distutils.command.build_scripts",
+ "distutils.command.build_src",
+ "distutils.command.config",
+ "distutils.command.config_compiler",
+ "distutils.command.develop",
+ "distutils.command.egg_info",
+ "distutils.command.install",
+ "distutils.command.install_clib",
+ "distutils.command.install_data",
+ "distutils.command.install_headers",
+ "distutils.command.sdist",
+ "distutils.compat",
+ "distutils.conv_template",
+ "distutils.core",
+ "distutils.extension",
+ "distutils.fcompiler",
+ "distutils.fcompiler.absoft",
+ "distutils.fcompiler.compaq",
+ "distutils.fcompiler.environment",
+ "distutils.fcompiler.g95",
+ "distutils.fcompiler.gnu",
+ "distutils.fcompiler.hpux",
+ "distutils.fcompiler.ibm",
+ "distutils.fcompiler.intel",
+ "distutils.fcompiler.lahey",
+ "distutils.fcompiler.mips",
+ "distutils.fcompiler.nag",
+ "distutils.fcompiler.none",
+ "distutils.fcompiler.pathf95",
+ "distutils.fcompiler.pg",
+ "distutils.fcompiler.sun",
+ "distutils.fcompiler.vast",
+ "distutils.from_template",
+ "distutils.intelccompiler",
+ "distutils.lib2def",
+ "distutils.line_endings",
+ "distutils.mingw32ccompiler",
+ "distutils.msvccompiler",
+ "distutils.npy_pkg_config",
+ "distutils.numpy_distribution",
+ "distutils.pathccompiler",
+ "distutils.unixccompiler",
+ "f2py.auxfuncs",
+ "f2py.capi_maps",
+ "f2py.cb_rules",
+ "f2py.cfuncs",
+ "f2py.common_rules",
+ "f2py.crackfortran",
+ "f2py.diagnose",
+ "f2py.f2py2e",
+ "f2py.f2py_testing",
+ "f2py.f90mod_rules",
+ "f2py.func2subr",
+ "f2py.rules",
+ "f2py.use_rules",
+ "fft.helper",
+ "lib.arraypad",
+ "lib.arraysetops",
+ "lib.arrayterator",
+ "lib.financial",
+ "lib.function_base",
+ "lib.histograms",
+ "lib.index_tricks",
+ "lib.nanfunctions",
+ "lib.npyio",
+ "lib.polynomial",
+ "lib.shape_base",
+ "lib.stride_tricks",
+ "lib.twodim_base",
+ "lib.type_check",
+ "lib.ufunclike",
+ "lib.user_array", # note: not in np.lib, but probably should just be deleted
+ "lib.utils",
+ "linalg.lapack_lite",
+ "linalg.linalg",
+ "ma.bench",
+ "ma.core",
+ "ma.testutils",
+ "ma.timer_comparison",
+ "matrixlib",
+ "matrixlib.defmatrix",
+ "random.mtrand",
+ "testing.print_coercion_tables",
+ "testing.utils",
+]]
+
+
+def is_unexpected(name):
+ """Check if this needs to be considered."""
+ if '._' in name or '.tests' in name or '.setup' in name:
+ return False
+
+ if name in PUBLIC_MODULES:
+ return False
+
+ if name in PUBLIC_ALIASED_MODULES:
+ return False
+
+ if name in PRIVATE_BUT_PRESENT_MODULES:
+ return False
+
+ return True
+
+
+# These are present in a directory with an __init__.py but cannot be imported.
+# code_generators/ isn't installed, but is present for an in-place build.
+SKIP_LIST = [
+ "numpy.core.code_generators",
+ "numpy.core.code_generators.genapi",
+ "numpy.core.code_generators.generate_umath",
+ "numpy.core.code_generators.ufunc_docstrings",
+ "numpy.core.code_generators.generate_numpy_api",
+ "numpy.core.code_generators.generate_ufunc_api",
+ "numpy.core.code_generators.numpy_api",
+ "numpy.core.cversions",
+ "numpy.core.generate_numpy_api",
+ "numpy.distutils.msvc9compiler",
+]
+
+
+def test_all_modules_are_expected():
+ """
+ Test that we don't add anything that looks like a new public module by
+    accident. The check is based on filenames.
+ """
+
+ modnames = []
+ for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__,
+ prefix=np.__name__ + '.',
+ onerror=None):
+ if is_unexpected(modname) and modname not in SKIP_LIST:
+ # We have a name that is new. If that's on purpose, add it to
+ # PUBLIC_MODULES. We don't expect to have to add anything to
+ # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
+ modnames.append(modname)
+
+ if modnames:
+ raise AssertionError("Found unexpected modules: {}".format(modnames))
+
+
+# Stuff that clearly shouldn't be in the API and is detected by the test
+# below
+SKIP_LIST_2 = [
+ 'numpy.math',
+ 'numpy.distutils.log.sys',
+ 'numpy.distutils.system_info.copy',
+ 'numpy.distutils.system_info.distutils',
+ 'numpy.distutils.system_info.log',
+ 'numpy.distutils.system_info.os',
+ 'numpy.distutils.system_info.platform',
+ 'numpy.distutils.system_info.re',
+ 'numpy.distutils.system_info.shutil',
+ 'numpy.distutils.system_info.subprocess',
+ 'numpy.distutils.system_info.sys',
+ 'numpy.distutils.system_info.tempfile',
+ 'numpy.distutils.system_info.textwrap',
+ 'numpy.distutils.system_info.warnings',
+ 'numpy.doc.constants.re',
+ 'numpy.doc.constants.textwrap',
+ 'numpy.lib.emath',
+ 'numpy.lib.math',
+ 'numpy.matlib.char',
+ 'numpy.matlib.rec',
+ 'numpy.matlib.emath',
+ 'numpy.matlib.math',
+ 'numpy.matlib.linalg',
+ 'numpy.matlib.fft',
+ 'numpy.matlib.random',
+ 'numpy.matlib.ctypeslib',
+ 'numpy.matlib.ma',
+]
+
+
+def test_all_modules_are_expected_2():
+ """
+ Method checking all objects. The pkgutil-based method in
+ `test_all_modules_are_expected` does not catch imports into a namespace,
+    only filenames. So this test is more thorough and checks things like::
+
+        from .lib import scimath as emath
+
+ To check if something in a module is (effectively) public, one can check if
+ there's anything in that namespace that's a public function/object but is
+    not exposed in a higher-level namespace. For example, for a `numpy.lib`
+ submodule::
+
+ mod = np.lib.mixins
+ for obj in mod.__all__:
+ if obj in np.__all__:
+ continue
+ elif obj in np.lib.__all__:
+ continue
+ else:
+ print(obj)
+
+ """
+
+ def find_unexpected_members(mod_name):
+ members = []
+ module = importlib.import_module(mod_name)
+ if hasattr(module, '__all__'):
+ objnames = module.__all__
+ else:
+ objnames = dir(module)
+
+ for objname in objnames:
+ if not objname.startswith('_'):
+ fullobjname = mod_name + '.' + objname
+ if isinstance(getattr(module, objname), types.ModuleType):
+ if is_unexpected(fullobjname):
+ if fullobjname not in SKIP_LIST_2:
+ members.append(fullobjname)
+
+ return members
+
+ unexpected_members = find_unexpected_members("numpy")
+ for modname in PUBLIC_MODULES:
+ unexpected_members.extend(find_unexpected_members(modname))
+
+ if unexpected_members:
+ raise AssertionError("Found unexpected object(s) that look like "
+ "modules: {}".format(unexpected_members))
+
+
+def test_api_importable():
+ """
+ Check that all submodules listed higher up in this file can be imported
+
+ Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
+ simply need to be removed from the list (deprecation may or may not be
+ needed - apply common sense).
+ """
+ def check_importable(module_name):
+ try:
+ importlib.import_module(module_name)
+ except (ImportError, AttributeError):
+ return False
+
+ return True
+
+ module_names = []
+ for module_name in PUBLIC_MODULES:
+ if not check_importable(module_name):
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules in the public API that cannot be "
+ "imported: {}".format(module_names))
+
+ for module_name in PUBLIC_ALIASED_MODULES:
+ try:
+ eval(module_name)
+ except AttributeError:
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules in the public API that were not "
+ "found: {}".format(module_names))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', category=DeprecationWarning)
+ warnings.filterwarnings('always', category=ImportWarning)
+ for module_name in PRIVATE_BUT_PRESENT_MODULES:
+ if not check_importable(module_name):
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules that are not really public but looked "
+                             "public and cannot be imported: "
+ "{}".format(module_names))
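All of the checks above are plain test functions, so they can be run in
isolation; with the test runner used elsewhere in this repository that would
be something like (the exact test-file path is an assumption based on the
module layout shown here)::

    $ python runtests.py -t numpy/tests/test_public_api.py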
paver write_release
paver write_note
-This automatically put the checksum into README.rst, and write the Changelog
-which can be uploaded to sourceforge.
+This automatically puts the checksum into README.rst and writes the Changelog.
TODO
====
#-----------------------------------
# Path to the release notes
-RELEASE_NOTES = 'doc/release/1.17.5-notes.rst'
+RELEASE_NOTES = 'doc/source/release/1.18.0-notes.rst'
#-------------------------------------------------------
--- /dev/null
+[build-system]
+# Minimum requirements for the build system to execute.
+requires = [
+ "setuptools",
+ "wheel",
+ "Cython>=0.29.13", # Note: keep in sync with tools/cythonize.py
+]
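With a sufficiently recent pip, this ``[build-system]`` table (PEP 518) makes
``pip install .`` build NumPy in an isolated environment containing exactly
these requirements, so Cython no longer needs to be pre-installed in the
invoking environment::

    $ pip install .   # build dependencies are fetched per pyproject.toml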
+
+
+[tool.towncrier]
+    # Do not set the version here, since it is hard to import numpy inside
+    # the source directory; the name is hardcoded. Use "--version 1.18.0"
+    # to set the version.
+ single_file = true
+ filename = "doc/source/release/{version}-notes.rst"
+ directory = "doc/release/upcoming_changes/"
+ issue_format = "`gh-{issue} <https://github.com/numpy/numpy/pull/{issue}>`__"
+ template = "doc/release/upcoming_changes/template.rst"
+ underlines = "~="
+ all_bullets = false
+
+
+ [[tool.towncrier.type]]
+ directory = "highlight"
+ name = "Highlights"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "new_function"
+ name = "New functions"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "deprecation"
+ name = "Deprecations"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "future"
+ name = "Future Changes"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "expired"
+ name = "Expired deprecations"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "compatibility"
+ name = "Compatibility notes"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "c_api"
+ name = "C API changes"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "new_feature"
+ name = "New Features"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "improvement"
+ name = "Improvements"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "change"
+ name = "Changes"
+ showcontent = true
+
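Under this configuration each release-note fragment is a file in
``doc/release/upcoming_changes/`` named ``<number>.<type>.rst``, where
``<type>`` is one of the directories declared above; a hypothetical
deprecation note for pull request 12345 would live at
``doc/release/upcoming_changes/12345.deprecation.rst``. Since the version
cannot be imported from the source tree, it is passed explicitly when
assembling the notes::

    $ towncrier --version 1.18.0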
$ gdb --args python runtests.py [...other args...]
+Disable pytest capturing of output by using its '-s' option:
+
+ $ python runtests.py -- -s
+
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
+ parser.add_argument("--debug-info", action="store_true",
+ help=("add --verbose-cfg to build_src to show compiler "
+ "configuration output while creating "
+ "_numpyconfig.h and config.h"))
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="do not build the project (use system installed version)")
parser.add_argument("--build-only", "-b", action="store_true", default=False,
help="Debug build")
parser.add_argument("--parallel", "-j", type=int, default=0,
help="Number of parallel jobs during build")
+ parser.add_argument("--warn-error", action="store_true",
+ help="Set -Werror to convert all compiler warnings to errors")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--bench", action="store_true",
cmd += ["build"]
if args.parallel > 1:
cmd += ["-j", str(args.parallel)]
+ if args.debug_info:
+ cmd += ["build_src", "--verbose-cfg"]
+ if args.warn_error:
+ cmd += ["--warn-error"]
# Install; avoid producing eggs so numpy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
+Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
"""
MAJOR = 1
-MINOR = 17
-MICRO = 5
+MINOR = 18
+MICRO = 0
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
except (subprocess.SubprocessError, OSError):
GIT_REVISION = "Unknown"
+ if not GIT_REVISION:
+ # this shouldn't happen but apparently can (see gh-8512)
+ GIT_REVISION = "Unknown"
+
return GIT_REVISION
# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be
try:
from numpy.version import git_revision as GIT_REVISION
except ImportError:
- raise ImportError("Unable to import git_revision. Try removing " \
- "numpy/version.py and the build directory " \
+ raise ImportError("Unable to import git_revision. Try removing "
+ "numpy/version.py and the build directory "
"before building.")
else:
GIT_REVISION = "Unknown"
# below and not standalone. Hence they're not added to good_commands.
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
- 'bdist_wininst', 'bdist_msi', 'bdist_mpkg')
+ 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src')
for command in good_commands:
if command in args:
def setup_package():
- src_path = os.path.dirname(os.path.abspath(sys.argv[0]))
+ src_path = os.path.dirname(os.path.abspath(__file__))
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
- cmdclass={"sdist": sdist_checked},
+ cmdclass={"sdist": sdist_checked,
+ },
python_requires='>=3.5',
zip_safe=False,
entry_points={
if run_build:
from numpy.distutils.core import setup
cwd = os.path.abspath(os.path.dirname(__file__))
- if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
- # Generate Cython sources, unless building from source release
+    if 'sdist' not in sys.argv:
+ # Generate Cython sources, unless we're generating an sdist
generate_cython()
metadata['configuration'] = configuration
# we will pay the ~13 minute cost of compiling Cython only when a new
# version is scraped in by pip; otherwise, use the cached
# wheel shippable places on Amazon S3 after we build it once
- - pip install cython --cache-dir=/root/.cache/pip/wheels/$SHIPPABLE_PYTHON_VERSION
- # install pytz for datetime testing
- - pip install pytz
+ - pip install -r test_requirements.txt --cache-dir=/root/.cache/pip/wheels/$SHIPPABLE_PYTHON_VERSION
# install pytest-xdist to leverage a second core
# for unit tests
- pip install pytest-xdist
# build first and adjust PATH so f2py is found in scripts dir
# using > 1 core for the build sometimes slows it down a fair bit,
# other times it modestly speeds things up, so avoid it for now
- - python setup.py install
+ - pip install .
- extra_directories=($SHIPPABLE_REPO_DIR/build/*scripts*)
- extra_path=$(printf "%s:" "${extra_directories[@]}")
- export PATH="${extra_path}${PATH}"
# check OpenBLAS version
- - python tools/openblas_support.py --check_version 0.3.5
+ - python tools/openblas_support.py --check_version 0.3.7
# run the test suite
- - python runtests.py -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10
+ - python runtests.py --debug-info --show-build-log -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10
cache: true
cache_dir_list:
# include_dirs = /opt/OpenBLAS/include
# runtime_library_dirs = /opt/OpenBLAS/lib
+# OpenBLAS (64-bit with suffix)
+# -----------------------------
+# OpenBLAS can be compiled with 64-bit integer size and symbol suffix '64_'
+# (INTERFACE64=1 SYMBOLSUFFIX=64_). OpenBLAS builds with this setting are also
+# provided by some Linux distributions (e.g. Fedora's 64-bit openblas packages).
+# This is an emerging "standard" for 64-bit BLAS/LAPACK, avoiding symbol clashes
+# with 32-bit BLAS/LAPACK.
+#
+# To build NumPy with such 64-bit BLAS/LAPACK, set the environment
+# variables NPY_USE_BLAS_ILP64=1, NPY_BLAS_ILP64_ORDER=openblas64_,
+# NPY_LAPACK_ILP64_ORDER=openblas64_ at build time.
+#
+# See:
+# https://github.com/xianyi/OpenBLAS/issues/646
+#
+# [openblas64_]
+# libraries = openblas64_
+# library_dirs = /opt/OpenBLAS/lib
+# include_dirs = /opt/OpenBLAS/include
+# runtime_library_dirs = /opt/OpenBLAS/lib
+
+# OpenBLAS (64-bit ILP64)
+# -----------------------
+# It is possible to also use OpenBLAS compiled with 64-bit integer
+# size (ILP64) but no symbol name changes. To do that, set the
+# environment variables NPY_USE_BLAS_ILP64=1,
+# NPY_BLAS_ILP64_ORDER=openblas_ilp64,
+# NPY_LAPACK_ILP64_ORDER=openblas_ilp64 at build time.
+#
+# Note that mixing both 64-bit and 32-bit BLAS without symbol suffixes
+# in the same application may cause problems due to symbol name
+# clashes, especially with embedded Python interpreters.
+#
+# The name of the library file may vary between systems, so you may need
+# to check your specific OpenBLAS installation and adjust the entry below
+# accordingly, e.g. uncomment and set ``libraries = openblas``.
+#
+# [openblas_ilp64]
+# libraries = openblas64
+# library_dirs = /opt/OpenBLAS/lib
+# include_dirs = /opt/OpenBLAS/include
+# runtime_library_dirs = /opt/OpenBLAS/lib
+# symbol_prefix =
+# symbol_suffix =
+
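Putting the comment blocks above into practice, a from-source build against
the suffixed 64-bit OpenBLAS is driven entirely by environment variables (the
values are taken straight from the text above)::

    $ NPY_USE_BLAS_ILP64=1 \
      NPY_BLAS_ILP64_ORDER=openblas64_ \
      NPY_LAPACK_ILP64_ORDER=openblas64_ pip install .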
# BLIS
# ----
# BLIS (https://github.com/flame/blis) also provides a BLAS interface. It's a
--- /dev/null
+cython==0.29.14
+pytest==5.3.1
+pytz==2019.3
+pytest-cov==2.8.1
+pickle5; python_version == '3.7'
+pickle5; python_version == '3.6' and platform_python_implementation != 'PyPy'
+nose
+# for numpy.random.tests.test_extending
+cffi
+++ /dev/null
-cython
-nose
-pytest-timeout
-pytest-xdist
-pytest-env
-pytest-faulthandler
--- /dev/null
+#!/usr/bin/env python
+
+import sys
+import toml
+import os
+
+path = toml.load("pyproject.toml")["tool"]["towncrier"]["directory"]
+
+fragments = os.listdir(path)
+fragments.remove("README.rst")
+fragments.remove("template.rst")
+
+if fragments:
+ print("The following files were not found by towncrier:")
+ print(" " + " \n".join(fragments))
+ sys.exit(1)
-#!/usr/bin/env python
+#!/usr/bin/env python3
""" cythonize
Cythonize pyx files into C files as needed.
def process_pyx(fromfile, tofile):
flags = ['-3', '--fast-fail']
if tofile.endswith('.cxx'):
- flags += ['--cplus']
+ flags.append('--cplus')
try:
# try the cython in the installed python first (somewhat related to scipy/scipy#2397)
from Cython.Compiler.Version import version as cython_version
except ImportError:
- # if that fails, use the one on the path, which might be the wrong version
- try:
- # Try the one on the path as a last resort
- subprocess.check_call(
- ['cython'] + flags + ["-o", tofile, fromfile])
- except OSError:
- raise OSError('Cython needs to be installed')
+ # The `cython` command need not point to the version installed in the
+ # Python running this script, so raise an error to avoid the chance of
+ # using the wrong version of Cython.
+ raise OSError('Cython needs to be installed in Python as a module')
else:
# check the version, and invoke through python
from distutils.version import LooseVersion
- # requiring the newest version on all pythons doesn't work, since
- # we're relying on the version of the distribution cython. Add new
- # versions as they become required for new python versions.
- if sys.version_info[:2] < (3, 7):
- required_version = LooseVersion('0.19')
- else:
- required_version = LooseVersion('0.28')
+ # Cython 0.29.13 is required for Python 3.8 and there are
+ # other fixes in the 0.29 series that are needed even for earlier
+ # Python versions.
+ # Note: keep in sync with that in pyproject.toml
+ required_version = LooseVersion('0.29.13')
if LooseVersion(cython_version) < required_version:
raise RuntimeError('Building {} requires Cython >= {}'.format(
def __init__(self, content, name=None, namespace=None, stacklevel=None,
get_template=None, default_inherit=None, line_offset=0,
- delimeters=None):
+ delimiters=None):
self.content = content
- # set delimeters
- if delimeters is None:
- delimeters = (self.default_namespace['start_braces'],
+ # set delimiters
+ if delimiters is None:
+ delimiters = (self.default_namespace['start_braces'],
self.default_namespace['end_braces'])
else:
- assert len(delimeters) == 2 and all(
- [isinstance(delimeter, basestring_)
- for delimeter in delimeters])
+ assert len(delimiters) == 2 and all(
+ [isinstance(delimiter, basestring_)
+ for delimiter in delimiters])
self.default_namespace = self.__class__.default_namespace.copy()
- self.default_namespace['start_braces'] = delimeters[0]
- self.default_namespace['end_braces'] = delimeters[1]
- self.delimeters = delimeters
+ self.default_namespace['start_braces'] = delimiters[0]
+ self.default_namespace['end_braces'] = delimiters[1]
+ self.delimiters = delimiters
self._unicode = is_unicode(content)
if name is None and stacklevel is not None:
self.name = name
self._parsed = parse(
content, name=name, line_offset=line_offset,
- delimeters=self.delimeters)
+ delimiters=self.delimiters)
if namespace is None:
namespace = {}
self.namespace = namespace
return msg
-def sub(content, delimeters=None, **kw):
+def sub(content, delimiters=None, **kw):
name = kw.get('__name')
- tmpl = Template(content, name=name, delimeters=delimeters)
+ tmpl = Template(content, name=name, delimiters=delimiters)
return tmpl.substitute(kw)
############################################################
-def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None):
- if delimeters is None:
- delimeters = (Template.default_namespace['start_braces'],
+def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None):
+ if delimiters is None:
+ delimiters = (Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'])
in_expr = False
chunks = []
last = 0
last_pos = (line_offset + 1, 1)
- token_re = re.compile(r'%s|%s' % (re.escape(delimeters[0]),
- re.escape(delimeters[1])))
+ token_re = re.compile(r'%s|%s' % (re.escape(delimiters[0]),
+ re.escape(delimiters[1])))
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end(), last, last_pos)
- if expr == delimeters[0] and in_expr:
- raise TemplateError('%s inside expression' % delimeters[0],
+ if expr == delimiters[0] and in_expr:
+ raise TemplateError('%s inside expression' % delimiters[0],
position=pos,
name=name)
- elif expr == delimeters[1] and not in_expr:
- raise TemplateError('%s outside expression' % delimeters[1],
+ elif expr == delimiters[1] and not in_expr:
+ raise TemplateError('%s outside expression' % delimiters[1],
position=pos,
name=name)
- if expr == delimeters[0]:
+ if expr == delimiters[0]:
part = s[last:match.start()]
if part:
chunks.append(part)
last = match.end()
last_pos = pos
if in_expr:
- raise TemplateError('No %s to finish last expression' % delimeters[1],
+ raise TemplateError('No %s to finish last expression' % delimiters[1],
name=name, position=last_pos)
part = s[last:]
if part:
return (last_pos[0] + lines, column)
-def parse(s, name=None, line_offset=0, delimeters=None):
+def parse(s, name=None, line_offset=0, delimiters=None):
- if delimeters is None:
- delimeters = (Template.default_namespace['start_braces'],
+ if delimiters is None:
+ delimiters = (Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'])
- tokens = lex(s, name=name, line_offset=line_offset, delimeters=delimeters)
+ tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters)
result = []
while tokens:
next_chunk, tokens = parse_expr(tokens, name)
__all__ = ['PY3', 'b', 'basestring_', 'bytes', 'next', 'is_unicode',
'iteritems']
-PY3 = True if sys.version_info[0] == 3 else False
+PY3 = sys.version_info[0] >= 3
if sys.version_info[0] < 3:
import zipfile
import tarfile
-OPENBLAS_V = 'v0.3.5'
-OPENBLAS_LONG = 'v0.3.5-274-g6a8b4269'
+OPENBLAS_V = 'v0.3.7'
+OPENBLAS_LONG = 'v0.3.7'
BASE_LOC = ''
RACKSPACE = 'https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com'
ARCHITECTURES = ['', 'windows', 'darwin', 'arm', 'x86', 'ppc64']
# https://github.com/tylerjereddy/openblas-static-gcc/tree/master/ARMv8
# build done on GCC compile farm machine named gcc115
# tarball uploaded manually to an unshared Dropbox location
- filename = ('https://www.dropbox.com/s/pbqkxzlmih4cky1/'
+ filename = ('https://www.dropbox.com/s/vdeckao4omss187/'
'openblas-{}-armv8.tar.gz?dl=1'.format(OPENBLAS_V))
typ = 'tar.gz'
elif arch == 'ppc64':
# https://github.com/tylerjereddy/openblas-static-gcc/blob/master/power8
# built on GCC compile farm machine named gcc112
# manually uploaded tarball to an unshared Dropbox location
- filename = ('https://www.dropbox.com/s/zcwhk7c2zptwy0s/'
+ filename = ('https://www.dropbox.com/s/yt0d2j86x1j8nh1/'
'openblas-{}-ppc64le-power8.tar.gz?dl=1'.format(OPENBLAS_V))
typ = 'tar.gz'
elif arch == 'darwin':
(cd pypy3; tar --strip-components=1 -xf ../pypy.tar.bz2)
pypy3/bin/pypy3 -mensurepip
pypy3/bin/pypy3 -m pip install --upgrade pip setuptools
-pypy3/bin/pypy3 -m pip install --user cython==0.29.0 pytest pytz --no-warn-script-location
+pypy3/bin/pypy3 -m pip install --user -r test_requirements.txt --no-warn-script-location
echo
echo pypy3 version
pypy3/bin/pypy3 -c "import sys; print(sys.version)"
echo
-pypy3/bin/pypy3 runtests.py --show-build-log -- -rsx \
+pypy3/bin/pypy3 runtests.py --debug-info --show-build-log -v -- -rsx \
--junitxml=junit/test-results.xml --durations 10
echo Make sure the correct openblas has been linked in
"""
refguide_check.py [OPTIONS] [-- ARGS]
-Check for a NumPy submodule whether the objects in its __all__ dict
-correspond to the objects included in the reference guide.
+- Check for a NumPy submodule whether the objects in its __all__ dict
+ correspond to the objects included in the reference guide.
+- Check docstring examples
+- Check example blocks in RST files
Example of usage::
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
-in docstrings. This is different from doctesting [we do not aim to have
-numpy docstrings doctestable!], this is just to make sure that code in
-docstrings is valid python::
+in docstrings::
- $ python refguide_check.py --doctests optimize
+ $ python refguide_check.py --doctests ma
+or in RST-based documentation::
+
+ $ python refguide_check.py --rst docs
"""
from __future__ import print_function
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
+SKIPBLOCK = doctest.register_optionflag('SKIPBLOCK')
+
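Judging from how the flag is consumed further down (a whole chunk of examples
is skipped when any one of them carries it), a documentation example can opt
out of checking with the standard doctest directive syntax; a hypothetical
instance::

    >>> import numpy as np
    >>> np.random.rand(2)  # doctest: +SKIPBLOCK
    array([0.5488135 , 0.71518937])

Unlike ``# doctest: +SKIP``, which silences a single example, this flag makes
refguide_check drop the surrounding block entirely.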
if parse_version(sphinx.__version__) >= parse_version('1.5'):
# Enable specific Sphinx directives
- from sphinx.directives import SeeAlso, Only
+ from sphinx.directives.other import SeeAlso, Only
directives.register_directive('seealso', SeeAlso)
directives.register_directive('only', Only)
else:
'numpy.lib.Repository',
])
+# Skip non-numpy RST files and historical release notes.
+# Any single-directory exact match will skip the directory and all subdirs.
+# Any exact match (like 'doc/release') will scan subdirs but skip files in
+# the matched directory.
+# Any filename will skip that file.
+RST_SKIPLIST = [
+ 'scipy-sphinx-theme',
+ 'sphinxext',
+ 'neps',
+ 'changelog',
+ 'doc/release',
+ 'doc/source/release',
+ 'c-info.ufunc-tutorial.rst',
+]
+
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
return only_all, only_ref, missing
+
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
pass
return False
+
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
+ """
+    Check that `all_dict` is consistent with the `names` in `module_name`;
+    for instance, that there are no deprecated or extra objects.
+ """
num_all = len(all_dict)
num_ref = len(names)
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
+ 'numpy': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
- 'Inf': np.inf,}
+ 'Inf': np.inf,
+ 'StringIO': io.StringIO,
+}
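``StringIO`` earns its place in the namespace because many docstring examples
build small in-memory files; with it pre-bound, a doctest along these lines (a
representative sketch, not quoted from any particular docstring) runs
unmodified::

    >>> s = StringIO("0 1\n2 3")
    >>> np.loadtxt(s)
    array([[0., 1.],
           [2., 3.]])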
class DTRunner(doctest.DocTestRunner):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
- self.ns = dict(CHECK_NAMESPACE)
+ self.ns = CHECK_NAMESPACE
else:
self.ns = ns
# and then compare the tuples.
try:
num = len(a_want)
- regex = ('[\w\d_]+\(' +
- ', '.join(['[\w\d_]+=(.+)']*num) +
- '\)')
+ regex = (r'[\w\d_]+\(' +
+ ', '.join([r'[\w\d_]+=(.+)']*num) +
+ r'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# try to ensure random seed is NOT reproducible
np.random.seed(None)
+ ns = {}
for t in tests:
+        # We broke the tests up into chunks to try to avoid PSEUDOCODE.
+        # This has the unfortunate side effect of resetting the global
+        # namespace for each test chunk, so variables would be "lost" after
+        # a chunk. Chain the globals between chunks to avoid this.
+ t.globs.update(ns)
t.filename = short_path(t.filename, cwd)
- fails, successes = runner.run(t, out=out)
+ # Process our options
+        if any(SKIPBLOCK in ex.options for ex in t.examples):
+ continue
+ fails, successes = runner.run(t, out=out, clear_globs=False)
if fails > 0:
success = False
+ ns = t.globs
finally:
sys.stderr = old_stderr
os.chdir(cwd)
5
"""
- results = []
-
if ns is None:
- ns = dict(DEFAULT_NAMESPACE)
+ ns = CHECK_NAMESPACE
+ results = []
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
+ base_line_no = 0
for part in text.split('\n\n'):
- tests = parser.get_doctest(part, ns, fname, fname, 0)
+ try:
+ tests = parser.get_doctest(part, ns, fname, fname, base_line_no)
+ except ValueError as e:
+ if e.args[0].startswith('line '):
+ # fix line number since `parser.get_doctest` does not increment
+ # the reported line number by base_line_no in the error message
+ parts = e.args[0].split()
+ parts[1] = str(int(parts[1]) + base_line_no)
+ e.args = (' '.join(parts),) + e.args[1:]
+ raise
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
- good_parts += [part]
+ good_parts.append((part, base_line_no))
+ base_line_no += part.count('\n') + 2
# Reassemble the good bits and doctest them:
- good_text = '\n\n'.join(good_parts)
- tests = parser.get_doctest(good_text, ns, fname, fname, 0)
- success, output = _run_doctests([tests], full_name, verbose,
+ tests = []
+ for good_text, line_no in good_parts:
+ tests.append(parser.get_doctest(good_text, ns, fname, fname, line_no))
+ success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
return results
+def iter_included_files(base_path, verbose=0, suffixes=('.rst',)):
+ """
+    Generator function to walk `base_path` and its subdirectories, skipping
+    files or directories in RST_SKIPLIST, and yielding each file with a
+    suffix in `suffixes`.
+ """
+    if os.path.isfile(base_path):
+ yield base_path
+ for dir_name, subdirs, files in os.walk(base_path, topdown=True):
+ if dir_name in RST_SKIPLIST:
+ if verbose > 0:
+                sys.stderr.write('skipping files in %s\n' % dir_name)
+ files = []
+ for p in RST_SKIPLIST:
+ if p in subdirs:
+ if verbose > 0:
+                    sys.stderr.write('skipping %s and subdirs\n' % p)
+ subdirs.remove(p)
+ for f in files:
+ if (os.path.splitext(f)[1] in suffixes and
+ f not in RST_SKIPLIST):
+ yield os.path.join(dir_name, f)
+
+
+def check_documentation(base_path, results, args, dots):
+ """
+ Check examples in any *.rst located inside `base_path`.
+ Add the output to `results`.
+
+ See Also
+ --------
+ check_doctests_testfile
+ """
+ for filename in iter_included_files(base_path, args.verbose):
+ if dots:
+ sys.stderr.write(filename + ' ')
+ sys.stderr.flush()
+
+ tut_results = check_doctests_testfile(
+ filename,
+ (args.verbose >= 2), dots=dots,
+ doctest_warnings=args.doctest_warnings)
+
+ # stub out a "module" which is needed when reporting the result
+ def scratch():
+ pass
+ scratch.__name__ = filename
+ results.append((scratch, tut_results))
+ if dots:
+ sys.stderr.write('\n')
+ sys.stderr.flush()
+
+
def init_matplotlib():
global HAVE_MATPLOTLIB
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
- parser.add_argument("--doctests", action="store_true", help="Run also doctests")
+ parser.add_argument("--doctests", action="store_true",
+                        help="Also run doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
- parser.add_argument("--skip-tutorial", action="store_true",
- help="Skip running doctests in the tutorial.")
+ parser.add_argument("--rst", nargs='?', const='doc', default=None,
+                        help=("Also run examples from *.rst files found by "
+                              "walking the specified directory(s); "
+                              "defaults to 'doc'"))
args = parser.parse_args(argv)
modules = []
names_dict = {}
- if args.module_names:
- args.skip_tutorial = True
- else:
+ if not args.module_names:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
if name not in module_names:
module_names.append(name)
+ dots = True
+ success = True
+ results = []
+ errormsgs = []
+
+ if args.doctests or args.rst:
+ init_matplotlib()
+
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
if submodule_name in args.module_names:
modules.append(module)
- dots = True
- success = True
- results = []
-
- print("Running checks for %d modules:" % (len(modules),))
- if args.doctests or not args.skip_tutorial:
- init_matplotlib()
-
- for module in modules:
- if dots:
- if module is not modules[0]:
- sys.stderr.write(' ')
- sys.stderr.write(module.__name__ + ' ')
- sys.stderr.flush()
+ if args.doctests or not args.rst:
+ print("Running checks for %d modules:" % (len(modules),))
+ for module in modules:
+ if dots:
+ sys.stderr.write(module.__name__ + ' ')
+ sys.stderr.flush()
- all_dict, deprecated, others = get_all_dict(module)
- names = names_dict.get(module.__name__, set())
+ all_dict, deprecated, others = get_all_dict(module)
+ names = names_dict.get(module.__name__, set())
- mod_results = []
- mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
- mod_results += check_rest(module, set(names).difference(deprecated),
- dots=dots)
- if args.doctests:
- mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
- doctest_warnings=args.doctest_warnings)
+ mod_results = []
+ mod_results += check_items(all_dict, names, deprecated, others,
+ module.__name__)
+ mod_results += check_rest(module, set(names).difference(deprecated),
+ dots=dots)
+ if args.doctests:
+ mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
+ doctest_warnings=args.doctest_warnings)
- for v in mod_results:
- assert isinstance(v, tuple), v
+ for v in mod_results:
+ assert isinstance(v, tuple), v
- results.append((module, mod_results))
+ results.append((module, mod_results))
- if dots:
- sys.stderr.write("\n")
- sys.stderr.flush()
-
- if not args.skip_tutorial:
- base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
- tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')
- print('\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))
- for filename in sorted(glob.glob(tut_path)):
if dots:
sys.stderr.write('\n')
- sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
- tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
- dots=dots, doctest_warnings=args.doctest_warnings)
-
- def scratch(): pass # stub out a "module", see below
- scratch.__name__ = filename
- results.append((scratch, tut_results))
+ if args.rst:
+ base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
+ rst_path = os.path.relpath(os.path.join(base_dir, args.rst))
+ if os.path.exists(rst_path):
+ print('\nChecking files in %s:' % rst_path)
+ check_documentation(rst_path, results, args, dots)
+ else:
+ sys.stderr.write(f'\ninvalid --rst argument "{args.rst}"')
+ errormsgs.append('invalid directory argument to --rst')
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
- all_success = True
-
for module, mod_results in results:
success = all(x[1] for x in mod_results)
- all_success = all_success and success
+ if not success:
+ errormsgs.append(f'failed checking {module.__name__}')
if success and args.verbose == 0:
continue
print(output.strip())
print("")
- if all_success:
- print("\nOK: refguide and doctests checks passed!")
+ if len(errormsgs) == 0:
+ print("\nOK: all checks passed!")
sys.exit(0)
else:
- print("\nERROR: refguide or doctests have errors")
+ print('\nERROR: ', '\n '.join(errormsgs))
sys.exit(1)
# Add the distutils-generated build directory to the python search path and then
# import the extension module
-libDir = "lib.%s-%s" % (get_platform(), sys.version[:3])
+libDir = "lib.{}-{}.{}".format(get_platform(), *sys.version_info[:2])
sys.path.insert(0, os.path.join("build", libDir))
import Farray
+++ /dev/null
-#!/usr/bin/env python
-from __future__ import division, absolute_import, print_function
-
-# A simple script to test the installed version of numpy by calling
-# 'numpy.test()'. Key features:
-# -- convenient command-line syntax
-# -- sets exit status appropriately, useful for automated test environments
-
-# It would be better to set this up as a module in the numpy namespace, so
-# that it could be run as:
-# python -m numpy.run_tests <args>
-# But, python2.4's -m switch only works with top-level modules, not modules
-# that are inside packages. So, once we drop 2.4 support, maybe...
-
-import sys, os
-# In case we are run from the source directory, we don't want to import numpy
-# from there, we want to import the installed version:
-sys.path.pop(0)
-
-from optparse import OptionParser
-parser = OptionParser("usage: %prog [options] -- [nosetests options]")
-parser.add_option("-v", "--verbose",
- action="count", dest="verbose", default=1,
- help="increase verbosity")
-parser.add_option("--doctests",
- action="store_true", dest="doctests", default=False,
- help="Run doctests in module")
-parser.add_option("--coverage",
- action="store_true", dest="coverage", default=False,
- help="report coverage of NumPy code (requires 'pytest-cov' module")
-parser.add_option("-m", "--mode",
- action="store", dest="mode", default="fast",
- help="'fast', 'full', or something that could be "
- "passed to pytest [default: %default]")
-parser.add_option("-n", "--durations",
- dest="durations", default=-1,
- help="show time to run slowest N tests [default: -1]")
-(options, args) = parser.parse_args()
-
-import numpy
-
-# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
-# The same flags check is also used in the tests to switch behavior.
-if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
- if not numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
- sys.exit(1)
-elif numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
- sys.exit(1)
-
-if options.coverage:
- # Produce code coverage XML report for codecov.io
- args += ["--cov-report=xml"]
-
-result = numpy.test(options.mode,
- verbose=options.verbose,
- extra_argv=args,
- doctests=options.doctests,
- durations=int(options.durations),
- coverage=options.coverage)
-
-if result:
- sys.exit(0)
-else:
- sys.exit(1)
source venv/bin/activate
python -V
-if [ -n "$INSTALL_PICKLE5" ]; then
- pip install pickle5
-fi
-
+popd
pip install --upgrade pip setuptools
-pip install pytz cython pytest==5.1.2
+pip install -r test_requirements.txt
if [ -n "$USE_ASV" ]; then pip install asv; fi
-popd
setup_base()
{
- # use default python flags but remoge sign-compare
+ # use default python flags but remove sign-compare
sysflags="$($PYTHON -c "from distutils import sysconfig; \
print (sysconfig.get_config_var('CFLAGS'))")"
export CFLAGS="$sysflags $werrors -Wlogical-op -Wno-sign-compare"
- # use c99
- export CFLAGS=$CFLAGS" -std=c99"
# We used to use 'setup.py install' here, but that has the terrible
# behaviour that if a copy of the package is already installed in the
# install location, then the new copy just gets dropped on top of it.
else
# Python3.5-dbg on travis seems to need this
export CFLAGS=$CFLAGS" -Wno-maybe-uninitialized"
- $PYTHON setup.py build_ext --inplace 2>&1 | tee log
+ $PYTHON setup.py build build_src --verbose-cfg build_ext --inplace 2>&1 | tee log
fi
grep -v "_configtest" log \
- | grep -vE "ld returned 1|no previously-included files matching|manifest_maker: standard file '-c'" \
+ | grep -vE "ld returned 1|no files found matching" \
+ | grep -vE "no previously-included files matching" \
+ | grep -vE "manifest_maker: standard file '-c'" \
| grep -E "warning\>" \
| tee warnings
if [ "$LAPACK" != "None" ]; then
run_test()
{
+ $PIP install -r test_requirements.txt
+
if [ -n "$USE_DEBUG" ]; then
export PYTHONPATH=$PWD
fi
+    # pytest aborts when running --durations with python3.6-dbg, so only
+    # enable it for non-debug tests. That is a CPython bug fixed in later
+    # versions of python3.7, but python3.7-dbg is not currently available
+    # on Travis CI.
+ if [ -z "$USE_DEBUG" ]; then
+ DURATIONS_FLAG="--durations 10"
+ fi
+
if [ -n "$RUN_COVERAGE" ]; then
- $PIP install pytest-cov
COVERAGE_FLAG=--coverage
fi
"import os; import numpy; print(os.path.dirname(numpy.__file__))")
export PYTHONWARNINGS=default
- if [ -n "$PPC64_LE" ]; then
+ if [ -n "$CHECK_BLAS" ]; then
$PYTHON ../tools/openblas_support.py --check_version $OpenBLAS_version
fi
if [ -n "$RUN_FULL_TESTS" ]; then
export PYTHONWARNINGS="ignore::DeprecationWarning:virtualenv"
- $PYTHON ../tools/test-installed-numpy.py -v --durations 10 --mode=full $COVERAGE_FLAG
+ $PYTHON -b ../runtests.py -n -v --mode=full $DURATIONS_FLAG $COVERAGE_FLAG
else
- $PYTHON ../tools/test-installed-numpy.py -v --durations 10
+ $PYTHON ../runtests.py -n -v $DURATIONS_FLAG
fi
if [ -n "$RUN_COVERAGE" ]; then
$PIP install -U virtualenv
# ensure some warnings are not issued
export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result"
- # use c99
- export CFLAGS=$CFLAGS" -std=c99"
# adjust gcc flags if C coverage requested
if [ -n "$RUN_COVERAGE" ]; then
export NPY_DISTUTILS_APPEND_FLAGS=1
export F90='gfortran --coverage'
export LDFLAGS='--coverage'
fi
- $PYTHON setup.py bdist_wheel
+ $PYTHON setup.py build build_src --verbose-cfg bdist_wheel
# Make another virtualenv to install into
virtualenv --python=`which $PYTHON` venv-for-wheel
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
$PIP install --pre --no-index --upgrade --find-links=. numpy
- $PIP install nose pytest
-
- if [ -n "$INSTALL_PICKLE5" ]; then
- $PIP install pickle5
- fi
-
popd
+
run_test
+
elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
# use an up-to-date pip / setuptools inside the venv
$PIP install -U virtualenv
$PYTHON -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)"
# ensure some warnings are not issued
export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result"
- # use c99
- export CFLAGS=$CFLAGS" -std=c99"
$PYTHON setup.py sdist
# Make another virtualenv to install into
virtualenv --python=`which $PYTHON` venv-for-wheel
# Move out of source directory to avoid finding local numpy
pushd dist
$PIP install numpy*
- $PIP install nose pytest
- if [ -n "$INSTALL_PICKLE5" ]; then
- $PIP install pickle5
- fi
-
popd
run_test
else
# To run against a specific subset of Python versions, use:
# tox -e py37
-# Extra arguments will be passed to test-installed-numpy.py. To run
+# Extra arguments will be passed to runtests.py. To run
# the full testsuite:
# tox full
# To run with extra verbosity:
py37-not-relaxed-strides
[testenv]
-deps=
- pytest
+deps= -Ur{toxinidir}/test_requirements.txt
changedir={envdir}
-commands={envpython} {toxinidir}/tools/test-installed-numpy.py --mode=full {posargs:}
+commands={envpython} -b {toxinidir}/runtests.py --mode=full {posargs:}
[testenv:py37-not-relaxed-strides]
basepython=python3.7
# if you want it:
[testenv:debug]
basepython=python-dbg
-commands=gdb --args {envpython} {toxinidir}/tools/test-installed-numpy.py --mode=full {posargs:}
+commands=gdb --args {envpython} {toxinidir}/runtests.py --mode=full {posargs:}