Imported Upstream version 1.16.1 (upstream/1.16.1)
author DongHun Kwak <dh0128.kwak@samsung.com>
Thu, 31 Dec 2020 00:47:30 +0000 (09:47 +0900)
committer DongHun Kwak <dh0128.kwak@samsung.com>
Thu, 31 Dec 2020 00:47:30 +0000 (09:47 +0900)
90 files changed:
.mailmap
azure-pipelines.yml
benchmarks/benchmarks/bench_overrides.py
doc/changelog/1.16.1-changelog.rst [new file with mode: 0644]
doc/neps/nep-0018-array-function-protocol.rst
doc/release/1.16.0-notes.rst
doc/release/1.16.1-notes.rst [new file with mode: 0644]
doc/source/f2py/run_main_session.dat
doc/source/f2py/usage.rst
doc/source/release.rst
numpy/compat/py3k.py
numpy/compat/tests/test_compat.py
numpy/core/__init__.py
numpy/core/_internal.py
numpy/core/_methods.py
numpy/core/code_generators/genapi.py
numpy/core/code_generators/generate_umath.py
numpy/core/multiarray.py
numpy/core/overrides.py
numpy/core/records.py
numpy/core/setup.py
numpy/core/shape_base.py
numpy/core/src/common/get_attr_string.h
numpy/core/src/multiarray/_multiarray_module_test.c [new file with mode: 0644]
numpy/core/src/multiarray/_multiarray_tests.c.src
numpy/core/src/multiarray/arrayfunction_override.c [new file with mode: 0644]
numpy/core/src/multiarray/arrayfunction_override.h [new file with mode: 0644]
numpy/core/src/multiarray/arraytypes.c.src
numpy/core/src/multiarray/buffer.c
numpy/core/src/multiarray/common.c
numpy/core/src/multiarray/compiled_base.c
numpy/core/src/multiarray/ctors.c
numpy/core/src/multiarray/datetime.c
numpy/core/src/multiarray/descriptor.c
numpy/core/src/multiarray/dtype_transfer.c
numpy/core/src/multiarray/methods.c
numpy/core/src/multiarray/multiarraymodule.c
numpy/core/src/multiarray/multiarraymodule.h
numpy/core/src/multiarray/nditer_constr.c
numpy/core/src/multiarray/nditer_pywrap.c
numpy/core/src/multiarray/number.c
numpy/core/src/multiarray/refcount.c
numpy/core/src/multiarray/scalartypes.c.src
numpy/core/src/multiarray/usertypes.c
numpy/core/src/npymath/halffloat.c
numpy/core/src/umath/_struct_ufunc_tests.c.src
numpy/core/src/umath/_umath_tests.c.src
numpy/core/src/umath/loops.c.src
numpy/core/src/umath/loops.h.src
numpy/core/src/umath/reduction.c
numpy/core/src/umath/ufunc_object.c
numpy/core/src/umath/ufunc_type_resolution.c
numpy/core/src/umath/ufunc_type_resolution.h
numpy/core/src/umath/umathmodule.c
numpy/core/tests/test_arrayprint.py
numpy/core/tests/test_datetime.py
numpy/core/tests/test_dtype.py
numpy/core/tests/test_half.py
numpy/core/tests/test_multiarray.py
numpy/core/tests/test_overrides.py
numpy/core/tests/test_regression.py
numpy/core/tests/test_shape_base.py
numpy/ctypeslib.py
numpy/distutils/fcompiler/__init__.py
numpy/distutils/fcompiler/absoft.py
numpy/distutils/fcompiler/environment.py
numpy/distutils/fcompiler/gnu.py
numpy/distutils/fcompiler/intel.py
numpy/distutils/fcompiler/pg.py
numpy/distutils/fcompiler/sun.py
numpy/distutils/system_info.py
numpy/distutils/tests/test_fcompiler.py
numpy/f2py/__init__.py
numpy/f2py/f2py2e.py
numpy/f2py/tests/test_compile_function.py
numpy/lib/_datasource.py
numpy/lib/arraysetops.py
numpy/lib/function_base.py
numpy/lib/tests/test_arraysetops.py
numpy/lib/twodim_base.py
numpy/random/mtrand/mtrand.pyx
numpy/random/tests/test_random.py
numpy/testing/_private/nosetester.py
numpy/testing/_private/utils.py
numpy/testing/tests/test_utils.py
numpy/tests/test_ctypeslib.py
numpy/tests/test_scripts.py
pavement.py
setup.py
tools/travis-test.sh

index db67cfeb33452f03701c41b4078168a6e7799e8f..e57e1229791b258e12258eea4d5aba0d447cfa02 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -59,6 +59,7 @@ Christoph Gohlke <cgohlke@uci.edu> Christolph Gohlke <cgohlke@uci.edu>
 Daniel B Allan <daniel.b.allan@gmail.com> danielballan <daniel.b.allan@gmail.com>
 Daniel da Silva <mail@danieldasilva.org> Daniel da Silva <daniel@meltingwax.net>
 Daniel da Silva <mail@danieldasilva.org> Daniel da Silva <var.mail.daniel@gmail.com>
+Daniel Hrisca <daniel.hrisca@gmail.com> danielhrisca <daniel.hrisca@gmail.com>
 Daniel J Farrell <danieljfarrel@me.com> danieljfarrell <danieljfarrel@me.com>
 Daniel Müllner <Daniel Müllner muellner@math.stanford.edu> Daniel <muellner@localhost.localdomain>
 Daniel Müllner <Daniel Müllner muellner@math.stanford.edu> dmuellner <Daniel Müllner muellner@math.stanford.edu>
index 7a98bedd66c1720ae9f38628aec9cd4b468d9622..32144de802c5224703ea6cdbd87b32221401e9f2 100644 (file)
@@ -160,7 +160,7 @@ jobs:
       cp $tmpdir\$(BITS)\lib\libopenblas_v0.3.3-186-g701ea883-gcc_7_1_0.a $target
     displayName: 'Download / Install OpenBLAS'
   - powershell: |
-      choco install -y mingw --forcex86 --force
+      choco install -y mingw --forcex86 --force --version=5.3.0
     displayName: 'Install 32-bit mingw for 32-bit builds'
     condition: eq(variables['BITS'], 32)
   - script: python -m pip install cython nose pytz pytest
index 155d44fa968a73164939105a6fde0335a83522c7..58572d07d05066112e56bc41cca6af956e2b65fe 100644 (file)
@@ -24,10 +24,10 @@ def mock_broadcast_to(array, shape, subok=False):
 
 
 def _concatenate_dispatcher(arrays, axis=None, out=None):
-    for array in arrays:
-        yield array
     if out is not None:
-        yield out
+        arrays = list(arrays)
+        arrays.append(out)
+    return arrays
 
 
 @array_function_dispatch(_concatenate_dispatcher)
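
The hunk above replaces a generator-based dispatcher with one that returns a
sequence. A generator can only be consumed once, while the override machinery
(ported to C in this release) treats the dispatcher result as a plain
sequence; returning the incoming list unchanged in the common ``out=None``
case is also cheaper. A minimal standalone sketch of the two styles (not
NumPy's actual code):

    def dispatcher_generator(arrays, axis=None, out=None):
        # Old style: lazily yields each relevant argument, usable only once.
        for array in arrays:
            yield array
        if out is not None:
            yield out

    def dispatcher_list(arrays, axis=None, out=None):
        # New style: returns a reusable sequence, copying only when
        # ``out`` actually needs to be appended.
        if out is not None:
            arrays = list(arrays)
            arrays.append(out)
        return arrays

    args = ([1, 2], [3, 4])
    assert list(dispatcher_generator(args)) == list(dispatcher_list(args))
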
diff --git a/doc/changelog/1.16.1-changelog.rst b/doc/changelog/1.16.1-changelog.rst
new file mode 100644 (file)
index 0000000..aaa803c
--- /dev/null
@@ -0,0 +1,62 @@
+
+Contributors
+============
+
+A total of 16 people contributed to this release.  People with a "+" by their
+names contributed a patch for the first time.
+
+* Antoine Pitrou
+* Arcesio Castaneda Medina +
+* Charles Harris
+* Chris Markiewicz +
+* Christoph Gohlke
+* Christopher J. Markiewicz +
+* Daniel Hrisca +
+* EelcoPeacs +
+* Eric Wieser
+* Kevin Sheppard
+* Matti Picus
+* OBATA Akio +
+* Ralf Gommers
+* Sebastian Berg
+* Stephan Hoyer
+* Tyler Reddy
+
+Pull requests merged
+====================
+
+A total of 32 pull requests were merged for this release.
+
+* `#12754 <https://github.com/numpy/numpy/pull/12754>`__: BUG: Check paths are unicode, bytes or path-like
+* `#12767 <https://github.com/numpy/numpy/pull/12767>`__: ENH: add mm->q floordiv
+* `#12768 <https://github.com/numpy/numpy/pull/12768>`__: ENH: port np.core.overrides to C for speed
+* `#12769 <https://github.com/numpy/numpy/pull/12769>`__: ENH: Add np.ctypeslib.as_ctypes_type(dtype), improve `np.ctypeslib.as_ctypes`
+* `#12771 <https://github.com/numpy/numpy/pull/12771>`__: BUG: Ensure probabilities are not NaN in choice
+* `#12772 <https://github.com/numpy/numpy/pull/12772>`__: MAINT: add warning to numpy.distutils for LDFLAGS append behavior.
+* `#12773 <https://github.com/numpy/numpy/pull/12773>`__: ENH: add "max difference" messages to np.testing.assert_array_equal...
+* `#12774 <https://github.com/numpy/numpy/pull/12774>`__: BUG: Fix incorrect/missing reference cleanups found using valgrind
+* `#12776 <https://github.com/numpy/numpy/pull/12776>`__: BUG,TST: Remove the misguided `run_command` that wraps subprocess
+* `#12777 <https://github.com/numpy/numpy/pull/12777>`__: DOC, TST: Clean up matplotlib imports
+* `#12781 <https://github.com/numpy/numpy/pull/12781>`__: BUG: Fix reference counting for subarrays containing objects
+* `#12782 <https://github.com/numpy/numpy/pull/12782>`__: BUG: Ensure failing memory allocations are reported
+* `#12784 <https://github.com/numpy/numpy/pull/12784>`__: BUG: Fix leak of void scalar buffer info
+* `#12788 <https://github.com/numpy/numpy/pull/12788>`__: MAINT: Change the order of checking for local file.
+* `#12808 <https://github.com/numpy/numpy/pull/12808>`__: BUG: loosen kwargs requirements in ediff1d
+* `#12809 <https://github.com/numpy/numpy/pull/12809>`__: DOC: clarify the extend of __array_function__ support in NumPy...
+* `#12810 <https://github.com/numpy/numpy/pull/12810>`__: BUG: Check that dtype or formats arguments are not None.
+* `#12811 <https://github.com/numpy/numpy/pull/12811>`__: BUG: fix f2py problem to build wrappers using PGI's Fortran
+* `#12812 <https://github.com/numpy/numpy/pull/12812>`__: BUG: double decref of dtype in failure codepath. Test and fix
+* `#12813 <https://github.com/numpy/numpy/pull/12813>`__: BUG, DOC: test, fix that f2py.compile accepts str and bytes,...
+* `#12816 <https://github.com/numpy/numpy/pull/12816>`__: BUG: resolve writeback in arr_insert failure paths
+* `#12820 <https://github.com/numpy/numpy/pull/12820>`__: ENH: Add mm->qm divmod
+* `#12843 <https://github.com/numpy/numpy/pull/12843>`__: BUG: fix to check before apply `shlex.split`
+* `#12844 <https://github.com/numpy/numpy/pull/12844>`__: BUG: Fix SystemError when pickling datetime64 array with pickle5
+* `#12845 <https://github.com/numpy/numpy/pull/12845>`__: BUG: Fix rounding of denormals in double and float to half casts.
+* `#12868 <https://github.com/numpy/numpy/pull/12868>`__: TEST: pin mingw version
+* `#12869 <https://github.com/numpy/numpy/pull/12869>`__: BUG: ndarrays pickled by 1.16 cannot be loaded by 1.15.4 and...
+* `#12870 <https://github.com/numpy/numpy/pull/12870>`__: BUG: do not Py_DECREF NULL pointer
+* `#12890 <https://github.com/numpy/numpy/pull/12890>`__: ENH: add _dtype_ctype to namespace for freeze analysis
+* `#12891 <https://github.com/numpy/numpy/pull/12891>`__: BUG: fail if old multiarray module detected
+* `#12898 <https://github.com/numpy/numpy/pull/12898>`__: BUG: Do not double-quote arguments passed on to the linker
+* `#12899 <https://github.com/numpy/numpy/pull/12899>`__: BUG: Do not insert extra double quote into preprocessor macros
+* `#12902 <https://github.com/numpy/numpy/pull/12902>`__: DOC: Prepare for 1.16.1 release.
index 988c9086a631d5bda3892a6ef1925be0b1a71244..ffe780c795fadcbeb79e7cf65135828871f27488 100644 (file)
@@ -340,7 +340,7 @@ Changes within NumPy functions
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Given a function defining the above behavior, for now call it
-``array_function_implementation_or_override``, we now need to call that
+``implement_array_function``, we now need to call that
 function from within every relevant NumPy function. This is a pervasive change,
 but of fairly simple and innocuous code that should complete quickly and
 without effect if no arguments implement the ``__array_function__``
@@ -358,7 +358,7 @@ functions:
             @functools.wraps(implementation)
             def public_api(*args, **kwargs):
                 relevant_args = dispatcher(*args, **kwargs)
-                return array_function_implementation_or_override(
+                return implement_array_function(
                     implementation, public_api, relevant_args, args, kwargs)
             return public_api
         return decorator
@@ -395,11 +395,11 @@ It's particularly worth calling out the decorator's use of
 
 In a few cases, it would not make sense to use the ``array_function_dispatch``
 decorator directly, but override implementation in terms of
-``array_function_implementation_or_override`` should still be straightforward.
+``implement_array_function`` should still be straightforward.
 
 - Functions written entirely in C (e.g., ``np.concatenate``) can't use
   decorators, but they could still use a C equivalent of
-  ``array_function_implementation_or_override``. If performance is not a
+  ``implement_array_function``. If performance is not a
   concern, they could also be easily wrapped with a small Python wrapper.
 - ``np.einsum`` does complicated argument parsing to handle two different
   function signatures. It would probably be best to avoid the overhead of
@@ -475,7 +475,7 @@ the difference in speed between the ``ndarray.sum()`` method (1.6 us) and
 ``numpy.sum()`` function (2.6 us).
 
 Fortunately, we expect significantly less overhead with a C implementation of
-``array_function_implementation_or_override``, which is where the bulk of the
+``implement_array_function``, which is where the bulk of the
 runtime is. This would leave the ``array_function_dispatch`` decorator and
 dispatcher function on their own adding about 0.5 microseconds of overhead,
 for perhaps ~1 microsecond of overhead in the typical case.
@@ -503,7 +503,7 @@ already wrap a limited subset of SciPy functionality (e.g.,
 
 If we want to do this, we should expose at least the decorator
 ``array_function_dispatch()`` and possibly also the lower level
-``array_function_implementation_or_override()`` as part of NumPy's public API.
+``implement_array_function()`` as part of NumPy's public API.
 
 Non-goals
 ---------
@@ -807,7 +807,7 @@ public API.
 
 ``types`` is included because we can compute it almost for free as part of
 collecting ``__array_function__`` implementations to call in
-``array_function_implementation_or_override``. We also think it will be used
+``implement_array_function``. We also think it will be used
 by many ``__array_function__`` methods, which otherwise would need to extract
 this information themselves. It would be equivalently easy to provide single
 instances of each type, but providing only types seemed cleaner.
@@ -823,7 +823,7 @@ There are two other arguments that we think *might* be important to pass to
 - Access to the non-dispatched implementation (i.e., before wrapping with
   ``array_function_dispatch``) in ``ndarray.__array_function__`` would allow
   us to drop special case logic for that method from
-  ``array_function_implementation_or_override``.
+  ``implement_array_function``.
 - Access to the ``dispatcher`` function passed into
   ``array_function_dispatch()`` would allow ``__array_function__``
   implementations to determine the list of "array-like" arguments in a generic
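
For orientation alongside the NEP text, a compact sketch of the protocol from
the consumer side; ``DiskArray`` is hypothetical and handles only
``np.concatenate``:

    import numpy as np

    class DiskArray(object):
        def __init__(self, data):
            self.data = np.asarray(data)

        def __array_function__(self, func, types, args, kwargs):
            # ``types`` allows bailing out early on unknown mixed operands.
            if not all(issubclass(t, (np.ndarray, DiskArray)) for t in types):
                return NotImplemented
            if func is np.concatenate:
                arrays = [np.asarray(getattr(a, 'data', a)) for a in args[0]]
                return DiskArray(np.concatenate(arrays, **kwargs))
            return NotImplemented

    # With the override machinery enabled (opt-in in 1.16, see the release
    # notes below), np.concatenate([DiskArray([1]), DiskArray([2])]) would
    # return a DiskArray instead of an ndarray.
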
index de636933f4c8e041483da79130818a096ff72176..341d5f71566dffd077f3e8c061777b153c994b28 100644 (file)
@@ -20,7 +20,7 @@ easier going forward.
 Highlights
 ==========
 
-* Experimental support for overriding numpy functions,
+* Experimental (opt-in only) support for overriding numpy functions,
   see ``__array_function__`` below.
 
 * The ``matmul`` function is now a ufunc. This provides better
@@ -518,14 +518,16 @@ accessing invalid memory locations.
 
 NumPy functions now support overrides with ``__array_function__``
 -----------------------------------------------------------------
-It is now possible to override the implementation of almost all NumPy functions
-on non-NumPy arrays by defining a ``__array_function__`` method, as described
-in `NEP 18`_. The sole exception are functions for explicitly casting to NumPy
-arrays such as ``np.array``. As noted in the NEP, this feature remains
-experimental and the details of how to implement such overrides may change in
-the future.
+NumPy has a new experimental mechanism for overriding the implementation of
+almost all NumPy functions on non-NumPy arrays by defining an
+``__array_function__`` method, as described in `NEP 18`_.
+
+This feature has not yet been enabled by default, but has been released to
+facilitate experimentation by potential users. See the NEP for details on
+setting the appropriate environment variable. We expect the NumPy 1.17 release
+will enable overrides by default, which will also be more performant due to a
+new implementation written in C.
 
-.. _`NEP 15` : http://www.numpy.org/neps/nep-0015-merge-multiarray-umath.html
 .. _`NEP 18` : http://www.numpy.org/neps/nep-0018-array-function-protocol.html
 
 Arrays based off readonly buffers cannot be set ``writeable``
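
Since the protocol is opt-in for the 1.16.x series, a quick way to experiment
is to set the environment variable (named in NEP 18 and read once by
``numpy/core/overrides.py``) before the first NumPy import; a sketch:

    import os
    # Must be set before NumPy is imported; overrides.py reads it at import time.
    os.environ['NUMPY_EXPERIMENTAL_ARRAY_FUNCTION'] = '1'

    import numpy as np
    from numpy.core.overrides import ENABLE_ARRAY_FUNCTION
    print(ENABLE_ARRAY_FUNCTION)  # True when overrides are active
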
diff --git a/doc/release/1.16.1-notes.rst b/doc/release/1.16.1-notes.rst
new file mode 100644 (file)
index 0000000..2483b18
--- /dev/null
@@ -0,0 +1,107 @@
+==========================
+NumPy 1.16.1 Release Notes
+==========================
+
+The NumPy 1.16.1 release fixes bugs reported against the 1.16.0 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.4+, which should fix the known threading issues
+found in previous OpenBLAS versions.
+
+Downstream developers building this release should use Cython >= 0.29.2 and, if
+using OpenBLAS, OpenBLAS > v0.3.4.
+
+If you are installing using pip, you may encounter a problem with older
+installed versions of NumPy that pip did not delete becoming mixed with the
+current version, resulting in an ``ImportError``. That problem is particularly
+common on Debian derived distributions due to a modified pip.  The fix is to
+make sure all previous NumPy versions installed by pip have been removed. See
+`#12736 <https://github.com/numpy/numpy/issues/12736>`__ for discussion of the
+issue. Note that previously this problem resulted in an ``AttributeError``.
+
+
+Contributors
+============
+
+A total of 16 people contributed to this release.  People with a "+" by their
+names contributed a patch for the first time.
+
+* Antoine Pitrou
+* Arcesio Castaneda Medina +
+* Charles Harris
+* Chris Markiewicz +
+* Christoph Gohlke
+* Christopher J. Markiewicz +
+* Daniel Hrisca +
+* EelcoPeacs +
+* Eric Wieser
+* Kevin Sheppard
+* Matti Picus
+* OBATA Akio +
+* Ralf Gommers
+* Sebastian Berg
+* Stephan Hoyer
+* Tyler Reddy
+
+
+Enhancements
+============
+
+* `#12767 <https://github.com/numpy/numpy/pull/12767>`__: ENH: add mm->q floordiv
+* `#12768 <https://github.com/numpy/numpy/pull/12768>`__: ENH: port np.core.overrides to C for speed
+* `#12769 <https://github.com/numpy/numpy/pull/12769>`__: ENH: Add np.ctypeslib.as_ctypes_type(dtype), improve `np.ctypeslib.as_ctypes`
+* `#12773 <https://github.com/numpy/numpy/pull/12773>`__: ENH: add "max difference" messages to np.testing.assert_array_equal...
+* `#12820 <https://github.com/numpy/numpy/pull/12820>`__: ENH: Add mm->qm divmod
+* `#12890 <https://github.com/numpy/numpy/pull/12890>`__: ENH: add _dtype_ctype to namespace for freeze analysis
+
+
+Compatibility notes
+===================
+
+* The changed error message emitted by array comparison testing functions may
+  affect doctests. See below for details.
+
+* Casting from double and single denormals to float16 has been corrected.  In
+  some rare cases, this may result in results being rounded up instead of down,
+  changing the last bit (ULP) of the result.
+
+
+New Features
+============
+
+divmod operation is now supported for two ``timedelta64`` operands
+------------------------------------------------------------------
+The divmod operator now handles two ``np.timedelta64`` operands, with
+type signature ``mm->qm``.
+
+
+Improvements
+============
+
+Further improvements to ``ctypes`` support in ``np.ctypeslib``
+--------------------------------------------------------------
+A new ``np.ctypeslib.as_ctypes_type`` function has been added, which can be
+used to convert a `dtype` into a best-guess `ctypes` type. Thanks to this
+new function, ``np.ctypeslib.as_ctypes`` now supports a much wider range of
+array types, including structures, booleans, and integers of non-native
+endianness.
+
+Array comparison assertions include maximum differences
+-------------------------------------------------------
+Error messages from array comparison tests such as
+`np.testing.assert_allclose` now include "max absolute difference" and
+"max relative difference," in addition to the previous "mismatch" percentage.
+This information makes it easier to update absolute and relative error
+tolerances.
+
+
+Changes
+=======
+
+``timedelta64 % 0`` behavior adjusted to return ``NaT``
+-------------------------------------------------------
+The modulus operation with two ``np.timedelta64`` operands now returns
+``NaT`` in the case of division by zero, rather than returning zero.
+
+
+
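
Taken together, the new ``mm->q`` floor division (see the enhancement list
above), the ``mm->qm`` divmod, and the ``timedelta64 % 0`` change can be
illustrated with a short hypothetical session:

    import numpy as np

    a = np.timedelta64(7, 's')
    b = np.timedelta64(3, 's')

    print(a // b)           # 2 -- the new mm->q floor division
    print(np.divmod(a, b))  # (2, numpy.timedelta64(1,'s')) -- the new mm->qm
    print(a % np.timedelta64(0, 's'))  # NaT (with a RuntimeWarning), not 0
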
index b9a7e1b0d24f24e864071fdd5dd9cde7ed7bc843..be6cacd22634942d62d1b9dd2585c284efeaf5e0 100644 (file)
@@ -8,7 +8,7 @@ Post-processing...
 Building modules...
         Building module "scalar"...
         Wrote C/API module "scalar" to file "./scalarmodule.c"
->>> printr(r)
+>>> print(r)
 {'scalar': {'h': ['/home/users/pearu/src_cvs/f2py/src/fortranobject.h'],
         'csrc': ['./scalarmodule.c', 
                   '/home/users/pearu/src_cvs/f2py/src/fortranobject.c']}}
index 0f5068e0ed72adae078bdcca76cf993b33cf6936..5043ec43095889bbf2583ddd5131c497be1004fe 100644 (file)
@@ -214,32 +214,7 @@ Python module ``numpy.f2py``
   The current Python interface to the ``f2py`` module is not mature and
   may change in the future.
 
-The following functions are provided by the ``numpy.f2py`` module:
 
-``run_main(<list>)``
-  Equivalent to running::
+.. automodule:: numpy.f2py
+    :members:
 
-    f2py <args>
-
-  where ``<args>=string.join(<list>,' ')``, but in Python.  Unless
-  ``-h`` is used, this function returns a dictionary containing
-  information on generated modules and their dependencies on source
-  files.  For example, the command ``f2py -m scalar scalar.f`` can be
-  executed from Python as follows
-
-  .. include:: run_main_session.dat
-     :literal:
-
-  You cannot build extension modules with this function, that is,
-  using ``-c`` is not allowed. Use ``compile`` command instead, see
-  below.
-
-``compile(source, modulename='untitled', extra_args='', verbose=1, source_fn=None)``
-  Build extension module from Fortran 77 source string ``source``.
-  Return 0 if successful.
-  Note that this function actually calls ``f2py -c ..`` from shell to
-  ensure safety of the current Python process.
-  For example,
-
-  .. include:: compile_session.dat
-    :literal:
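
The prose removed above now lives in the ``numpy.f2py`` docstrings pulled in
by ``automodule``. For orientation, a minimal use of the documented
``compile`` entry point (a toy Fortran routine; assumes a Fortran compiler is
available on the path):

    import numpy.f2py

    fsource = '''
          subroutine addone(x, y)
          integer x, y
    cf2py intent(out) y
          y = x + 1
          end
    '''
    # compile() shells out to ``f2py -c``; run_main() cannot build extensions.
    status = numpy.f2py.compile(fsource, modulename='toy', verbose=0)
    assert status == 0

    import toy
    print(toy.addone(41))  # -> 42
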
index 1cf2155498bbb9b21383d20a7a0fbb9377e4a011..ea582e2aaa5244d74cf3efc90382d0383f8a6764 100644 (file)
@@ -2,6 +2,7 @@
 Release Notes
 *************
 
+.. include:: ../release/1.16.1-notes.rst
 .. include:: ../release/1.16.0-notes.rst
 .. include:: ../release/1.15.4-notes.rst
 .. include:: ../release/1.15.3-notes.rst
index 8e06ead780bea7b84befd3f4c8265fc03c94d3f9..067292776e20b8f0d271fc62590307e0748a6de4 100644 (file)
@@ -219,7 +219,7 @@ else:
         path representation is not str or bytes, TypeError is raised. If the
         provided path is not str, bytes, or os.PathLike, TypeError is raised.
         """
-        if isinstance(path, (str, bytes)):
+        if isinstance(path, (unicode, bytes)):
             return path
 
         # Work from the object's type to match method resolution of other magic
@@ -235,7 +235,7 @@ else:
             else:
                 raise TypeError("expected str, bytes or os.PathLike object, "
                                 "not " + path_type.__name__)
-        if isinstance(path_repr, (str, bytes)):
+        if isinstance(path_repr, (unicode, bytes)):
             return path_repr
         else:
             raise TypeError("expected {}.__fspath__() to return str or bytes, "
index 1543aafaf540cf719176858c17a5cfd9a31a4c26..9bb316a4de7ba6c6f2c05657297f53f6be248c7d 100644 (file)
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
 
 from os.path import join
 
-from numpy.compat import isfileobj
+from numpy.compat import isfileobj, os_fspath
 from numpy.testing import assert_
 from numpy.testing import tempdir
 
@@ -19,3 +19,8 @@ def test_isfileobj():
 
         with open(filename, 'rb') as f:
             assert_(isfileobj(f))
+
+
+def test_os_fspath_strings():
+    for string_path in (b'/a/b/c.d', u'/a/b/c.d'):
+        assert_(os_fspath(string_path) == string_path)
index 80ce84f00aeee7ea6337b55b3bc84ebd1729ad68..6b76e63b7aa131d457e7b9107700448c52b46add 100644 (file)
@@ -53,7 +53,19 @@ del env_added
 del os
 
 from . import umath
-from . import _internal  # for freeze programs
+
+# Check that multiarray,umath are pure python modules wrapping
+# _multiarray_umath and not either of the old c-extension modules
+if not (hasattr(multiarray, '_multiarray_umath') and
+        hasattr(umath, '_multiarray_umath')):
+    import sys
+    path = sys.modules['numpy'].__path__
+    msg = ("Something is wrong with the numpy installation. "
+        "While importing we detected an older version of "
+        "numpy in {}. One method of fixing this is to repeatedly uninstall "
+        "numpy until none is found, then reinstall this version.")
+    raise ImportError(msg.format(path))
+
 from . import numerictypes as nt
 multiarray.set_typeDict(nt.sctypeDict)
 from . import numeric
@@ -83,6 +95,11 @@ from .numeric import absolute as abs
 # do this after everything else, to minimize the chance of this misleadingly
 # appearing in an import-time traceback
 from . import _add_newdocs
+# add these for module-freeze analysis (like PyInstaller)
+from . import _dtype_ctypes
+from . import _internal
+from . import _dtype
+from . import _methods
 
 __all__ = ['char', 'rec', 'memmap']
 __all__ += numeric.__all__
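
The new guard can be reproduced by hand when debugging a broken install; on a
healthy installation both pure-Python wrapper modules expose the combined C
extension:

    import numpy.core.multiarray
    import numpy.core.umath

    # A stale pre-1.16 multiarray/umath .so earlier on the path would make
    # one of these False and trigger the ImportError above.
    print(hasattr(numpy.core.multiarray, '_multiarray_umath'))  # True
    print(hasattr(numpy.core.umath, '_multiarray_umath'))       # True
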
index 27a3deeda107ee93d3c233be05eb80427f9a3336..1d3bb558415c505a948f4753e61844a59f2d861e 100644 (file)
@@ -830,6 +830,13 @@ def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
             .format(ufunc, method, args_string, types_string))
 
 
+def array_function_errmsg_formatter(public_api, types):
+    """ Format the error message for when __array_ufunc__ gives up. """
+    func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
+    return ("no implementation found for '{}' on types that implement "
+            '__array_function__: {}'.format(func_name, list(types)))
+
+
 def _ufunc_doc_signature_formatter(ufunc):
     """
     Builds a signature string which resembles PEP 457
index baeab6383a0b39a4c91dd0f76695aa5a1cb2e896..33f6d01a89c39dabe491a4ea9d9b12f3e1560be2 100644 (file)
@@ -154,15 +154,3 @@ def _ptp(a, axis=None, out=None, keepdims=False):
         umr_minimum(a, axis, None, None, keepdims),
         out
     )
-
-_NDARRAY_ARRAY_FUNCTION = mu.ndarray.__array_function__
-
-def _array_function(self, func, types, args, kwargs):
-    # TODO: rewrite this in C
-    # Cannot handle items that have __array_function__ other than our own.
-    for t in types:
-        if not issubclass(t, mu.ndarray) and hasattr(t, '__array_function__'):
-            return NotImplemented
-
-    # The regular implementation can handle this, so we call it directly.
-    return func.__wrapped__(*args, **kwargs)
index 1d2cd25c877a0e00345bffc35a21bdcaf8d1d929..4aca2373c6653339d9d75d783044beeeecebbe6c 100644 (file)
@@ -19,6 +19,7 @@ __docformat__ = 'restructuredtext'
 
 # The files under src/ that are scanned for API functions
 API_FILES = [join('multiarray', 'alloc.c'),
+             join('multiarray', 'arrayfunction_override.c'),
              join('multiarray', 'array_assign_array.c'),
              join('multiarray', 'array_assign_scalar.c'),
              join('multiarray', 'arrayobject.c'),
index f5ee02c4220aee888b32a83bf28fa558c077d1aa..0fac9b05eeffb5c0036be1260a226302e0f030f7 100644 (file)
@@ -315,7 +315,7 @@ defdict = {
           TD(intfltcmplx),
           [TypeDescription('m', FullTypeDescr, 'mq', 'm'),
            TypeDescription('m', FullTypeDescr, 'md', 'm'),
-           #TypeDescription('m', FullTypeDescr, 'mm', 'd'),
+           TypeDescription('m', FullTypeDescr, 'mm', 'q'),
           ],
           TD(O, f='PyNumber_FloorDivide'),
           ),
@@ -802,8 +802,9 @@ defdict = {
 'divmod':
     Ufunc(2, 2, None,
           docstrings.get('numpy.core.umath.divmod'),
-          None,
+          'PyUFunc_DivmodTypeResolver',
           TD(intflt),
+          [TypeDescription('m', FullTypeDescr, 'mm', 'qm')],
           # TD(O, f='PyNumber_Divmod'),  # gh-9730
           ),
 'hypot':
index df0ed2df4658a349a2bd378b578525215311516b..7908969220dacc01da99a5a8d394b71fba41cd69 100644 (file)
@@ -40,6 +40,10 @@ __all__ = [
     'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
     'where', 'zeros']
 
+# For backward compatibility, make sure pickle imports these functions from here
+_reconstruct.__module__ = 'numpy.core.multiarray'
+scalar.__module__ = 'numpy.core.multiarray'
+
 
 arange.__module__ = 'numpy'
 array.__module__ = 'numpy'
@@ -211,9 +215,11 @@ def concatenate(arrays, axis=None, out=None):
            fill_value=999999)
 
     """
-    for array in arrays:
-        yield array
-    yield out
+    if out is not None:
+        # optimize for the typical case where only arrays is provided
+        arrays = list(arrays)
+        arrays.append(out)
+    return arrays
 
 
 @array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
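
The ``__module__`` assignments at the top of this hunk address gh-12869 from
the changelog: pickles written by 1.16 must reference ``_reconstruct`` and
``scalar`` under their historical ``numpy.core.multiarray`` location so that
older releases can resolve them. A quick check (hypothetical session):

    import pickle
    import numpy as np

    buf = pickle.dumps(np.arange(3))
    # The stream names numpy.core.multiarray._reconstruct, a module path
    # that exists in 1.15.x as well, so older NumPy can unpickle it.
    assert b'numpy.core.multiarray' in buf
    print(pickle.loads(buf))  # [0 1 2]
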
index 0979858a1578f9c95a71f36e93a87f9fea3277d1..c55174ecda6638b05655a3852af26123062c74c9 100644 (file)
@@ -1,73 +1,23 @@
-"""Preliminary implementation of NEP-18
-
-TODO: rewrite this in C for performance.
-"""
+"""Implementation of __array_function__ overrides from NEP-18."""
 import collections
 import functools
 import os
 
-from numpy.core._multiarray_umath import add_docstring, ndarray
+from numpy.core._multiarray_umath import (
+    add_docstring, implement_array_function, _get_implementing_args)
 from numpy.compat._inspect import getargspec
 
 
-_NDARRAY_ARRAY_FUNCTION = ndarray.__array_function__
-_NDARRAY_ONLY = [ndarray]
-
 ENABLE_ARRAY_FUNCTION = bool(
     int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 0)))
 
 
-def get_overloaded_types_and_args(relevant_args):
-    """Returns a list of arguments on which to call __array_function__.
-
-    Parameters
-    ----------
-    relevant_args : iterable of array-like
-        Iterable of array-like arguments to check for __array_function__
-        methods.
-
-    Returns
-    -------
-    overloaded_types : collection of types
-        Types of arguments from relevant_args with __array_function__ methods.
-    overloaded_args : list
-        Arguments from relevant_args on which to call __array_function__
-        methods, in the order in which they should be called.
+add_docstring(
+    implement_array_function,
     """
-    # Runtime is O(num_arguments * num_unique_types)
-    overloaded_types = []
-    overloaded_args = []
-    for arg in relevant_args:
-        arg_type = type(arg)
-        # We only collect arguments if they have a unique type, which ensures
-        # reasonable performance even with a long list of possibly overloaded
-        # arguments.
-        if (arg_type not in overloaded_types and
-                hasattr(arg_type, '__array_function__')):
-
-            # Create lists explicitly for the first type (usually the only one
-            # done) to avoid setting up the iterator for overloaded_args.
-            if overloaded_types:
-                overloaded_types.append(arg_type)
-                # By default, insert argument at the end, but if it is
-                # subclass of another argument, insert it before that argument.
-                # This ensures "subclasses before superclasses".
-                index = len(overloaded_args)
-                for i, old_arg in enumerate(overloaded_args):
-                    if issubclass(arg_type, type(old_arg)):
-                        index = i
-                        break
-                overloaded_args.insert(index, arg)
-            else:
-                overloaded_types = [arg_type]
-                overloaded_args = [arg]
-
-    return overloaded_types, overloaded_args
-
-
-def array_function_implementation_or_override(
-        implementation, public_api, relevant_args, args, kwargs):
-    """Implement a function with checks for __array_function__ overrides.
+    Implement a function with checks for __array_function__ overrides.
+
+    All arguments are required, and can only be passed by position.
 
     Arguments
     ---------
@@ -82,41 +32,37 @@ def array_function_implementation_or_override(
         Iterable of arguments to check for __array_function__ methods.
     args : tuple
         Arbitrary positional arguments originally passed into ``public_api``.
-    kwargs : tuple
+    kwargs : dict
         Arbitrary keyword arguments originally passed into ``public_api``.
 
     Returns
     -------
-    Result from calling `implementation()` or an `__array_function__`
+    Result from calling ``implementation()`` or an ``__array_function__``
     method, as appropriate.
 
     Raises
     ------
     TypeError : if no implementation is found.
+    """)
+
+
+# exposed for testing purposes; used internally by implement_array_function
+add_docstring(
+    _get_implementing_args,
     """
-    # Check for __array_function__ methods.
-    types, overloaded_args = get_overloaded_types_and_args(relevant_args)
-    # Short-cut for common cases: no overload or only ndarray overload
-    # (directly or with subclasses that do not override __array_function__).
-    if (not overloaded_args or types == _NDARRAY_ONLY or
-            all(type(arg).__array_function__ is _NDARRAY_ARRAY_FUNCTION
-                for arg in overloaded_args)):
-        return implementation(*args, **kwargs)
-
-    # Call overrides
-    for overloaded_arg in overloaded_args:
-        # Use `public_api` instead of `implemenation` so __array_function__
-        # implementations can do equality/identity comparisons.
-        result = overloaded_arg.__array_function__(
-            public_api, types, args, kwargs)
-
-        if result is not NotImplemented:
-            return result
-
-    func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
-    raise TypeError("no implementation found for '{}' on types that implement "
-                    '__array_function__: {}'
-                    .format(func_name, list(map(type, overloaded_args))))
+    Collect arguments on which to call __array_function__.
+
+    Parameters
+    ----------
+    relevant_args : iterable of array-like
+        Iterable of possibly array-like arguments to check for
+        __array_function__ methods.
+
+    Returns
+    -------
+    Sequence of arguments with __array_function__ methods, in the order in
+    which they should be called.
+    """)
 
 
 ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
@@ -215,7 +161,7 @@ def array_function_dispatch(dispatcher, module=None, verify=True,
         @functools.wraps(implementation)
         def public_api(*args, **kwargs):
             relevant_args = dispatcher(*args, **kwargs)
-            return array_function_implementation_or_override(
+            return implement_array_function(
                 implementation, public_api, relevant_args, args, kwargs)
 
         if module is not None:
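
Seen from a library author's side, the decorator wires a dispatcher to an
implementation; ``my_sum`` and its dispatcher below are hypothetical:

    import numpy as np
    from numpy.core.overrides import array_function_dispatch

    def _my_sum_dispatcher(a, axis=None):
        # Return only the arguments that can carry __array_function__.
        return (a,)

    @array_function_dispatch(_my_sum_dispatcher)
    def my_sum(a, axis=None):
        # Default implementation, used when nothing overrides it.
        return np.add.reduce(np.asarray(a), axis=axis)

    print(my_sum([[1, 2], [3, 4]], axis=0))  # [4 6]
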
index 86a43306a698197731556bc664ca37d3cf27f9aa..5898bb1631f6e792ec5e4640ae60a40ee4bea273 100644 (file)
@@ -711,7 +711,7 @@ def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
     a string"""
 
     if dtype is None and formats is None:
-        raise ValueError("Must have dtype= or formats=")
+        raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
 
     if dtype is not None:
         descr = sb.dtype(dtype)
@@ -758,6 +758,9 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
     >>> r.shape
     (10,)
     """
+
+    if dtype is None and formats is None:
+        raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")
 
     if (shape is None or shape == 0):
         shape = (-1,)
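
The effect of the new guards, illustrated (hypothetical session):

    import numpy as np

    try:
        np.rec.fromstring(b'\x00' * 8, shape=2)
    except TypeError as e:
        print(e)  # fromstring() needs a 'dtype' or 'formats' argument
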
index 467b590ac0afe4a47e9ae4a5d627ae089f518abb..00a9ffe69a2750f6224dd7bf9469668209ff1b8d 100644 (file)
@@ -775,6 +775,7 @@ def configuration(parent_package='',top_path=None):
     multiarray_deps = [
             join('src', 'multiarray', 'arrayobject.h'),
             join('src', 'multiarray', 'arraytypes.h'),
+            join('src', 'multiarray', 'arrayfunction_override.h'),
             join('src', 'multiarray', 'buffer.h'),
             join('src', 'multiarray', 'calculation.h'),
             join('src', 'multiarray', 'common.h'),
@@ -827,6 +828,7 @@ def configuration(parent_package='',top_path=None):
             join('src', 'multiarray', 'arraytypes.c.src'),
             join('src', 'multiarray', 'array_assign_scalar.c'),
             join('src', 'multiarray', 'array_assign_array.c'),
+            join('src', 'multiarray', 'arrayfunction_override.c'),
             join('src', 'multiarray', 'buffer.c'),
             join('src', 'multiarray', 'calculation.c'),
             join('src', 'multiarray', 'compiled_base.c'),
@@ -960,6 +962,14 @@ def configuration(parent_package='',top_path=None):
     config.add_extension('_operand_flag_tests',
                     sources=[join('src', 'umath', '_operand_flag_tests.c.src')])
 
+    #######################################################################
+    #                        _multiarray_module_test module               #
+    #######################################################################
+
+    config.add_extension('_multiarray_module_test',
+                    sources=[join('src', 'multiarray',
+                                         '_multiarray_module_test.c')])
+
     config.add_data_dir('tests')
     config.add_data_dir('tests/data')
 
index a529d2ad74c388b989c683e08f5e41d3ad5ff2e1..d20afd8be9dc7d2ac13e841a01642c495959b38b 100644 (file)
@@ -342,10 +342,11 @@ def hstack(tup):
 
 def _stack_dispatcher(arrays, axis=None, out=None):
     arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
-    for a in arrays:
-        yield a
     if out is not None:
-        yield out
+        # optimize for the typical case where only arrays is provided
+        arrays = list(arrays)
+        arrays.append(out)
+    return arrays
 
 
 @array_function_dispatch(_stack_dispatcher)
index bec87c5ed1c972122fc3c78115eb9d04784dfd55..d458d955004ae948725d3b78c7e5c2b8652e7eb8 100644 (file)
@@ -103,7 +103,6 @@ PyArray_LookupSpecial(PyObject *obj, char *name)
     if (_is_basic_python_type(tp)) {
         return NULL;
     }
-
     return maybe_get_attr((PyObject *)tp, name);
 }
 
diff --git a/numpy/core/src/multiarray/_multiarray_module_test.c b/numpy/core/src/multiarray/_multiarray_module_test.c
new file mode 100644 (file)
index 0000000..8dc0172
--- /dev/null
@@ -0,0 +1,129 @@
+#include "Python.h"
+
+/*
+ * This is a dummy module. It will be used to ruin the import of multiarray
+ * during testing. It exports two entry points, one to make the build happy,
+ * and a multiarray one for the actual test. The content of the module is
+ * irrelevant to the test.
+ *
+ * The code is from
+ * https://docs.python.org/3/howto/cporting.html
+ * or
+ * https://github.com/python/cpython/blob/v3.7.0/Doc/howto/cporting.rst
+ */
+
+#if defined _WIN32 || defined __CYGWIN__ || defined __MINGW32__
+  #if defined __GNUC__ || defined __clang__
+    #define DLL_PUBLIC __attribute__ ((dllexport))
+  #else
+    #define DLL_PUBLIC __declspec(dllexport)
+  #endif
+#elif defined __GNUC__  || defined __clang__
+  #define DLL_PUBLIC __attribute__ ((visibility ("default")))
+#else
+    /* Enhancement: error now instead ? */
+    #define DLL_PUBLIC
+#endif
+
+struct module_state {
+    PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+static struct module_state _state;
+#endif
+
+static PyObject *
+error_out(PyObject *m) {
+    struct module_state *st = GETSTATE(m);
+    PyErr_SetString(st->error, "something bad happened");
+    return NULL;
+}
+
+static PyMethodDef multiarray_methods[] = {
+    {"error_out", (PyCFunction)error_out, METH_NOARGS, NULL},
+    {NULL, NULL}
+};
+
+#if PY_MAJOR_VERSION >= 3
+
+static int multiarray_traverse(PyObject *m, visitproc visit, void *arg) {
+    Py_VISIT(GETSTATE(m)->error);
+    return 0;
+}
+
+static int multiarray_clear(PyObject *m) {
+    Py_CLEAR(GETSTATE(m)->error);
+    return 0;
+}
+
+
+static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "multiarray",
+        NULL,
+        sizeof(struct module_state),
+        multiarray_methods,
+        NULL,
+        multiarray_traverse,
+        multiarray_clear,
+        NULL
+};
+
+#define INITERROR return NULL
+
+DLL_PUBLIC PyObject *
+PyInit_multiarray(void)
+
+#else
+#define INITERROR return
+
+void
+DLL_PUBLIC initmultiarray(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+    PyObject *module = PyModule_Create(&moduledef);
+#else
+    PyObject *module = Py_InitModule("multiarray", multiarray_methods);
+#endif
+    struct module_state *st;
+    if (module == NULL)
+        INITERROR;
+    st = GETSTATE(module);
+
+    st->error = PyErr_NewException("multiarray.Error", NULL, NULL);
+    if (st->error == NULL) {
+        Py_DECREF(module);
+        INITERROR;
+    }
+
+#if PY_MAJOR_VERSION >= 3
+    return module;
+#endif
+}
+
+/*
+ * Define a dummy entry point to make MSVC happy
+ * Python's build system will export this function automatically
+ */
+#if PY_MAJOR_VERSION >= 3
+
+PyObject *
+PyInit__multiarray_module_test(void)
+{
+    return PyInit_multiarray();
+}
+
+#else
+
+void
+init_multiarray_module_test(void)
+{
+    initmultiarray();
+}
+
+#endif
index 2a827557215a0960a2fe8a3c329857ffd609d22a..c26bd16ac992f803955a5f74769982c71368d54a 100644 (file)
@@ -1871,11 +1871,14 @@ printf_float_g(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
 static PyObject *
 getset_numericops(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
 {
-    PyObject * ops = PyArray_GetNumericOps();
+    PyObject *ret;
+    PyObject *ops = PyArray_GetNumericOps();
     if (ops == NULL) {
         return NULL;
     }
-    return PyLong_FromLong(PyArray_SetNumericOps(ops));
+    ret = PyLong_FromLong(PyArray_SetNumericOps(ops));
+    Py_DECREF(ops);
+    return ret;
 }
 
 static PyMethodDef Multiarray_TestsMethods[] = {
diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c
new file mode 100644 (file)
index 0000000..e62b32a
--- /dev/null
@@ -0,0 +1,376 @@
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+
+#include "npy_pycompat.h"
+#include "get_attr_string.h"
+#include "npy_import.h"
+#include "multiarraymodule.h"
+
+
+/* Return the ndarray.__array_function__ method. */
+static PyObject *
+get_ndarray_array_function(void)
+{
+    PyObject* method = PyObject_GetAttrString((PyObject *)&PyArray_Type,
+                                              "__array_function__");
+    assert(method != NULL);
+    return method;
+}
+
+
+/*
+ * Get an object's __array_function__ method in the fastest way possible.
+ * Never raises an exception. Returns NULL if the method doesn't exist.
+ */
+static PyObject *
+get_array_function(PyObject *obj)
+{
+    static PyObject *ndarray_array_function = NULL;
+
+    if (ndarray_array_function == NULL) {
+        ndarray_array_function = get_ndarray_array_function();
+    }
+
+    /* Fast return for ndarray */
+    if (PyArray_CheckExact(obj)) {
+        Py_INCREF(ndarray_array_function);
+        return ndarray_array_function;
+    }
+
+    return PyArray_LookupSpecial(obj, "__array_function__");
+}
+
+
+/*
+ * Like list.insert(), but for C arrays of PyObject*. Skips error checking.
+ */
+static void
+pyobject_array_insert(PyObject **array, int length, int index, PyObject *item)
+{
+    int j;
+
+    for (j = length; j > index; j--) {
+        array[j] = array[j - 1];
+    }
+    array[index] = item;
+}
+
+
+/*
+ * Collects arguments with __array_function__ and their corresponding methods
+ * in the order in which they should be tried (i.e., skipping redundant types).
+ * `relevant_args` is expected to have been produced by PySequence_Fast.
+ * Returns the number of arguments, or -1 on failure. 
+ */
+static int
+get_implementing_args_and_methods(PyObject *relevant_args,
+                                  PyObject **implementing_args,
+                                  PyObject **methods)
+{
+    int num_implementing_args = 0;
+    Py_ssize_t i;
+    int j;
+
+    PyObject **items = PySequence_Fast_ITEMS(relevant_args);
+    Py_ssize_t length = PySequence_Fast_GET_SIZE(relevant_args);
+
+    for (i = 0; i < length; i++) {
+        int new_class = 1;
+        PyObject *argument = items[i];
+
+        /* Have we seen this type before? */
+        for (j = 0; j < num_implementing_args; j++) {
+            if (Py_TYPE(argument) == Py_TYPE(implementing_args[j])) {
+                new_class = 0;
+                break;
+            }
+        }
+        if (new_class) {
+            PyObject *method = get_array_function(argument);
+
+            if (method != NULL) {
+                int arg_index;
+
+                if (num_implementing_args >= NPY_MAXARGS) {
+                    PyErr_Format(
+                        PyExc_TypeError,
+                        "maximum number (%d) of distinct argument types " \
+                        "implementing __array_function__ exceeded",
+                        NPY_MAXARGS);
+                    Py_DECREF(method);
+                    goto fail;
+                }
+
+                /* "subclasses before superclasses, otherwise left to right" */
+                arg_index = num_implementing_args;
+                for (j = 0; j < num_implementing_args; j++) {
+                    PyObject *other_type;
+                    other_type = (PyObject *)Py_TYPE(implementing_args[j]);
+                    if (PyObject_IsInstance(argument, other_type)) {
+                        arg_index = j;
+                        break;
+                    }
+                }
+                Py_INCREF(argument);
+                pyobject_array_insert(implementing_args, num_implementing_args,
+                                      arg_index, argument);
+                pyobject_array_insert(methods, num_implementing_args,
+                                      arg_index, method);
+                ++num_implementing_args;
+            }
+        }
+    }
+    return num_implementing_args;
+
+fail:
+    for (j = 0; j < num_implementing_args; j++) {
+        Py_DECREF(implementing_args[j]);
+        Py_DECREF(methods[j]);
+    }
+    return -1;
+}
+
+
+/*
+ * Is this object ndarray.__array_function__?
+ */
+static int
+is_default_array_function(PyObject *obj)
+{
+    static PyObject *ndarray_array_function = NULL;
+
+    if (ndarray_array_function == NULL) {
+        ndarray_array_function = get_ndarray_array_function();
+    }
+    return obj == ndarray_array_function;
+}
+
+
+/*
+ * Core implementation of ndarray.__array_function__. This is exposed
+ * separately so we can avoid the overhead of a Python method call from
+ * within `implement_array_function`.
+ */
+NPY_NO_EXPORT PyObject *
+array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
+                           PyObject *kwargs)
+{
+    Py_ssize_t j;
+    PyObject *implementation, *result;
+
+    PyObject **items = PySequence_Fast_ITEMS(types);
+    Py_ssize_t length = PySequence_Fast_GET_SIZE(types);
+
+    for (j = 0; j < length; j++) {
+        int is_subclass = PyObject_IsSubclass(
+            items[j], (PyObject *)&PyArray_Type);
+        if (is_subclass == -1) {
+            return NULL;
+        }
+        if (!is_subclass) {
+            Py_INCREF(Py_NotImplemented);
+            return Py_NotImplemented;
+        }
+    }
+
+    implementation = PyObject_GetAttr(func, npy_ma_str_wrapped);
+    if (implementation == NULL) {
+        return NULL;
+    }
+    result = PyObject_Call(implementation, args, kwargs);
+    Py_DECREF(implementation);
+    return result;
+}
+
+
+/*
+ * Calls __array_function__ on the provided argument, with a fast-path for
+ * ndarray.
+ */
+static PyObject *
+call_array_function(PyObject* argument, PyObject* method,
+                    PyObject* public_api, PyObject* types,
+                    PyObject* args, PyObject* kwargs)
+{
+    if (is_default_array_function(method)) {
+        return array_function_method_impl(public_api, types, args, kwargs);
+    }
+    else {
+        return PyObject_CallFunctionObjArgs(
+            method, argument, public_api, types, args, kwargs, NULL);
+    }
+}
+
+
+/*
+ * Implements the __array_function__ protocol for a function, as described
+ * in NEP-18. See numpy.core.overrides for a full docstring.
+ */
+NPY_NO_EXPORT PyObject *
+array_implement_array_function(
+    PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
+{
+    PyObject *implementation, *public_api, *relevant_args, *args, *kwargs;
+
+    PyObject *types = NULL;
+    PyObject *implementing_args[NPY_MAXARGS];
+    PyObject *array_function_methods[NPY_MAXARGS];
+
+    int j, any_overrides;
+    int num_implementing_args = 0;
+    PyObject *result = NULL;
+
+    static PyObject *errmsg_formatter = NULL;
+
+    if (!PyArg_UnpackTuple(
+            positional_args, "implement_array_function", 5, 5,
+            &implementation, &public_api, &relevant_args, &args, &kwargs)) {
+        return NULL;
+    }
+
+    relevant_args = PySequence_Fast(
+        relevant_args,
+        "dispatcher for __array_function__ did not return an iterable");
+    if (relevant_args == NULL) {
+        return NULL;
+    }
+
+    /* Collect __array_function__ implementations */
+    num_implementing_args = get_implementing_args_and_methods(
+        relevant_args, implementing_args, array_function_methods);
+    if (num_implementing_args == -1) {
+        goto cleanup;
+    }
+
+    /*
+     * Handle the typical case of no overrides. This is merely an optimization
+     * if some arguments are ndarray objects, but is also necessary if no
+     * arguments implement __array_function__ at all (e.g., if they are all
+     * built-in types).
+     */
+    any_overrides = 0;
+    for (j = 0; j < num_implementing_args; j++) {
+        if (!is_default_array_function(array_function_methods[j])) {
+            any_overrides = 1;
+            break;
+        }
+    }
+    if (!any_overrides) {
+        result = PyObject_Call(implementation, args, kwargs);
+        goto cleanup;
+    }
+
+    /*
+     * Create a Python object for types.
+     * We use a tuple, because it's the fastest Python collection to create
+     * and has the bonus of being immutable.
+     */
+    types = PyTuple_New(num_implementing_args);
+    if (types == NULL) {
+        goto cleanup;
+    }
+    for (j = 0; j < num_implementing_args; j++) {
+        PyObject *arg_type = (PyObject *)Py_TYPE(implementing_args[j]);
+        Py_INCREF(arg_type);
+        PyTuple_SET_ITEM(types, j, arg_type);
+    }
+
+    /* Call __array_function__ methods */
+    for (j = 0; j < num_implementing_args; j++) {
+        PyObject *argument = implementing_args[j];
+        PyObject *method = array_function_methods[j];
+
+        /*
+         * We use `public_api` instead of `implementation` here so
+         * __array_function__ implementations can do equality/identity
+         * comparisons.
+         */
+        result = call_array_function(
+            argument, method, public_api, types, args, kwargs);
+
+        if (result == Py_NotImplemented) {
+            /* Try the next one */
+            Py_DECREF(result);
+            result = NULL;
+        }
+        else {
+            /* Either a good result, or an exception was raised. */
+            goto cleanup;
+        }
+    }
+
+    /* No acceptable override found, raise TypeError. */
+    npy_cache_import("numpy.core._internal",
+                     "array_function_errmsg_formatter",
+                     &errmsg_formatter);
+    if (errmsg_formatter != NULL) {
+        PyObject *errmsg = PyObject_CallFunctionObjArgs(
+            errmsg_formatter, public_api, types, NULL);
+        if (errmsg != NULL) {
+            PyErr_SetObject(PyExc_TypeError, errmsg);
+            Py_DECREF(errmsg);
+        }
+    }
+
+cleanup:
+    for (j = 0; j < num_implementing_args; j++) {
+        Py_DECREF(implementing_args[j]);
+        Py_DECREF(array_function_methods[j]);
+    }
+    Py_XDECREF(types);
+    Py_DECREF(relevant_args);
+    return result;
+}
+
+
+/*
+ * Python wrapper for get_implementing_args_and_methods, for testing purposes.
+ */
+NPY_NO_EXPORT PyObject *
+array__get_implementing_args(
+    PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
+{
+    PyObject *relevant_args;
+    int j;
+    int num_implementing_args = 0;
+    PyObject *implementing_args[NPY_MAXARGS];
+    PyObject *array_function_methods[NPY_MAXARGS];
+    PyObject *result = NULL;
+
+    if (!PyArg_ParseTuple(positional_args, "O:array__get_implementing_args",
+                          &relevant_args)) {
+        return NULL;
+    }
+
+    relevant_args = PySequence_Fast(
+        relevant_args,
+        "dispatcher for __array_function__ did not return an iterable");
+    if (relevant_args == NULL) {
+        return NULL;
+    }
+
+    num_implementing_args = get_implementing_args_and_methods(
+        relevant_args, implementing_args, array_function_methods);
+    if (num_implementing_args == -1) {
+        goto cleanup;
+    }
+
+    /* create a Python object for implementing_args */
+    result = PyList_New(num_implementing_args);
+    if (result == NULL) {
+        goto cleanup;
+    }
+    for (j = 0; j < num_implementing_args; j++) {
+        PyObject *argument = implementing_args[j];
+        Py_INCREF(argument);
+        PyList_SET_ITEM(result, j, argument);
+    }
+
+cleanup:
+    for (j = 0; j < num_implementing_args; j++) {
+        Py_DECREF(implementing_args[j]);
+        Py_DECREF(array_function_methods[j]);
+    }
+    Py_DECREF(relevant_args);
+    return result;
+}
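
The "subclasses before superclasses, otherwise left to right" ordering built
by ``get_implementing_args_and_methods`` can be observed through the testing
hook re-exported by ``numpy.core.overrides``; ``MyArray`` and ``MySubArray``
below are hypothetical:

    import numpy as np
    from numpy.core.overrides import _get_implementing_args

    class MyArray(np.ndarray):
        def __array_function__(self, func, types, args, kwargs):
            return NotImplemented

    class MySubArray(MyArray):
        pass

    base = np.arange(3).view(MyArray)
    sub = np.arange(3).view(MySubArray)

    # The subclass instance is tried first even though it comes last.
    print([type(a).__name__
           for a in _get_implementing_args([base, sub])])
    # -> ['MySubArray', 'MyArray']
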
diff --git a/numpy/core/src/multiarray/arrayfunction_override.h b/numpy/core/src/multiarray/arrayfunction_override.h
new file mode 100644 (file)
index 0000000..0d224e2
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _NPY_PRIVATE__ARRAYFUNCTION_OVERRIDE_H
+#define _NPY_PRIVATE__ARRAYFUNCTION_OVERRIDE_H
+
+NPY_NO_EXPORT PyObject *
+array_implement_array_function(
+    PyObject *NPY_UNUSED(dummy), PyObject *positional_args);
+
+NPY_NO_EXPORT PyObject *
+array__get_implementing_args(
+    PyObject *NPY_UNUSED(dummy), PyObject *positional_args);
+
+NPY_NO_EXPORT PyObject *
+array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
+                           PyObject *kwargs);
+
+#endif
index 823ee7115cc63812bba3389d1a8cc796dc87a8c4..ca5f5a47be5c694dc7e34c67138eb51b3df257fb 100644 (file)
@@ -2205,15 +2205,19 @@ static void
 VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
                 npy_intp n, int swap, PyArrayObject *arr)
 {
+    PyArray_Descr *descr;
+
     if (arr == NULL) {
         return;
     }
+
+    descr = PyArray_DESCR(arr);
+
     if (PyArray_HASFIELDS(arr)) {
         PyObject *key, *value;
-        PyArray_Descr *descr;
+
         Py_ssize_t pos = 0;
 
-        descr = PyArray_DESCR(arr);
         while (PyDict_Next(descr->fields, &pos, &key, &value)) {
             npy_intp offset;
             PyArray_Descr * new;
@@ -2236,14 +2240,28 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
         ((PyArrayObject_fields *)arr)->descr = descr;
         return;
     }
-    if (swap && PyArray_DESCR(arr)->subarray != NULL) {
-        PyArray_Descr *descr, *new;
+    if (PyDataType_HASSUBARRAY(descr)) {
+        PyArray_Descr *new;
         npy_intp num;
         npy_intp i;
         int subitemsize;
         char *dstptr, *srcptr;
+        /*
+         * In certain cases subarray copy can be optimized. This is when
+         * swapping is unnecessary and the subarray's data type can certainly
+         * be simply copied (no object, fields, subarray, and not a user dtype).
+         */
+        npy_bool can_optimize_subarray = (!swap &&
+                !PyDataType_HASFIELDS(descr->subarray->base) &&
+                !PyDataType_HASSUBARRAY(descr->subarray->base) &&
+                !PyDataType_REFCHK(descr->subarray->base) &&
+                (descr->subarray->base->type_num < NPY_NTYPES));
+
+        if (can_optimize_subarray) {
+            _basic_copyn(dst, dstride, src, sstride, n, descr->elsize);
+            return;
+        }
 
-        descr = PyArray_DESCR(arr);
         new = descr->subarray->base;
         /*
          * TODO: temporarily modifying the array like this
@@ -2253,6 +2271,10 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
         dstptr = dst;
         srcptr = src;
         subitemsize = new->elsize;
+        if (subitemsize == 0) {
+            /* There cannot be any elements, so return */
+            return;
+        }
         num = descr->elsize / subitemsize;
         for (i = 0; i < n; i++) {
             new->f->copyswapn(dstptr, subitemsize, srcptr,
@@ -2265,22 +2287,26 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
         ((PyArrayObject_fields *)arr)->descr = descr;
         return;
     }
-    _basic_copyn(dst, dstride, src, sstride, n, PyArray_DESCR(arr)->elsize);
+    /* Must be a naive Void type (e.g. a "V8") so simple copy is sufficient. */
+    _basic_copyn(dst, dstride, src, sstride, n, descr->elsize);
     return;
 }
 
 static void
 VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
 {
+    PyArray_Descr *descr;
+
     if (arr == NULL) {
         return;
     }
+
+    descr = PyArray_DESCR(arr);
+
     if (PyArray_HASFIELDS(arr)) {
         PyObject *key, *value;
-        PyArray_Descr *descr;
         Py_ssize_t pos = 0;
 
-        descr = PyArray_DESCR(arr);
         while (PyDict_Next(descr->fields, &pos, &key, &value)) {
             npy_intp offset;
             PyArray_Descr * new;
@@ -2303,28 +2329,45 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
         ((PyArrayObject_fields *)arr)->descr = descr;
         return;
     }
-    if (swap && PyArray_DESCR(arr)->subarray != NULL) {
-        PyArray_Descr *descr, *new;
+    if (PyDataType_HASSUBARRAY(descr)) {
+        PyArray_Descr *new;
         npy_intp num;
-        int itemsize;
+        int subitemsize;
+        /*
+         * In certain cases the subarray copy can be optimized: when
+         * swapping is unnecessary and the subarray's data type is certain
+         * to allow a simple copy (no objects, fields, nested subarrays,
+         * and not a user-defined dtype).
+         */
+        npy_bool can_optimize_subarray = (!swap &&
+                !PyDataType_HASFIELDS(descr->subarray->base) &&
+                !PyDataType_HASSUBARRAY(descr->subarray->base) &&
+                !PyDataType_REFCHK(descr->subarray->base) &&
+                (descr->subarray->base->type_num < NPY_NTYPES));
+
+        if (can_optimize_subarray) {
+            _basic_copy(dst, src, descr->elsize);
+            return;
+        }
 
-        descr = PyArray_DESCR(arr);
         new = descr->subarray->base;
         /*
          * TODO: temporarily modifying the array like this
          *       is bad coding style, should be changed.
          */
         ((PyArrayObject_fields *)arr)->descr = new;
-        itemsize = new->elsize;
-        num = descr->elsize / itemsize;
-        new->f->copyswapn(dst, itemsize, src,
-                itemsize, num, swap, arr);
+        subitemsize = new->elsize;
+        if (subitemsize == 0) {
+            /* There cannot be any elements, so return */
+            return;
+        }
+        num = descr->elsize / subitemsize;
+        new->f->copyswapn(dst, subitemsize, src,
+                subitemsize, num, swap, arr);
         ((PyArrayObject_fields *)arr)->descr = descr;
         return;
     }
-
-    /* copy first if needed */
-    _basic_copy(dst, src, PyArray_DESCR(arr)->elsize);
+    /* Must be a naive Void type (e.g. a "V8") so simple copy is sufficient. */
+    _basic_copy(dst, src, descr->elsize);
     return;
 }
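
The new fast path applies, for instance, when copying an array whose
fields hold plain numeric subarrays; an object base dtype forces the
per-element route. A small behavioural sketch (Python, illustrative):

    import numpy as np

    # subarray with a plain numeric base: eligible for a single bytewise copy
    a = np.zeros(3, dtype=[("f", np.int32, (2, 2))])
    b = a.copy()

    # an object base defeats the optimization: the references in every
    # element must be handled individually
    c = np.zeros(3, dtype=[("g", object, (2,))])
    d = c.copy()
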
 
index 2f66d7f2f391a8c5c170caed85fce575302b623e..d8ad802664316d70d8c635c864e9b2970126f359 100644 (file)
@@ -509,6 +509,10 @@ _buffer_info_new(PyObject *obj)
     PyArray_Descr *descr = NULL;
     int err = 0;
 
+    /*
+     * Note that the buffer info is cached inside Python ints, which makes
+     * the allocations appear to valgrind as unreachable, lost memory.
+     */
     info = malloc(sizeof(_buffer_info_t));
     if (info == NULL) {
         PyErr_NoMemory();
@@ -579,9 +583,11 @@ _buffer_info_new(PyObject *obj)
     err = _buffer_format_string(descr, &fmt, obj, NULL, NULL);
     Py_DECREF(descr);
     if (err != 0) {
+        free(info->shape);
         goto fail;
     }
     if (_append_char(&fmt, '\0') < 0) {
+        free(info->shape);
         goto fail;
     }
     info->format = fmt.s;
index 2e51cee7e2f639f6f37f56a6f24a854ecbd82962..addb67732f318b70485401ae49f20d36874fcac0 100644 (file)
@@ -164,7 +164,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
 
             if (string_type == NPY_STRING) {
                 if ((temp = PyObject_Str(obj)) == NULL) {
-                    return -1;
+                    goto fail;
                 }
 #if defined(NPY_PY3K)
     #if PY_VERSION_HEX >= 0x03030000
@@ -182,7 +182,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
 #else
                 if ((temp = PyObject_Unicode(obj)) == NULL) {
 #endif
-                    return -1;
+                    goto fail;
                 }
                 itemsize = PyUnicode_GET_DATA_SIZE(temp);
 #ifndef Py_UNICODE_WIDE
@@ -216,7 +216,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
 
             if (string_type == NPY_STRING) {
                 if ((temp = PyObject_Str(obj)) == NULL) {
-                    return -1;
+                    goto fail;
                 }
 #if defined(NPY_PY3K)
     #if PY_VERSION_HEX >= 0x03030000
@@ -234,7 +234,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
 #else
                 if ((temp = PyObject_Unicode(obj)) == NULL) {
 #endif
-                    return -1;
+                    goto fail;
                 }
                 itemsize = PyUnicode_GET_DATA_SIZE(temp);
 #ifndef Py_UNICODE_WIDE
@@ -511,7 +511,7 @@ promote_types:
         PyArray_Descr *res_dtype = PyArray_PromoteTypes(dtype, *out_dtype);
         Py_DECREF(dtype);
         if (res_dtype == NULL) {
-            return -1;
+            goto fail;
         }
         if (!string_type &&
                 res_dtype->type_num == NPY_UNICODE &&
index 10e3478e2df3e44383ef3de02886f2d9f44b0ead..625028bfb04bb7c5f4588f7d220977c29748ca00 100644 (file)
@@ -328,6 +328,7 @@ arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
         } else {
             Py_XDECREF(values);
             Py_XDECREF(mask);
+            PyArray_ResolveWritebackIfCopy(array);
             Py_XDECREF(array);
             Py_RETURN_NONE;
         }
@@ -358,6 +359,7 @@ arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
 
  fail:
     Py_XDECREF(mask);
+    PyArray_ResolveWritebackIfCopy(array);
     Py_XDECREF(array);
     Py_XDECREF(values);
     return NULL;
@@ -1575,6 +1577,7 @@ pack_bits(PyObject *input, int axis)
     if (!PyArray_ISBOOL(inp) && !PyArray_ISINTEGER(inp)) {
         PyErr_SetString(PyExc_TypeError,
                 "Expected an input array of integer or boolean data type");
+        Py_DECREF(inp);
         goto fail;
     }
 
@@ -1682,6 +1685,7 @@ unpack_bits(PyObject *input, int axis)
     if (PyArray_TYPE(inp) != NPY_UBYTE) {
         PyErr_SetString(PyExc_TypeError,
                 "Expected an input array of unsigned byte data type");
+        Py_DECREF(inp);
         goto fail;
     }
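
Both checks now release the converted input array before taking the
common failure path. The rejection itself is what a caller sees for a
wrong dtype, e.g. (illustrative):

    import numpy as np

    np.packbits(np.array([1, 0, 1], dtype=np.uint8))  # accepted

    try:
        np.packbits(np.array([1.0, 0.0]))             # floats are rejected
    except TypeError as exc:
        assert "integer or boolean" in str(exc)
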
 
index a176219460147e0a744ff8857252c0944301ec10..b9059ba4dba7655aca81e9e38a71b76a43f2a936 100644 (file)
@@ -1410,6 +1410,7 @@ _array_from_buffer_3118(PyObject *memoryview)
          * dimensions, so the array is now 0d.
          */
         nd = 0;
+        Py_DECREF(descr);
         descr = (PyArray_Descr *)PyObject_CallFunctionObjArgs(
                 (PyObject *)&PyArrayDescr_Type, Py_TYPE(view->obj), NULL);
         if (descr == NULL) {
@@ -2128,12 +2129,15 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
              */
 
             /* 2017-Nov-10 1.14 */
-            if (DEPRECATE("NPY_ARRAY_UPDATEIFCOPY, NPY_ARRAY_INOUT_ARRAY, and "
-                "NPY_ARRAY_INOUT_FARRAY are deprecated, use NPY_WRITEBACKIFCOPY, "
-                "NPY_ARRAY_INOUT_ARRAY2, or NPY_ARRAY_INOUT_FARRAY2 respectively "
-                "instead, and call PyArray_ResolveWritebackIfCopy before the "
-                "array is deallocated, i.e. before the last call to Py_DECREF.") < 0)
+            if (DEPRECATE(
+                    "NPY_ARRAY_UPDATEIFCOPY, NPY_ARRAY_INOUT_ARRAY, and "
+                    "NPY_ARRAY_INOUT_FARRAY are deprecated, use NPY_WRITEBACKIFCOPY, "
+                    "NPY_ARRAY_INOUT_ARRAY2, or NPY_ARRAY_INOUT_FARRAY2 respectively "
+                    "instead, and call PyArray_ResolveWritebackIfCopy before the "
+                    "array is deallocated, i.e. before the last call to Py_DECREF.") < 0) {
+                Py_DECREF(ret);
                 return NULL;
+            }
             Py_INCREF(arr);
             if (PyArray_SetWritebackIfCopyBase(ret, arr) < 0) {
                 Py_DECREF(ret);
@@ -2160,14 +2164,12 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
 
         Py_DECREF(newtype);
         if (needview) {
-            PyArray_Descr *dtype = PyArray_DESCR(arr);
             PyTypeObject *subtype = NULL;
 
             if (flags & NPY_ARRAY_ENSUREARRAY) {
                 subtype = &PyArray_Type;
             }
 
-            Py_INCREF(dtype);
             ret = (PyArrayObject *)PyArray_View(arr, NULL, subtype);
             if (ret == NULL) {
                 return NULL;
@@ -2495,6 +2497,11 @@ PyArray_FromInterface(PyObject *origin)
             &PyArray_Type, dtype,
             n, dims, NULL, data,
             dataflags, NULL, base);
+    /*
+     * Ref to dtype was stolen by PyArray_NewFromDescrAndBase
+     * Prevent DECREFing dtype in fail codepath by setting to NULL
+     */
+    dtype = NULL;
     if (ret == NULL) {
         goto fail;
     }
index a8550d95857066df7a557db9eaf3635eb80e146d..54d19d993b11ad111e5db95051c2f7ead4efdaef 100644 (file)
@@ -3822,18 +3822,26 @@ recursive_find_object_timedelta64_type(PyObject *obj,
                  * single object using [()], but not by using
                  * __getitem__(integer) approaches
                  */
-                PyObject *item, *meth, *args;
+                PyObject *item, *args;
 
-                meth = PyObject_GetAttrString(obj, "__getitem__");
-                args = Py_BuildValue("(())");
-                item = PyObject_CallObject(meth, args);
+                args = PyTuple_New(0);
+                if (args == NULL) {
+                    return 0;
+                }
+                item = PyObject_GetItem(obj, args);
+                Py_DECREF(args);
+                if (item == NULL) {
+                    return 0;
+                }
                 /*
                  * NOTE: may need other type checks here in the future
                  * for expanded 0 D datetime array conversions?
                  */
                 if (PyDelta_Check(item)) {
+                    Py_DECREF(item);
                     return delta_checker(meta);
                 }
+                Py_DECREF(item);
             }
         }
     }
index 3038e4dead526cc37982640b7d4d6c26202b43b3..0471a2a3e3ad784bcacb4e8bfe24fcf5a895a87e 100644 (file)
@@ -515,6 +515,7 @@ _convert_from_array_descr(PyObject *obj, int align)
 #if defined(NPY_PY3K)
             Py_DECREF(name);
 #endif
+            Py_DECREF(conv);
             goto fail;
         }
         dtypeflags |= (conv->flags & NPY_FROM_FIELDS);
@@ -837,9 +838,11 @@ _use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag)
     else if (new->elsize != conv->elsize) {
         PyErr_SetString(PyExc_ValueError,
                 "mismatch in size of old and new data-descriptor");
+        Py_DECREF(new);
         goto fail;
     }
     else if (invalid_union_object_dtype(new, conv)) {
+        Py_DECREF(new);
         goto fail;
     }
 
@@ -1728,6 +1731,7 @@ PyArray_DescrNew(PyArray_Descr *base)
         newdescr->c_metadata = NPY_AUXDATA_CLONE(base->c_metadata);
         if (newdescr->c_metadata == NULL) {
             PyErr_NoMemory();
+            /* TODO: This seems wrong, as the old fields get decref'd? */
             Py_DECREF(newdescr);
             return NULL;
         }
@@ -3336,12 +3340,15 @@ static PyObject *
 _subscript_by_index(PyArray_Descr *self, Py_ssize_t i)
 {
     PyObject *name = PySequence_GetItem(self->names, i);
+    PyObject *ret;
     if (name == NULL) {
         PyErr_Format(PyExc_IndexError,
                      "Field index %zd out of range.", i);
         return NULL;
     }
-    return _subscript_by_name(self, name);
+    ret = _subscript_by_name(self, name);
+    Py_DECREF(name);
+    return ret;
 }
 
 static PyObject *
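
_subscript_by_index now drops its temporary reference to the field name.
The path it services is integer indexing of a structured dtype, e.g.:

    import numpy as np

    dt = np.dtype([("a", "i4"), ("b", "f8")])
    assert dt[1] == np.dtype("f8")   # subscript by index
    assert dt["b"] == dt[1]          # same field, subscript by name
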
index 63b1ead25cb37142cdbb8f3339ca090c504d6574..6347d35eb7f10ae9c8dff3c715ccfd1048ae0c19 100644 (file)
@@ -1572,12 +1572,30 @@ get_cast_transfer_function(int aligned,
                                 src_dtype,
                                 &tobuffer, &todata);
 
-
-        /* Get the copy/swap operation to dst */
-        PyArray_GetDTypeCopySwapFn(aligned,
-                                dst_itemsize, dst_stride,
-                                dst_dtype,
-                                &frombuffer, &fromdata);
+        if (!PyDataType_REFCHK(dst_dtype)) {
+            /* Copying from buffer is a simple copy/swap operation */
+            PyArray_GetDTypeCopySwapFn(aligned,
+                                    dst_itemsize, dst_stride,
+                                    dst_dtype,
+                                    &frombuffer, &fromdata);
+        }
+        else {
+            /*
+             * Since the buffer is initialized to NULL, the transfer needs
+             * to move the references in order to DECREF the existing data.
+             */
+            /* Object types cannot be byte swapped */
+            assert(PyDataType_ISNOTSWAPPED(dst_dtype));
+            /* The loop already needs the Python API if this is reached */
+            assert(*out_needs_api);
+
+            if (PyArray_GetDTypeTransferFunction(
+                    aligned, dst_itemsize, dst_stride,
+                    dst_dtype, dst_dtype, 1,
+                    &frombuffer, &fromdata, out_needs_api) != NPY_SUCCEED) {
+                return NPY_FAIL;
+            }
+        }
 
         if (frombuffer == NULL || tobuffer == NULL) {
             NPY_AUXDATA_FREE(castdata);
@@ -2001,6 +2019,7 @@ typedef struct {
     _subarray_broadcast_offsetrun offsetruns;
 } _subarray_broadcast_data;
 
+
 /* transfer data free function */
 static void _subarray_broadcast_data_free(NpyAuxData *data)
 {
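
With the branch above, buffered casts into dtypes that hold references
go through a dtype-to-dtype transfer that moves references and releases
whatever the destination held. A behavioural sketch (Python; the
buffered path itself is internal and not directly observable):

    import numpy as np

    # assigning numeric data into an object array must release the
    # objects the destination previously referenced
    dst = np.array([object() for _ in range(4)], dtype=object)
    dst[...] = np.arange(4.0)
    assert dst[0] == 0.0
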
index 7c814e6e6293a27313c4dbafedfeafeb8018cf2d..2fcc2ec22665161f8d6beca47f890a2bb530b4aa 100644 (file)
@@ -8,6 +8,7 @@
 #include "numpy/arrayobject.h"
 #include "numpy/arrayscalars.h"
 
+#include "arrayfunction_override.h"
 #include "npy_config.h"
 #include "npy_pycompat.h"
 #include "npy_import.h"
@@ -1088,13 +1089,29 @@ cleanup:
     return result;
 }
 
-
 static PyObject *
-array_function(PyArrayObject *self, PyObject *args, PyObject *kwds)
+array_function(PyArrayObject *self, PyObject *c_args, PyObject *c_kwds)
 {
-    NPY_FORWARD_NDARRAY_METHOD("_array_function");
-}
+    PyObject *func, *types, *args, *kwargs, *result;
+    static char *kwlist[] = {"func", "types", "args", "kwargs", NULL};
+
+    if (!PyArg_ParseTupleAndKeywords(
+            c_args, c_kwds, "OOOO:__array_function__", kwlist,
+            &func, &types, &args, &kwargs)) {
+        return NULL;
+    }
 
+    types = PySequence_Fast(
+        types,
+        "types argument to ndarray.__array_function__ must be iterable");
+    if (types == NULL) {
+        return NULL;
+    }
+
+    result = array_function_method_impl(func, types, args, kwargs);
+    Py_DECREF(types);
+    return result;
+}
 
 static PyObject *
 array_copy(PyArrayObject *self, PyObject *args, PyObject *kwds)
@@ -1364,6 +1381,7 @@ array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds)
             return NULL;
         }
         newd = PyArray_DescrNew(saved);
+        Py_DECREF(newd->names);
         newd->names = new_name;
         ((PyArrayObject_fields *)self)->descr = newd;
     }
@@ -1418,6 +1436,7 @@ array_argpartition(PyArrayObject *self, PyObject *args, PyObject *kwds)
             return NULL;
         }
         newd = PyArray_DescrNew(saved);
+        Py_DECREF(newd->names);
         newd->names = new_name;
         ((PyArrayObject_fields *)self)->descr = newd;
     }
@@ -1521,7 +1540,6 @@ array_deepcopy(PyArrayObject *self, PyObject *args)
         copy = PyImport_ImportModule("copy");
         if (copy == NULL) {
             Py_DECREF(copied_array);
-            Py_DECREF(copy);
             return NULL;
         }
         deepcopy = PyObject_GetAttrString(copy, "deepcopy");
@@ -1704,129 +1722,150 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
 }
 
 static PyObject *
-array_reduce_ex(PyArrayObject *self, PyObject *args)
+array_reduce_ex_regular(PyArrayObject *self, int protocol)
 {
-    int protocol;
-    PyObject *ret = NULL, *numeric_mod = NULL, *from_buffer_func = NULL;
-    PyObject *buffer_tuple = NULL, *pickle_module = NULL, *pickle_class = NULL;
-    PyObject *class_args = NULL, *class_args_tuple = NULL, *unused = NULL;
     PyObject *subclass_array_reduce = NULL;
+    PyObject *ret;
+
+    /* We do not call array_reduce directly but instead look up and call
+     * the __reduce__ method to make sure that it's possible to customize
+     * pickling in sub-classes. */
+    subclass_array_reduce = PyObject_GetAttrString((PyObject *)self,
+                                                   "__reduce__");
+    if (subclass_array_reduce == NULL) {
+        return NULL;
+    }
+    ret = PyObject_CallObject(subclass_array_reduce, NULL);
+    Py_DECREF(subclass_array_reduce);
+    return ret;
+}
+
+static PyObject *
+array_reduce_ex_picklebuffer(PyArrayObject *self, int protocol)
+{
+    PyObject *numeric_mod = NULL, *from_buffer_func = NULL;
+    PyObject *pickle_module = NULL, *picklebuf_class = NULL;
+    PyObject *picklebuf_args = NULL;
     PyObject *buffer = NULL, *transposed_array = NULL;
     PyArray_Descr *descr = NULL;
     char order;
 
-    if (PyArg_ParseTuple(args, "i", &protocol)){
-        descr = PyArray_DESCR(self);
-        if ((protocol < 5) ||
-            (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)self) &&
-             !PyArray_IS_F_CONTIGUOUS((PyArrayObject*)self)) ||
-            PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) ||
-            (PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) &&
-             ((PyObject*)self)->ob_type != &PyArray_Type) ||
-            PyDataType_ISUNSIZED(descr)) {
-            /* The PickleBuffer class from version 5 of the pickle protocol
-             * can only be used for arrays backed by a contiguous data buffer.
-             * For all other cases we fallback to the generic array_reduce
-             * method that involves using a temporary bytes allocation. However
-             * we do not call array_reduce directly but instead lookup and call
-             * the __reduce__ method to make sure that it's possible customize
-             * pickling in sub-classes. */
-            subclass_array_reduce = PyObject_GetAttrString((PyObject *)self,
-                                                           "__reduce__");
-            return PyObject_CallObject(subclass_array_reduce, unused);
-        }
-        else if (protocol == 5){
-            ret = PyTuple_New(2);
-
-            if (ret == NULL) {
-                return NULL;
-            }
+    descr = PyArray_DESCR(self);
 
-            /* if the python version is below 3.8, the pickle module does not provide
-             * built-in support for protocol 5. We try importing the pickle5
-             * backport instead */
+    /* If the Python version is below 3.8, the pickle module does not provide
+     * built-in support for protocol 5. We try importing the pickle5
+     * backport instead */
 #if PY_VERSION_HEX >= 0x03080000
-            pickle_module = PyImport_ImportModule("pickle");
-#elif PY_VERSION_HEX < 0x03080000 && PY_VERSION_HEX >= 0x03060000
-            pickle_module = PyImport_ImportModule("pickle5");
-            if (pickle_module == NULL){
-                /* for protocol 5, raise a clear ImportError if pickle5 is not found
-                 */
-                PyErr_SetString(PyExc_ImportError, "Using pickle protocol 5 "
-                        "requires the pickle5 module for python versions >=3.6 "
-                        "and <3.8");
-                return NULL;
-            }
+    /* protocol 5 is built into pickle on Python >= 3.8 */
+    pickle_module = PyImport_ImportModule("pickle");
+#elif PY_VERSION_HEX >= 0x03060000
+    pickle_module = PyImport_ImportModule("pickle5");
+    if (pickle_module == NULL) {
+        /* for protocol 5, raise a clear ImportError if pickle5 is not found
+         */
+        PyErr_SetString(PyExc_ImportError, "Using pickle protocol 5 "
+                "requires the pickle5 module for Python >=3.6 and <3.8");
+        return NULL;
+    }
 #else
-            PyErr_SetString(PyExc_ValueError, "pickle protocol 5 is not available "
-                                               "for python versions < 3.6");
-            return NULL;
+    PyErr_SetString(PyExc_ValueError, "pickle protocol 5 is not available "
+                                      "for Python < 3.6");
+    return NULL;
 #endif
-            if (pickle_module == NULL){
-                return NULL;
-            }
-
-            pickle_class = PyObject_GetAttrString(pickle_module,
-                                                  "PickleBuffer");
+    if (pickle_module == NULL){
+        return NULL;
+    }
+    picklebuf_class = PyObject_GetAttrString(pickle_module, "PickleBuffer");
+    Py_DECREF(pickle_module);
+    if (picklebuf_class == NULL) {
+        return NULL;
+    }
 
-            class_args_tuple = PyTuple_New(1);
-            if (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)self) &&
-                PyArray_IS_F_CONTIGUOUS((PyArrayObject*)self)){
+    /* Construct a PickleBuffer of the array */
 
-                /* if the array if Fortran-contiguous and not C-contiguous,
-                 * the PickleBuffer instance will hold a view on the transpose
-                 * of the initial array, that is C-contiguous. */
-                order = 'F';
-                transposed_array = PyArray_Transpose((PyArrayObject*)self, NULL);
-                PyTuple_SET_ITEM(class_args_tuple, 0, transposed_array);
-            }
-            else {
-                order = 'C';
-                PyTuple_SET_ITEM(class_args_tuple, 0, (PyObject *)self);
-                Py_INCREF(self);
-            }
+    if (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*) self) &&
+         PyArray_IS_F_CONTIGUOUS((PyArrayObject*) self)) {
+        /* if the array is Fortran-contiguous and not C-contiguous,
+         * the PickleBuffer instance will hold a view on the transpose
+         * of the initial array, which is C-contiguous. */
+        order = 'F';
+        transposed_array = PyArray_Transpose((PyArrayObject*)self, NULL);
+        picklebuf_args = Py_BuildValue("(N)", transposed_array);
+    }
+    else {
+        order = 'C';
+        picklebuf_args = Py_BuildValue("(O)", self);
+    }
+    if (picklebuf_args == NULL) {
+        Py_DECREF(picklebuf_class);
+        return NULL;
+    }
 
-            class_args = Py_BuildValue("O", class_args_tuple);
+    buffer = PyObject_CallObject(picklebuf_class, picklebuf_args);
+    Py_DECREF(picklebuf_class);
+    Py_DECREF(picklebuf_args);
+    if (buffer == NULL) {
+        /* Some arrays may refuse to export a buffer, in which case
+         * just fall back on regular __reduce_ex__ implementation
+         * (gh-12745).
+         */
+        PyErr_Clear();
+        return array_reduce_ex_regular(self, protocol);
+    }
 
-            buffer = PyObject_CallObject(pickle_class, class_args);
+    /* Get the _frombuffer() function for reconstruction */
 
-            numeric_mod = PyImport_ImportModule("numpy.core.numeric");
-            if (numeric_mod == NULL) {
-                Py_DECREF(ret);
-                return NULL;
-            }
-            from_buffer_func = PyObject_GetAttrString(numeric_mod,
-                                                      "_frombuffer");
-            Py_DECREF(numeric_mod);
+    numeric_mod = PyImport_ImportModule("numpy.core.numeric");
+    if (numeric_mod == NULL) {
+        Py_DECREF(buffer);
+        return NULL;
+    }
+    from_buffer_func = PyObject_GetAttrString(numeric_mod,
+                                              "_frombuffer");
+    Py_DECREF(numeric_mod);
+    if (from_buffer_func == NULL) {
+        Py_DECREF(buffer);
+        return NULL;
+    }
 
-            Py_INCREF(descr);
+    return Py_BuildValue("N(NONN)",
+                         from_buffer_func, buffer, (PyObject *)descr,
+                         PyObject_GetAttrString((PyObject *)self, "shape"),
+                         PyUnicode_FromStringAndSize(&order, 1));
+}
 
-            buffer_tuple = PyTuple_New(4);
-            PyTuple_SET_ITEM(buffer_tuple, 0, buffer);
-            PyTuple_SET_ITEM(buffer_tuple, 1, (PyObject *)descr);
-            PyTuple_SET_ITEM(buffer_tuple, 2,
-                             PyObject_GetAttrString((PyObject *)self,
-                                                    "shape"));
-            PyTuple_SET_ITEM(buffer_tuple, 3,
-                             PyUnicode_FromStringAndSize(&order,
-                                                         (Py_ssize_t)1));
+static PyObject *
+array_reduce_ex(PyArrayObject *self, PyObject *args)
+{
+    int protocol;
+    PyArray_Descr *descr = NULL;
 
-            PyTuple_SET_ITEM(ret, 0, from_buffer_func);
-            PyTuple_SET_ITEM(ret, 1, buffer_tuple);
+    if (!PyArg_ParseTuple(args, "i", &protocol)) {
+        return NULL;
+    }
 
-            return ret;
-        }
-        else {
-            PyErr_Format(PyExc_ValueError,
-                         "cannot call __reduce_ex__ with protocol >= %d",
-                         5);
-            return NULL;
-        }
+    descr = PyArray_DESCR(self);
+    if ((protocol < 5) ||
+        (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)self) &&
+         !PyArray_IS_F_CONTIGUOUS((PyArrayObject*)self)) ||
+        PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) ||
+        (PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) &&
+         ((PyObject*)self)->ob_type != &PyArray_Type) ||
+        PyDataType_ISUNSIZED(descr)) {
+        /* The PickleBuffer class from version 5 of the pickle protocol
+         * can only be used for arrays backed by a contiguous data buffer.
+         * For all other cases we fall back to the generic array_reduce
+         * method that involves using a temporary bytes allocation. */
+        return array_reduce_ex_regular(self, protocol);
+    }
+    else if (protocol == 5) {
+        return array_reduce_ex_picklebuffer(self, protocol);
     }
     else {
+        PyErr_Format(PyExc_ValueError,
+                     "__reduce_ex__ called with protocol > 5");
         return NULL;
     }
-
 }
 
 static PyObject *
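
Splitting __reduce_ex__ into a regular and a PickleBuffer variant lets
protocol-5 pickling hand the array buffer out of band instead of copying
it into bytes. A usage sketch (requires Python >= 3.8, or the pickle5
backport on 3.6/3.7):

    import pickle
    import numpy as np

    a = np.arange(10)
    buffers = []
    # buffer_callback collects PickleBuffer objects instead of embedding them
    payload = pickle.dumps(a, protocol=5, buffer_callback=buffers.append)
    b = pickle.loads(payload, buffers=buffers)
    assert (a == b).all()
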
index 8135769d9893b3efc8149dae11c7b50b1907e510..166533b3fb04451941d24c215f2f87550fd9dd12 100644 (file)
@@ -34,6 +34,7 @@
 NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
 
 /* Internal APIs */
+#include "arrayfunction_override.h"
 #include "arraytypes.h"
 #include "arrayobject.h"
 #include "hashdescr.h"
@@ -3254,6 +3255,7 @@ array_datetime_data(PyObject *NPY_UNUSED(dummy), PyObject *args)
     }
 
     meta = get_datetime_metadata_from_dtype(dtype);
+    Py_DECREF(dtype);
     if (meta == NULL) {
         return NULL;
     }
@@ -3618,6 +3620,7 @@ _vec_string_with_args(PyArrayObject* char_array, PyArray_Descr* type,
     if (nargs == -1 || nargs > NPY_MAXARGS) {
         PyErr_Format(PyExc_ValueError,
                 "len(args) must be < %d", NPY_MAXARGS - 1);
+        Py_DECREF(type);
         goto err;
     }
 
@@ -3625,6 +3628,7 @@ _vec_string_with_args(PyArrayObject* char_array, PyArray_Descr* type,
     for (i = 1; i < nargs; i++) {
         PyObject* item = PySequence_GetItem(args, i-1);
         if (item == NULL) {
+            Py_DECREF(type);
             goto err;
         }
         broadcast_args[i] = item;
@@ -3633,6 +3637,7 @@ _vec_string_with_args(PyArrayObject* char_array, PyArray_Descr* type,
     in_iter = (PyArrayMultiIterObject*)PyArray_MultiIterFromObjects
         (broadcast_args, nargs, 0);
     if (in_iter == NULL) {
+        Py_DECREF(type);
         goto err;
     }
     n = in_iter->numiter;
@@ -3713,6 +3718,7 @@ _vec_string_no_args(PyArrayObject* char_array,
 
     in_iter = (PyArrayIterObject*)PyArray_IterNew((PyObject*)char_array);
     if (in_iter == NULL) {
+        Py_DECREF(type);
         goto err;
     }
 
@@ -3769,7 +3775,7 @@ static PyObject *
 _vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
 {
     PyArrayObject* char_array = NULL;
-    PyArray_Descr *type = NULL;
+    PyArray_Descr *type;
     PyObject* method_name;
     PyObject* args_seq = NULL;
 
@@ -3806,6 +3812,7 @@ _vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
         result = _vec_string_with_args(char_array, type, method, args_seq);
     }
     else {
+        Py_DECREF(type);
         PyErr_SetString(PyExc_TypeError,
                 "'args' must be a sequence of arguments");
         goto err;
@@ -4062,6 +4069,9 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
 }
 
 static struct PyMethodDef array_module_methods[] = {
+    {"_get_implementing_args",
+        (PyCFunction)array__get_implementing_args,
+        METH_VARARGS, NULL},
     {"_get_ndarray_c_version",
         (PyCFunction)array__get_ndarray_c_version,
         METH_VARARGS|METH_KEYWORDS, NULL},
@@ -4224,6 +4234,9 @@ static struct PyMethodDef array_module_methods[] = {
         METH_VARARGS | METH_KEYWORDS, NULL},
     {"_monotonicity", (PyCFunction)arr__monotonicity,
         METH_VARARGS | METH_KEYWORDS, NULL},
+    {"implement_array_function",
+        (PyCFunction)array_implement_array_function,
+        METH_VARARGS, NULL},
     {"interp", (PyCFunction)arr_interp,
         METH_VARARGS | METH_KEYWORDS, NULL},
     {"interp_complex", (PyCFunction)arr_interp_complex,
@@ -4476,6 +4489,7 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_wrap = NULL;
 NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_finalize = NULL;
 NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_buffer = NULL;
 NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ufunc = NULL;
+NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_wrapped = NULL;
 NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_order = NULL;
 NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_copy = NULL;
 NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL;
@@ -4492,6 +4506,7 @@ intern_strings(void)
     npy_ma_str_array_finalize = PyUString_InternFromString("__array_finalize__");
     npy_ma_str_buffer = PyUString_InternFromString("__buffer__");
     npy_ma_str_ufunc = PyUString_InternFromString("__array_ufunc__");
+    npy_ma_str_wrapped = PyUString_InternFromString("__wrapped__");
     npy_ma_str_order = PyUString_InternFromString("order");
     npy_ma_str_copy = PyUString_InternFromString("copy");
     npy_ma_str_dtype = PyUString_InternFromString("dtype");
@@ -4501,7 +4516,7 @@ intern_strings(void)
 
     return npy_ma_str_array && npy_ma_str_array_prepare &&
            npy_ma_str_array_wrap && npy_ma_str_array_finalize &&
-           npy_ma_str_buffer && npy_ma_str_ufunc &&
+           npy_ma_str_buffer && npy_ma_str_ufunc && npy_ma_str_wrapped &&
            npy_ma_str_order && npy_ma_str_copy && npy_ma_str_dtype &&
            npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2;
 }
index 3de68c5498109bbf00d585340830c20eee972296..60a3965c9894b6e745607cb2dfd5ff15dbb27c43 100644 (file)
@@ -7,6 +7,7 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_wrap;
 NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_finalize;
 NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_buffer;
 NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ufunc;
+NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_wrapped;
 NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_order;
 NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_copy;
 NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype;
index 90cff4077d43f950038056a39e10c15a522ed473..18a2cc84fb45b8dc9151ce16a5251caef3b5e56d 100644 (file)
@@ -1248,9 +1248,9 @@ npyiter_prepare_operands(int nop, PyArrayObject **op_in,
     return 1;
 
   fail_nop:
-    iop = nop;
+    iop = nop - 1;
   fail_iop:
-    for (i = 0; i < iop; ++i) {
+    for (i = 0; i < iop+1; ++i) {
         Py_XDECREF(op[i]);
         Py_XDECREF(op_dtype[i]);
     }
@@ -3175,6 +3175,7 @@ npyiter_allocate_transfer_functions(NpyIter *iter)
                                         &stransfer,
                                         &transferdata,
                                         &needs_api) != NPY_SUCCEED) {
+                    iop -= 1;  /* This one cannot be cleaned up yet. */
                     goto fail;
                 }
                 readtransferfn[iop] = stransfer;
@@ -3268,7 +3269,7 @@ npyiter_allocate_transfer_functions(NpyIter *iter)
     return 1;
 
 fail:
-    for (i = 0; i < iop; ++i) {
+    for (i = 0; i < iop+1; ++i) {
         if (readtransferdata[iop] != NULL) {
             NPY_AUXDATA_FREE(readtransferdata[iop]);
             readtransferdata[iop] = NULL;
index 5a9f3c5fae238f2124e6b60610191f0524ef8f27..30a81e0ca64d750aa461ef489cf523982d9f689f 100644 (file)
@@ -2355,6 +2355,8 @@ npyiter_close(NewNpyArrayIterObject *self)
     }
     ret = NpyIter_Deallocate(iter);
     self->iter = NULL;
+    Py_XDECREF(self->nested_child);
+    self->nested_child = NULL;
     if (ret < 0) {
         return NULL;
     }
index d153a8a64fa108964157867e4ef51c30697e4134..420501ce251f0b3289e12a6ea04ba57e92255857 100644 (file)
@@ -599,15 +599,16 @@ array_positive(PyArrayObject *m1)
             PyErr_Restore(exc, val, tb);
             return NULL;
         }
+        Py_XDECREF(exc);
+        Py_XDECREF(val);
+        Py_XDECREF(tb);
+
         /* 2018-06-28, 1.16.0 */
         if (DEPRECATE("Applying '+' to a non-numerical array is "
                       "ill-defined. Returning a copy, but in the future "
                       "this will error.") < 0) {
             return NULL;
         }
-        Py_XDECREF(exc);
-        Py_XDECREF(val);
-        Py_XDECREF(tb);
         value = PyArray_Return((PyArrayObject *)PyArray_Copy(m1));
     }
     return value;
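
The reordering above releases the fetched exception references before
DEPRECATE can itself fail and return. The deprecation concerns unary '+'
on non-numerical arrays, roughly (illustrative):

    import warnings
    import numpy as np

    a = np.array(["a", "b"])
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        +a  # deprecated: returns a copy today, will raise in the future
    assert any(issubclass(x.category, DeprecationWarning) for x in w)
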
index 4b018b056719de55f9b101b9ddf665be515a78af..b8230c81a58f597a82144caec73f7f7841207974 100644 (file)
 static void
 _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype);
 
-/* Incref all objects found at this record */
+
 /*NUMPY_API
+ * XINCREF all objects in a single array item. This is complicated for
+ * structured datatypes where the position of objects needs to be extracted.
+ * The function is executed recursively for each nested field or subarray
+ * dtype, such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`.
  */
 NPY_NO_EXPORT void
 PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
@@ -51,11 +55,37 @@ PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
             PyArray_Item_INCREF(data + offset, new);
         }
     }
+    else if (PyDataType_HASSUBARRAY(descr)) {
+        int size, i, inner_elsize;
+
+        inner_elsize = descr->subarray->base->elsize;
+        if (inner_elsize == 0) {
+            /* There cannot be any elements, so return */
+            return;
+        }
+        /* Subarrays are always contiguous in memory */
+        size = descr->elsize / inner_elsize;
+
+        for (i = 0; i < size; i++){
+            /* Recursively increment the reference count of subarray elements */
+            PyArray_Item_INCREF(data + i * inner_elsize,
+                                descr->subarray->base);
+        }
+    }
+    else {
+        /* This path should not be reachable. */
+        assert(0);
+    }
     return;
 }
 
-/* XDECREF all objects found at this record */
+
 /*NUMPY_API
+ *
+ * XDECREF all objects in a single array item. This is complicated for
+ * structured datatypes where the position of objects needs to be extracted.
+ * The function is executed recursively for each nested field or subarray
+ * dtype, such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`.
  */
 NPY_NO_EXPORT void
 PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
@@ -87,6 +117,27 @@ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
                 PyArray_Item_XDECREF(data + offset, new);
             }
         }
+    else if (PyDataType_HASSUBARRAY(descr)) {
+        int size, i, inner_elsize;
+
+        inner_elsize = descr->subarray->base->elsize;
+        if (inner_elsize == 0) {
+            /* There cannot be any elements, so return */
+            return;
+        }
+        /* Subarrays are always contiguous in memory */
+        size = descr->elsize / inner_elsize;
+
+        for (i = 0; i < size; i++){
+            /* Recursively decrement the reference count of subarray elements */
+            PyArray_Item_XDECREF(data + i * inner_elsize,
+                                 descr->subarray->base);
+        }
+    }
+    else {
+        /* This path should not be reachable. */
+        assert(0);
+    }
     return;
 }
 
@@ -258,6 +309,10 @@ _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
             Py_XDECREF(arr);
         }
     }
+    if (dtype->type_num == NPY_OBJECT) {
+        Py_XINCREF(obj);
+        NPY_COPY_PYOBJECT_PTR(optr, &obj);
+    }
     else if (PyDataType_HASFIELDS(dtype)) {
         PyObject *key, *value, *title = NULL;
         PyArray_Descr *new;
@@ -274,15 +329,26 @@ _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
             _fillobject(optr + offset, obj, new);
         }
     }
-    else {
-        npy_intp i;
-        npy_intp nsize = dtype->elsize / sizeof(obj);
+    else if (PyDataType_HASSUBARRAY(dtype)) {
+        int size, i, inner_elsize;
 
-        for (i = 0; i < nsize; i++) {
-            Py_XINCREF(obj);
-            NPY_COPY_PYOBJECT_PTR(optr, &obj);
-            optr += sizeof(obj);
+        inner_elsize = dtype->subarray->base->elsize;
+        if (inner_elsize == 0) {
+            /* There cannot be any elements, so return */
+            return;
+        }
+        /* Subarrays are always contiguous in memory */
+        size = dtype->elsize / inner_elsize;
+
+        /* Call _fillobject on each item recursively. */
+        for (i = 0; i < size; i++){
+            _fillobject(optr, obj, dtype->subarray->base);
+            optr += inner_elsize;
         }
-        return;
     }
+    else {
+        /* This path should not be reachable. */
+        assert(0);
+    }
+    return;
 }
index 2f71c8ae974677b494a596911cf2d4c8f3b3fe1d..52de31289d0368de128ff6e8bf5ef1d3f6a91b29 100644 (file)
@@ -2599,6 +2599,8 @@ NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type = {
 static void
 void_dealloc(PyVoidScalarObject *v)
 {
+    _dealloc_cached_buffer_info((PyObject *)v);
+
     if (v->flags & NPY_ARRAY_OWNDATA) {
         npy_free_cache(v->obval, Py_SIZE(v));
     }
index 8e8090002780c7eba3d010790aae7899fda2c17b..2e8fb514f337a5efc9a7f78820affaba94b489ee 100644 (file)
@@ -40,19 +40,27 @@ maintainer email:  oliphant.travis@ieee.org
 
 NPY_NO_EXPORT PyArray_Descr **userdescrs=NULL;
 
-static int *
-_append_new(int *types, int insert)
+static int
+_append_new(int **p_types, int insert)
 {
     int n = 0;
     int *newtypes;
+    int *types = *p_types;
 
     while (types[n] != NPY_NOTYPE) {
         n++;
     }
     newtypes = (int *)realloc(types, (n + 2)*sizeof(int));
+    if (newtypes == NULL) {
+        PyErr_NoMemory();
+        return -1;
+    }
     newtypes[n] = insert;
     newtypes[n + 1] = NPY_NOTYPE;
-    return newtypes;
+
+    /* Replace the passed-in pointer */
+    *p_types = newtypes;
+    return 0;
 }
 
 static npy_bool
@@ -247,10 +255,13 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
          */
         if (descr->f->cancastto == NULL) {
             descr->f->cancastto = (int *)malloc(1*sizeof(int));
+            if (descr->f->cancastto == NULL) {
+                PyErr_NoMemory();
+                return -1;
+            }
             descr->f->cancastto[0] = NPY_NOTYPE;
         }
-        descr->f->cancastto = _append_new(descr->f->cancastto,
-                                          totype);
+        return _append_new(&descr->f->cancastto, totype);
     }
     else {
         /* register with cancastscalarkindto */
@@ -258,6 +269,10 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
             int i;
             descr->f->cancastscalarkindto =
                 (int **)malloc(NPY_NSCALARKINDS* sizeof(int*));
+            if (descr->f->cancastscalarkindto == NULL) {
+                PyErr_NoMemory();
+                return -1;
+            }
             for (i = 0; i < NPY_NSCALARKINDS; i++) {
                 descr->f->cancastscalarkindto[i] = NULL;
             }
@@ -265,11 +280,13 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
         if (descr->f->cancastscalarkindto[scalar] == NULL) {
             descr->f->cancastscalarkindto[scalar] =
                 (int *)malloc(1*sizeof(int));
+            if (descr->f->cancastscalarkindto[scalar] == NULL) {
+                PyErr_NoMemory();
+                return -1;
+            }
             descr->f->cancastscalarkindto[scalar][0] =
                 NPY_NOTYPE;
         }
-        descr->f->cancastscalarkindto[scalar] =
-            _append_new(descr->f->cancastscalarkindto[scalar], totype);
+        return _append_new(&descr->f->cancastscalarkindto[scalar], totype);
     }
-    return 0;
 }
index c2bd28d60fc7d54ca739372d4e341627cfe2b0fc..84af86009752b98efde4dff82b4bb346756685b2 100644 (file)
@@ -301,15 +301,23 @@ npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
             npy_set_floatstatus_underflow();
         }
 #endif
+        /*
+         * Usually the significand is shifted by 13 bits. For subnormals an
+         * additional shift needs to occur. This extra shift is 1 for the
+         * largest exponent still giving a subnormal
+         * (`f_exp = 0x38000000 >> 23 = 112`), which positions the
+         * now-explicit leading bit. At most the extra shift is 1+10 bits.
+         */
         f_sig >>= (113 - f_exp);
         /* Handle rounding by adding 1 to the bit beyond half precision */
 #if NPY_HALF_ROUND_TIES_TO_EVEN
         /*
          * If the last bit in the half significand is 0 (already even), and
          * the remaining bit pattern is 1000...0, then we do not add one
-         * to the bit after the half significand.  In all other cases, we do.
+         * to the bit after the half significand. However, the (113 - f_exp)
+         * shift can discard up to 11 low bits, so the || term re-checks
+         * them in the original float. In all other cases, we can just add one.
          */
-        if ((f_sig&0x00003fffu) != 0x00001000u) {
+        if (((f_sig&0x00003fffu) != 0x00001000u) || (f&0x000007ffu)) {
             f_sig += 0x00001000u;
         }
 #else
@@ -416,7 +424,16 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
             npy_set_floatstatus_underflow();
         }
 #endif
-        d_sig >>= (1009 - d_exp);
+        /*
+         * Unlike floats, doubles have enough room to shift left to align
+         * the subnormal significand, so none of the last bits are lost.
+         * The smallest possible exponent giving a subnormal is
+         * `d_exp = 0x3e60000000000000 >> 52 = 998`; all larger subnormals
+         * are shifted left relative to it. This adds 10+1 bits to the
+         * final right shift compared to the one in the normal branch.
+         */
+        assert(d_exp - 998 >= 0);
+        d_sig <<= (d_exp - 998);
         /* Handle rounding by adding 1 to the bit beyond half precision */
 #if NPY_HALF_ROUND_TIES_TO_EVEN
         /*
@@ -424,13 +441,13 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
          * the remaining bit pattern is 1000...0, then we do not add one
          * to the bit after the half significand.  In all other cases, we do.
          */
-        if ((d_sig&0x000007ffffffffffULL) != 0x0000020000000000ULL) {
-            d_sig += 0x0000020000000000ULL;
+        if ((d_sig&0x003fffffffffffffULL) != 0x0010000000000000ULL) {
+            d_sig += 0x0010000000000000ULL;
         }
 #else
-        d_sig += 0x0000020000000000ULL;
+        d_sig += 0x0010000000000000ULL;
 #endif
-        h_sig = (npy_uint16) (d_sig >> 42);
+        h_sig = (npy_uint16) (d_sig >> 53);
         /*
          * If the rounding causes a bit to spill into h_exp, it will
          * increment h_exp from zero to one and h_sig will be zero.
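
The corrected double-to-half path keeps the whole subnormal significand
until rounding, so ties round to even and near-ties are no longer lost.
A quick boundary check (assuming the default round-ties-to-even build):

    import numpy as np

    # 2**-25 is exactly halfway between 0 and the smallest subnormal
    # float16 (2**-24); ties-to-even rounds it down to zero
    assert np.float16(2.0 ** -25) == 0.0

    # anything strictly above the halfway point must round up, which the
    # old code could miss after shifting the low bits away
    assert np.float16(2.0 ** -25 * (1 + 2.0 ** -20)) == np.float16(2.0 ** -24)
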
index b831d5c2aa528abfb7f9e18424048acc65ee4bec..5c6e235e01b80d6305ada53b3234d3db09b73977 100644 (file)
@@ -114,6 +114,7 @@ PyMODINIT_FUNC init_struct_ufunc_tests(void)
                                 dtypes,
                                 NULL);
 
+    Py_DECREF(dtype);
     d = PyModule_GetDict(m);
 
     PyDict_SetItemString(d, "add_triplet", add_triplet);
index 8cb74f177fd2e199df550ea65a8f2780de4cdb4d..6c3bcce7133a7daff2c7f1eab706964afa38a226 100644 (file)
@@ -564,7 +564,7 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
         core_dim_sizes = Py_None;
     }
     Py_DECREF(f);
-    return Py_BuildValue("iOOOO", core_enabled, core_num_dims,
+    return Py_BuildValue("iNNNN", core_enabled, core_num_dims,
                          core_dim_ixs, core_dim_flags, core_dim_sizes);
 
 fail:
index ae3ece77bb71628aa1496acdb1d37a46cbff2817..975a5e6b833204ac86cf668e49021e157bc35050 100644 (file)
@@ -1603,7 +1603,7 @@ TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, voi
         else {
             if (in2 == 0) {
                 npy_set_floatstatus_divbyzero();
-                *((npy_timedelta *)op1) = 0;
+                *((npy_timedelta *)op1) = NPY_DATETIME_NAT;
             }
             else {
                 /* handle mixed case the way Python does */
@@ -1619,6 +1619,62 @@ TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, voi
     }
 }
 
+NPY_NO_EXPORT void
+TIMEDELTA_mm_q_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+    BINARY_LOOP {
+        const npy_timedelta in1 = *(npy_timedelta *)ip1;
+        const npy_timedelta in2 = *(npy_timedelta *)ip2;
+        if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) {
+            npy_set_floatstatus_invalid();
+            *((npy_int64 *)op1) = 0;
+        }
+        else if (in2 == 0) {
+            npy_set_floatstatus_divbyzero();
+            *((npy_int64 *)op1) = 0;
+        }
+        else {
+            if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) {
+                *((npy_int64 *)op1) = in1/in2 - 1;
+            }
+            else {
+                *((npy_int64 *)op1) = in1/in2;
+            }
+        }
+    }
+}
+
+NPY_NO_EXPORT void
+TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+    BINARY_LOOP_TWO_OUT {
+        const npy_timedelta in1 = *(npy_timedelta *)ip1;
+        const npy_timedelta in2 = *(npy_timedelta *)ip2;
+        if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) {
+            npy_set_floatstatus_invalid();
+            *((npy_int64 *)op1) = 0;
+            *((npy_timedelta *)op2) = NPY_DATETIME_NAT;
+        }
+        else if (in2 == 0) {
+            npy_set_floatstatus_divbyzero();
+            *((npy_int64 *)op1) = 0;
+            *((npy_timedelta *)op2) = NPY_DATETIME_NAT;
+        }
+        else {
+            const npy_int64 quo = in1 / in2;
+            const npy_timedelta rem = in1 % in2;
+            if ((in1 > 0) == (in2 > 0) || rem == 0) {
+                *((npy_int64 *)op1) = quo;
+                *((npy_timedelta *)op2) = rem;
+            }
+            else {
+                *((npy_int64 *)op1) = quo - 1;
+                *((npy_timedelta *)op2) = rem + in2;
+            }
+        }
+    }
+}
+
 /*
  *****************************************************************************
  **                             FLOAT LOOPS                                 **
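
Both new timedelta64 loops floor toward negative infinity and keep the
invariant in1 == quo * in2 + rem with rem taking the divisor's sign,
mirroring Python's divmod. For instance:

    import numpy as np

    a = np.timedelta64(7, "s")
    b = np.timedelta64(-4, "s")

    assert a // b == -2                  # floored, not truncated
    q, r = divmod(a, b)
    assert q == -2
    assert r == np.timedelta64(-1, "s")  # remainder has the divisor's sign
    assert q * b + r == a
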
index 9b6327308fe550d79ed9fe81a7a9e593372ac867..5264a6533ee848fccd0ea2c1d87bd1d0daa8fad8 100644 (file)
@@ -473,9 +473,15 @@ TIMEDELTA_md_m_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
 NPY_NO_EXPORT void
 TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
 
+NPY_NO_EXPORT void
+TIMEDELTA_mm_q_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
 NPY_NO_EXPORT void
 TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
 
+NPY_NO_EXPORT void
+TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
 /* Special case equivalents to above functions */
 
 #define TIMEDELTA_mq_m_true_divide TIMEDELTA_mq_m_divide
index 6d04ce37224a0f9d33f96c31388ef34c6c4d4c85..791d3693f3c703baa7ef807d079f62279aef2df4 100644 (file)
@@ -186,7 +186,6 @@ conform_reduce_result(int ndim, npy_bool *axis_flags,
             return NULL;
         }
 
-        Py_INCREF(ret);
         if (PyArray_SetWritebackIfCopyBase(ret_copy, (PyArrayObject *)ret) < 0) {
             Py_DECREF(ret);
             Py_DECREF(ret_copy);
index a2df5869838f3e9f3eb74e91db2a7173b3beb0b8..1ab48bb903f3bdcbb79ba246797dd4f620f3fb67 100644 (file)
@@ -3063,6 +3063,8 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
     Py_XDECREF(axis);
     Py_XDECREF(full_args.in);
     Py_XDECREF(full_args.out);
+    PyArray_free(remap_axis_memory);
+    PyArray_free(remap_axis);
 
     NPY_UF_DBG_PRINT1("Returning code %d\n", retval);
 
index ec60d9cfd0a4ab4df8041cf523430b152826ba54..c2d81fc5d990355ea3c66b5d1c6548643d0ef501 100644 (file)
@@ -1114,7 +1114,16 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
             }
             out_dtypes[1] = out_dtypes[0];
             Py_INCREF(out_dtypes[1]);
+
+            /*
+             * TODO: split function into truediv and floordiv resolvers
+             */
+            if (strcmp(ufunc->name, "floor_divide") == 0) {
+                out_dtypes[2] = PyArray_DescrFromType(NPY_LONGLONG);
+            }
+            else {
             out_dtypes[2] = PyArray_DescrFromType(NPY_DOUBLE);
+            }
             if (out_dtypes[2] == NULL) {
                 Py_DECREF(out_dtypes[0]);
                 out_dtypes[0] = NULL;
@@ -2247,3 +2256,52 @@ type_tuple_type_resolver(PyUFuncObject *self,
 
     return -1;
 }
+
+NPY_NO_EXPORT int
+PyUFunc_DivmodTypeResolver(PyUFuncObject *ufunc,
+                                NPY_CASTING casting,
+                                PyArrayObject **operands,
+                                PyObject *type_tup,
+                                PyArray_Descr **out_dtypes)
+{
+    int type_num1, type_num2;
+    int i;
+
+    type_num1 = PyArray_DESCR(operands[0])->type_num;
+    type_num2 = PyArray_DESCR(operands[1])->type_num;
+
+    /* Use the default when datetime and timedelta are not involved */
+    if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) {
+        return PyUFunc_DefaultTypeResolver(ufunc, casting, operands,
+                    type_tup, out_dtypes);
+    }
+    if (type_num1 == NPY_TIMEDELTA) {
+        if (type_num2 == NPY_TIMEDELTA) {
+            out_dtypes[0] = PyArray_PromoteTypes(PyArray_DESCR(operands[0]),
+                                                PyArray_DESCR(operands[1]));
+            out_dtypes[1] = out_dtypes[0];
+            Py_INCREF(out_dtypes[1]);
+            out_dtypes[2] = PyArray_DescrFromType(NPY_LONGLONG);
+            Py_INCREF(out_dtypes[2]);
+            out_dtypes[3] = out_dtypes[0];
+            Py_INCREF(out_dtypes[3]);
+        }
+        else {
+            return raise_binary_type_reso_error(ufunc, operands);
+        }
+    }
+    else {
+        return raise_binary_type_reso_error(ufunc, operands);
+    }
+
+    /* Check against the casting rules */
+    if (PyUFunc_ValidateCasting(ufunc, casting, operands, out_dtypes) < 0) {
+        for (i = 0; i < 4; ++i) {
+            Py_DECREF(out_dtypes[i]);
+            out_dtypes[i] = NULL;
+        }
+        return -1;
+    }
+
+    return 0;
+}
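
The resolver pins the output dtypes for the timedelta64 // timedelta64
case: the quotient as NPY_LONGLONG and the remainder in the promoted
timedelta unit. Observable from Python as (a sketch; the exact unit
follows the promotion rules):

    import numpy as np

    q, r = np.divmod(np.timedelta64(10, "s"), np.timedelta64(3, "s"))
    assert q.dtype == np.dtype(np.int64)
    assert r.dtype == np.dtype("m8[s]")
    assert q == 3 and r == np.timedelta64(1, "s")
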
index 2f37af7532e32cc01f07444e184b10826d8295af..78313b1ef6bf2a9a32e4b5ca2b9c16eb8ec5f58b 100644 (file)
@@ -99,6 +99,13 @@ PyUFunc_RemainderTypeResolver(PyUFuncObject *ufunc,
                               PyObject *type_tup,
                               PyArray_Descr **out_dtypes);
 
+NPY_NO_EXPORT int
+PyUFunc_DivmodTypeResolver(PyUFuncObject *ufunc,
+                              NPY_CASTING casting,
+                              PyArrayObject **operands,
+                              PyObject *type_tup,
+                              PyArray_Descr **out_dtypes);
+
 /*
  * Does a linear search for the best inner loop of the ufunc.
  *
index 5de19fec2e237edc8effd7bf0df5cc759e7caf3c..b334a89da113b0d51e8b155b4d90b1b253874706 100644 (file)
@@ -174,11 +174,17 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
     char *docstr, *newdocstr;
 
 #if defined(NPY_PY3K)
+    PyObject *tmp;
+
     if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc,
                                         &PyUnicode_Type, &str)) {
         return NULL;
     }
-    docstr = PyBytes_AS_STRING(PyUnicode_AsUTF8String(str));
+    tmp = PyUnicode_AsUTF8String(str);
+    if (tmp == NULL) {
+        return NULL;
+    }
+    docstr = PyBytes_AS_STRING(tmp);
 #else
     if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc,
                                          &PyString_Type, &str)) {
@@ -190,6 +196,9 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
     if (NULL != ufunc->doc) {
         PyErr_SetString(PyExc_ValueError,
                 "Cannot change docstring of ufunc with non-NULL docstring");
+#if defined(NPY_PY3K)
+        Py_DECREF(tmp);
+#endif
         return NULL;
     }
 
@@ -203,6 +212,9 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
     strcpy(newdocstr, docstr);
     ufunc->doc = newdocstr;
 
+#if defined(NPY_PY3K)
+    Py_DECREF(tmp);
+#endif
     Py_RETURN_NONE;
 }
 
index 7a858d2e22467f1e887c1861d7bce2e3e0988701..f2b8fdca714e846ff851b9ea3c7b564a35dc91fc 100644 (file)
@@ -90,6 +90,7 @@ class TestArrayRepr(object):
         assert_equal(repr(x),
             'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
         assert_equal(str(x), '...')
+        x[()] = 0  # resolve circular references for garbage collector
 
         # nested 0d-subclass-object
         x = sub(None)
@@ -124,11 +125,13 @@ class TestArrayRepr(object):
         arr0d[()] = arr0d
         assert_equal(repr(arr0d),
             'array(array(..., dtype=object), dtype=object)')
+        arr0d[()] = 0  # resolve recursion for garbage collector
 
         arr1d = np.array([None, None])
         arr1d[1] = arr1d
         assert_equal(repr(arr1d),
             'array([None, array(..., dtype=object)], dtype=object)')
+        arr1d[1] = 0  # resolve recursion for garbage collector
 
         first = np.array(None)
         second = np.array(None)
@@ -136,6 +139,7 @@ class TestArrayRepr(object):
         second[()] = first
         assert_equal(repr(first),
             'array(array(array(..., dtype=object), dtype=object), dtype=object)')
+        first[()] = 0  # resolve circular references for garbage collector
 
     def test_containing_list(self):
         # printing square brackets directly would be ambiguous
index b2ce0402ae7c2552c70627bec31cd24a79aa4181..9832b4275103afcc2a2337c2e989a3964089083d 100644 (file)
@@ -1081,6 +1081,133 @@ class TestDateTime(object):
                 check(np.timedelta64(0), f, nat)
                 check(nat, f, nat)
 
+    @pytest.mark.parametrize("op1, op2, exp", [
+        # m8 same units round down
+        (np.timedelta64(7, 's'),
+         np.timedelta64(4, 's'),
+         1),
+        # m8 same units round down with negative
+        (np.timedelta64(7, 's'),
+         np.timedelta64(-4, 's'),
+         -2),
+        # m8 same units negative no round down
+        (np.timedelta64(8, 's'),
+         np.timedelta64(-4, 's'),
+         -2),
+        # m8 different units
+        (np.timedelta64(1, 'm'),
+         np.timedelta64(31, 's'),
+         1),
+        # m8 generic units
+        (np.timedelta64(1890),
+         np.timedelta64(31),
+         60),
+        # Y // M works
+        (np.timedelta64(2, 'Y'),
+         np.timedelta64('13', 'M'),
+         1),
+        # handle 1D arrays
+        (np.array([1, 2, 3], dtype='m8'),
+         np.array([2], dtype='m8'),
+         np.array([0, 1, 1], dtype=np.int64)),
+        ])
+    def test_timedelta_floor_divide(self, op1, op2, exp):
+        assert_equal(op1 // op2, exp)
+
+    @pytest.mark.parametrize("op1, op2", [
+        # div by 0
+        (np.timedelta64(10, 'us'),
+         np.timedelta64(0, 'us')),
+        # div with NaT
+        (np.timedelta64('NaT'),
+         np.timedelta64(50, 'us')),
+        # special case for int64 min
+        # in integer floor division
+        (np.timedelta64(np.iinfo(np.int64).min),
+         np.timedelta64(-1)),
+        ])
+    def test_timedelta_floor_div_warnings(self, op1, op2):
+        with assert_warns(RuntimeWarning):
+            actual = op1 // op2
+            assert_equal(actual, 0)
+            assert_equal(actual.dtype, np.int64)
+
+    @pytest.mark.parametrize("val1, val2", [
+        # the smallest integer that can't be represented
+        # exactly in a double should be preserved if we avoid
+        # casting to double in floordiv operation
+        (9007199254740993, 1),
+        # stress the alternate floordiv code path where
+        # operand signs don't match and remainder isn't 0
+        (9007199254740999, -2),
+        ])
+    def test_timedelta_floor_div_precision(self, val1, val2):
+        op1 = np.timedelta64(val1)
+        op2 = np.timedelta64(val2)
+        actual = op1 // op2
+        # Python reference integer floor
+        expected = val1 // val2
+        assert_equal(actual, expected)
+
+    @pytest.mark.parametrize("val1, val2", [
+        # years and months sometimes can't be unambiguously
+        # divided for floor division operation
+        (np.timedelta64(7, 'Y'),
+         np.timedelta64(3, 's')),
+        (np.timedelta64(7, 'M'),
+         np.timedelta64(1, 'D')),
+        ])
+    def test_timedelta_floor_div_error(self, val1, val2):
+        with assert_raises_regex(TypeError, "common metadata divisor"):
+            val1 // val2
+
+    @pytest.mark.parametrize("op1, op2", [
+        # reuse the test cases from floordiv
+        (np.timedelta64(7, 's'),
+         np.timedelta64(4, 's')),
+        # m8 same units round down with negative
+        (np.timedelta64(7, 's'),
+         np.timedelta64(-4, 's')),
+        # m8 same units negative no round down
+        (np.timedelta64(8, 's'),
+         np.timedelta64(-4, 's')),
+        # m8 different units
+        (np.timedelta64(1, 'm'),
+         np.timedelta64(31, 's')),
+        # m8 generic units
+        (np.timedelta64(1890),
+         np.timedelta64(31)),
+        # Y // M works
+        (np.timedelta64(2, 'Y'),
+         np.timedelta64('13', 'M')),
+        # handle 1D arrays
+        (np.array([1, 2, 3], dtype='m8'),
+         np.array([2], dtype='m8')),
+        ])
+    def test_timedelta_divmod(self, op1, op2):
+        expected = (op1 // op2, op1 % op2)
+        assert_equal(divmod(op1, op2), expected)
+
+    @pytest.mark.parametrize("op1, op2", [
+        # reuse cases from floordiv
+        # div by 0
+        (np.timedelta64(10, 'us'),
+         np.timedelta64(0, 'us')),
+        # div with NaT
+        (np.timedelta64('NaT'),
+         np.timedelta64(50, 'us')),
+        # special case for int64 min
+        # in integer floor division
+        (np.timedelta64(np.iinfo(np.int64).min),
+         np.timedelta64(-1)),
+        ])
+    def test_timedelta_divmod_warnings(self, op1, op2):
+        with assert_warns(RuntimeWarning):
+            expected = (op1 // op2, op1 % op2)
+        with assert_warns(RuntimeWarning):
+            actual = divmod(op1, op2)
+        assert_equal(actual, expected)
+
     def test_datetime_divide(self):
         for dta, tda, tdb, tdc, tdd in \
                     [
@@ -1111,8 +1238,6 @@ class TestDateTime(object):
             assert_equal(tda / tdd, 60.0)
             assert_equal(tdd / tda, 1.0 / 60.0)
 
-            # m8 // m8
-            assert_raises(TypeError, np.floor_divide, tda, tdb)
             # int / m8
             assert_raises(TypeError, np.divide, 2, tdb)
             # float / m8
@@ -1680,7 +1805,7 @@ class TestDateTime(object):
     def test_timedelta_modulus_div_by_zero(self):
         with assert_warns(RuntimeWarning):
             actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
-            assert_equal(actual, np.timedelta64(0, 's'))
+            assert_equal(actual, np.timedelta64('NaT'))
 
     @pytest.mark.parametrize("val1, val2", [
         # cases where one operand is not
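
A short illustration (not part of the patch) of the timedelta64 semantics the
new tests pin down: // floors toward negative infinity, units are reconciled,
and divmod is consistent with // and %.

    import numpy as np

    np.timedelta64(7, 's') // np.timedelta64(-4, 's')       # -2, rounds down
    np.timedelta64(1, 'm') // np.timedelta64(31, 's')       # 1, after unit conversion
    divmod(np.timedelta64(7, 's'), np.timedelta64(4, 's'))  # (1, numpy.timedelta64(3,'s'))
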
index c55751e3cf388adbc70d29bfbefc2422d4421c94..8f371197cd7299528ea966c6b64e5b576a307fcd 100644 (file)
@@ -4,10 +4,12 @@ import sys
 import operator
 import pytest
 import ctypes
+import gc
 
 import numpy as np
 from numpy.core._rational_tests import rational
-from numpy.testing import assert_, assert_equal, assert_raises
+from numpy.testing import (
+    assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
 from numpy.core.numeric import pickle
 
 def assert_dtype_equal(a, b):
@@ -446,6 +448,173 @@ class TestSubarray(object):
         assert_equal(t1.alignment, t2.alignment)
 
 
+def iter_struct_object_dtypes():
+    """
+    Iterates over a few complex dtypes and object patterns which
+    fill the array with a given object (defaults to a singleton).
+
+    Yields
+    ------
+    dtype : dtype
+    pattern : tuple
+        Structured tuple for use with `np.array`.
+    count : int
+        Number of objects stored in the dtype.
+    singleton : object
+        A singleton object. The returned pattern is constructed so that
+        all objects inside the datatype are set to the singleton.
+    """
+    obj = object()
+
+    dt = np.dtype([('b', 'O', (2, 3))])
+    p = ([[obj] * 3] * 2,)
+    yield pytest.param(dt, p, 6, obj, id="<subarray>")
+
+    dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
+    p = (0, [[obj] * 3] * 2)
+    yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
+
+    dt = np.dtype([('a', 'i4'),
+                   ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
+    p = (0, [[(obj, 0)] * 3] * 2)
+    yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
+
+    dt = np.dtype([('a', 'i4'),
+                   ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
+    p = (0, [[(obj, obj)] * 3] * 2)
+    yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+class TestStructuredObjectRefcounting:
+    """These tests cover various uses of complicated structured types which
+    include objects and thus require reference counting.
+    """
+    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+                             iter_struct_object_dtypes())
+    @pytest.mark.parametrize(["creation_func", "creation_obj"], [
+        pytest.param(np.empty, None,
+             # None is probably used for too many things
+             marks=pytest.mark.skip("unreliable due to python's behaviour")),
+        (np.ones, 1),
+        (np.zeros, 0)])
+    def test_structured_object_create_delete(self, dt, pat, count, singleton,
+                                             creation_func, creation_obj):
+        """Structured object reference counting in creation and deletion"""
+        # The test assumes that 0, 1, and None are singletons.
+        gc.collect()
+        before = sys.getrefcount(creation_obj)
+        arr = creation_func(3, dt)
+
+        now = sys.getrefcount(creation_obj)
+        assert now - before == count * 3
+        del arr
+        now = sys.getrefcount(creation_obj)
+        assert now == before
+
+    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+                             iter_struct_object_dtypes())
+    def test_structured_object_item_setting(self, dt, pat, count, singleton):
+        """Structured object reference counting for simple item setting"""
+        one = 1
+
+        gc.collect()
+        before = sys.getrefcount(singleton)
+        arr = np.array([pat] * 3, dt)
+        assert sys.getrefcount(singleton) - before == count * 3
+        # Fill with `1` and check that it was replaced correctly:
+        before2 = sys.getrefcount(one)
+        arr[...] = one
+        after2 = sys.getrefcount(one)
+        assert after2 - before2 == count * 3
+        del arr
+        gc.collect()
+        assert sys.getrefcount(one) == before2
+        assert sys.getrefcount(singleton) == before
+
+    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+                             iter_struct_object_dtypes())
+    @pytest.mark.parametrize(
+        ['shape', 'index', 'items_changed'],
+        [((3,), ([0, 2],), 2),
+         ((3, 2), ([0, 2], slice(None)), 4),
+         ((3, 2), ([0, 2], [1]), 2),
+         ((3,), ([True, False, True]), 2)])
+    def test_structured_object_indexing(self, shape, index, items_changed,
+                                        dt, pat, count, singleton):
+        """Structured object reference counting for advanced indexing."""
+        zero = 0
+        one = 1
+
+        arr = np.zeros(shape, dt)
+
+        gc.collect()
+        before_zero = sys.getrefcount(zero)
+        before_one = sys.getrefcount(one)
+        # Test item getting:
+        part = arr[index]
+        after_zero = sys.getrefcount(zero)
+        assert after_zero - before_zero == count * items_changed
+        del part
+        # Test item setting:
+        arr[index] = one
+        gc.collect()
+        after_zero = sys.getrefcount(zero)
+        after_one = sys.getrefcount(one)
+        assert before_zero - after_zero == count * items_changed
+        assert after_one - before_one == count * items_changed
+
+    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+                             iter_struct_object_dtypes())
+    def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
+        """Structured object reference counting for specialized functions.
+        The older functions such as take and repeat use different code paths
+        than item setting (at the time of writing).
+        """
+        indices = [0, 1]
+
+        arr = np.array([pat] * 3, dt)
+        gc.collect()
+        before = sys.getrefcount(singleton)
+        res = arr.take(indices)
+        after = sys.getrefcount(singleton)
+        assert after - before == count * 2
+        new = res.repeat(10)
+        gc.collect()
+        after_repeat = sys.getrefcount(singleton)
+        assert after_repeat - after == count * 2 * 10
+
+
+class TestStructuredDtypeSparseFields(object):
+    """Tests subarray fields which contain sparse dtypes so that
+    not all memory is used by the dtype work. Such dtype's should
+    leave the underlying memory unchanged.
+    """
+    dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
+                             'offsets':[0, 4]}, (2, 3))])
+    sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
+                                    'offsets':[4]}, (2, 3))])
+
+    @pytest.mark.xfail(reason="inaccessible data is changed, see gh-12686.")
+    @pytest.mark.valgrind_error(reason="reads from uninitialized buffers.")
+    def test_sparse_field_assignment(self):
+        arr = np.zeros(3, self.dtype)
+        sparse_arr = arr.view(self.sparse_dtype)
+
+        sparse_arr[...] = np.finfo(np.float32).max
+        # dtype is reduced when accessing the field, so shape is (3, 2, 3):
+        assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
+
+    def test_sparse_field_assignment_fancy(self):
+        # Fancy assignment goes to the copyswap function for complex types:
+        arr = np.zeros(3, self.dtype)
+        sparse_arr = arr.view(self.sparse_dtype)
+
+        sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
+        # dtype is reduced when accessing the field, so shape is (3, 2, 3):
+        assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
+
+
 class TestMonsterType(object):
     """Test deeply nested subtypes."""
 
index b28c933db956a7ae9411fd94a838a2bab8fa767c..7707125014cff074d5b033244d19d5760975bad8 100644 (file)
@@ -69,6 +69,85 @@ class TestHalf(object):
         j = np.array(i_f16, dtype=int)
         assert_equal(i_int, j)
 
+    @pytest.mark.parametrize("offset", [None, "up", "down"])
+    @pytest.mark.parametrize("shift", [None, "up", "down"])
+    @pytest.mark.parametrize("float_t", [np.float32, np.float64])
+    def test_half_conversion_rounding(self, float_t, shift, offset):
+        # Assumes that round to even is used during casting.
+        max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
+
+        # Test all (positive) finite numbers, denormals are most interesting
+        # however:
+        f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16)
+        f16s_float = f16s_patterns.view(np.float16).astype(float_t)
+
+        # Shift the values by half a bit up or down (or do not shift).
+        if shift == "up":
+            f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
+        elif shift == "down":
+            f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
+        else:
+            f16s_float = f16s_float[1:-1]
+
+        # Increase the float by a minimal value:
+        if offset == "up":
+            f16s_float = np.nextafter(f16s_float, float_t(1e50))
+        elif offset == "down":
+            f16s_float = np.nextafter(f16s_float, float_t(-1e50))
+
+        # Convert back to float16 and its bit pattern:
+        res_patterns = f16s_float.astype(np.float16).view(np.uint16)
+
+        # The above calculation tries the original values, or the exact
+        # midpoints between the float16 values, and then further offsets them
+        # by as little as possible. If no offset occurs, "round to even"
+        # logic is necessary; an arbitrarily small offset should always cause
+        # normal up/down rounding.
+
+        # Calculate the expected pattern:
+        cmp_patterns = f16s_patterns[1:-1].copy()
+
+        if shift == "down" and offset != "up":
+            shift_pattern = -1
+        elif shift == "up" and offset != "down":
+            shift_pattern = 1
+        else:
+            # There cannot be a shift: either shift is None, so all rounding
+            # returns the original value, or the shift is cancelled out by
+            # the offset.
+            shift_pattern = 0
+
+        # If rounding occurs, is it normal rounding or round to even?
+        if offset is None:
+            # Round to even occurs, modify only non-even, cast to allow + (-1)
+            cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
+        else:
+            cmp_patterns.view(np.int16)[...] += shift_pattern
+
+        assert_equal(res_patterns, cmp_patterns)
+
+    @pytest.mark.parametrize(["float_t", "uint_t", "bits"],
+                             [(np.float32, np.uint32, 23),
+                              (np.float64, np.uint64, 52)])
+    def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
+        # Test specifically that all bits are considered when deciding
+        # whether round to even should occur (i.e. no bits are lost at the
+        # end). Compare also gh-12721. The most bits can be lost for the
+        # smallest denormal:
+        smallest_value = np.uint16(1).view(np.float16).astype(float_t)
+        assert smallest_value == 2**-24
+
+        # Will be rounded to zero based on round to even rule:
+        rounded_to_zero = smallest_value / float_t(2)
+        assert rounded_to_zero.astype(np.float16) == 0
+
+        # The significand will be all 0 for the float_t, test that we do not
+        # lose the lower ones of these:
+        for i in range(bits):
+            # slightly increasing the value should make it round up:
+            larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i)
+            larger_value = larger_pattern.view(float_t)
+            assert larger_value.astype(np.float16) == smallest_value
+
     def test_nans_infs(self):
         with np.errstate(all='ignore'):
             # Check some of the ufuncs
index 06cabe2cbf22049ac4678ad38a29459b0d3acf01..8a196308cc628279a95bd670294dee6b909f21e9 100644 (file)
@@ -3769,10 +3769,16 @@ class TestPickling(object):
                                                    ('c', float)])
             ]
 
+            refs = [weakref.ref(a) for a in DATA]
             for a in DATA:
                 assert_equal(
                         a, pickle.loads(pickle.dumps(a, protocol=proto)),
                         err_msg="%r" % a)
+            del a, DATA, carray
+            gc.collect()
+            # check for reference leaks (gh-12793)
+            for ref in refs:
+                assert ref() is None
 
     def _loads(self, obj):
         if sys.version_info[0] >= 3:
@@ -7009,12 +7015,11 @@ class TestArrayAttributeDeletion(object):
             assert_raises(AttributeError, delattr, a, s)
 
 
-def test_array_interface():
-    # Test scalar coercion within the array interface
+class TestArrayInterface():
     class Foo(object):
         def __init__(self, value):
             self.value = value
-            self.iface = {'typestr': '=f8'}
+            self.iface = {'typestr': 'f8'}
 
         def __float__(self):
             return float(self.value)
@@ -7023,22 +7028,39 @@ def test_array_interface():
         def __array_interface__(self):
             return self.iface
 
+
     f = Foo(0.5)
-    assert_equal(np.array(f), 0.5)
-    assert_equal(np.array([f]), [0.5])
-    assert_equal(np.array([f, f]), [0.5, 0.5])
-    assert_equal(np.array(f).dtype, np.dtype('=f8'))
-    # Test various shape definitions
-    f.iface['shape'] = ()
-    assert_equal(np.array(f), 0.5)
-    f.iface['shape'] = None
-    assert_raises(TypeError, np.array, f)
-    f.iface['shape'] = (1, 1)
-    assert_equal(np.array(f), [[0.5]])
-    f.iface['shape'] = (2,)
-    assert_raises(ValueError, np.array, f)
-
-    # test scalar with no shape
+
+    @pytest.mark.parametrize('val, iface, expected', [
+        (f, {}, 0.5),
+        ([f], {}, [0.5]),
+        ([f, f], {}, [0.5, 0.5]),
+        (f, {'shape': ()}, 0.5),
+        (f, {'shape': None}, TypeError),
+        (f, {'shape': (1, 1)}, [[0.5]]),
+        (f, {'shape': (2,)}, ValueError),
+        (f, {'strides': ()}, 0.5),
+        (f, {'strides': (2,)}, ValueError),
+        (f, {'strides': 16}, TypeError),
+        ])
+    def test_scalar_interface(self, val, iface, expected):
+        # Test scalar coercion within the array interface
+        self.f.iface = {'typestr': 'f8'}
+        self.f.iface.update(iface)
+        if HAS_REFCOUNT:
+            pre_cnt = sys.getrefcount(np.dtype('f8'))
+        if isinstance(expected, type):
+            assert_raises(expected, np.array, val)
+        else:
+            result = np.array(val)
+            assert_equal(np.array(val), expected)
+            assert result.dtype == 'f8'
+            del result
+        if HAS_REFCOUNT:
+            post_cnt = sys.getrefcount(np.dtype('f8'))
+            assert_equal(pre_cnt, post_cnt)
+
+def test_interface_no_shape():
     class ArrayLike(object):
         array = np.array(1)
         __array_interface__ = array.__array_interface__
@@ -7211,6 +7233,7 @@ class TestConversion(object):
         except NameError:
             Error = RuntimeError  # python < 3.5
         assert_raises(Error, bool, self_containing)  # previously stack overflow
+        self_containing[0] = None  # resolve circular reference
 
     def test_to_int_scalar(self):
         # gh-9972 means that these aren't always the same
@@ -7712,6 +7735,8 @@ class TestWritebackIfCopy(object):
         # uses arr_insert
         np.place(a, a>2, [44, 55])
         assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
+        # hit one of the failing paths
+        assert_raises(ValueError, np.place, a, a>20, [])
 
     def test_put_noncontiguous(self):
         a = np.arange(6).reshape(2,3).T # force non-c-contiguous
@@ -8068,3 +8093,43 @@ def test_getfield():
     pytest.raises(ValueError, a.getfield, 'uint8', -1)
     pytest.raises(ValueError, a.getfield, 'uint8', 16)
     pytest.raises(ValueError, a.getfield, 'uint64', 0)
+
+def test_multiarray_module():
+    # gh-12736
+    # numpy 1.16 replaced the multiarray and umath c-extension modules with
+    # a single _multiarray_umath one. For backward compatibility, it added a
+    # pure-python multiarray.py and umath.py shim so people can still do
+    # from numpy.core.multiarray import something-public-api
+    # It turns out pip can leave old pieces of previous versions of numpy
+    # around when installing a newer version. If the old c-extension modules
+    # are found, they will be given precedence over the new pure-python ones.
+    #
+    # This test copies a multiarray c-extension in parallel with the pure-
+    # python one, and starts another python interpreter to load multiarray.
+    # The expectation is that import will fail.
+    import subprocess, shutil
+    core_dir = os.path.dirname(np.core.multiarray.__file__)
+    cextension = np.core._multiarray_umath.__file__
+    testfile = cextension.replace('_multiarray_umath', '_multiarray_module_test')
+    badfile = cextension.replace('_multiarray_umath', 'multiarray')
+    assert not os.path.exists(badfile), '%s exists, this numpy ' \
+                                    'installation is faulty' % badfile
+    try:
+        shutil.copy(testfile, badfile)
+        p = subprocess.Popen([sys.executable, '-c', 'import numpy' ],
+                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                env=os.environ.copy())
+        stdout, stderr = p.communicate()
+        r = p.wait()
+        #print(stdout.decode())
+        #print(stderr.decode())
+        assert r != 0
+        assert b'ImportError' in stderr
+    finally:
+        if os.path.exists(badfile):
+            try:
+                # can this fail?
+                os.remove(badfile)
+            except:
+                print("Could not remove %s, remove it by hand" % badfile)
+                raise
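
The scalar __array_interface__ coercion covered by the rewritten
TestArrayInterface, as a standalone sketch (not part of the patch): an object
exposing only a 'typestr' (and optionally a 'shape') is coerced through its
__float__ method.

    import numpy as np

    class Foo(object):
        __array_interface__ = {'typestr': 'f8'}
        def __float__(self):
            return 0.5

    np.array(Foo())           # array(0.5)
    np.array([Foo(), Foo()])  # array([0.5, 0.5])
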
index 62b2a3e5380be466a5c1ca315559f9c083bada97..8f1c16539bacffd61196aa6f4d413957f9eba150 100644 (file)
@@ -7,7 +7,7 @@ import numpy as np
 from numpy.testing import (
     assert_, assert_equal, assert_raises, assert_raises_regex)
 from numpy.core.overrides import (
-    get_overloaded_types_and_args, array_function_dispatch,
+    _get_implementing_args, array_function_dispatch,
     verify_matching_signatures, ENABLE_ARRAY_FUNCTION)
 from numpy.core.numeric import pickle
 import pytest
@@ -18,11 +18,6 @@ requires_array_function = pytest.mark.skipif(
     reason="__array_function__ dispatch not enabled.")
 
 
-def _get_overloaded_args(relevant_args):
-    types, args = get_overloaded_types_and_args(relevant_args)
-    return args
-
-
 def _return_not_implemented(self, *args, **kwargs):
     return NotImplemented
 
@@ -41,26 +36,21 @@ def dispatched_two_arg(array1, array2):
 
 
 @requires_array_function
-class TestGetOverloadedTypesAndArgs(object):
+class TestGetImplementingArgs(object):
 
     def test_ndarray(self):
         array = np.array(1)
 
-        types, args = get_overloaded_types_and_args([array])
-        assert_equal(set(types), {np.ndarray})
+        args = _get_implementing_args([array])
         assert_equal(list(args), [array])
 
-        types, args = get_overloaded_types_and_args([array, array])
-        assert_equal(len(types), 1)
-        assert_equal(set(types), {np.ndarray})
+        args = _get_implementing_args([array, array])
         assert_equal(list(args), [array])
 
-        types, args = get_overloaded_types_and_args([array, 1])
-        assert_equal(set(types), {np.ndarray})
+        args = _get_implementing_args([array, 1])
         assert_equal(list(args), [array])
 
-        types, args = get_overloaded_types_and_args([1, array])
-        assert_equal(set(types), {np.ndarray})
+        args = _get_implementing_args([1, array])
         assert_equal(list(args), [array])
 
     def test_ndarray_subclasses(self):
@@ -75,17 +65,14 @@ class TestGetOverloadedTypesAndArgs(object):
         override_sub = np.array(1).view(OverrideSub)
         no_override_sub = np.array(1).view(NoOverrideSub)
 
-        types, args = get_overloaded_types_and_args([array, override_sub])
-        assert_equal(set(types), {np.ndarray, OverrideSub})
+        args = _get_implementing_args([array, override_sub])
         assert_equal(list(args), [override_sub, array])
 
-        types, args = get_overloaded_types_and_args([array, no_override_sub])
-        assert_equal(set(types), {np.ndarray, NoOverrideSub})
+        args = _get_implementing_args([array, no_override_sub])
         assert_equal(list(args), [no_override_sub, array])
 
-        types, args = get_overloaded_types_and_args(
+        args = _get_implementing_args(
             [override_sub, no_override_sub])
-        assert_equal(set(types), {OverrideSub, NoOverrideSub})
         assert_equal(list(args), [override_sub, no_override_sub])
 
     def test_ndarray_and_duck_array(self):
@@ -96,12 +83,10 @@ class TestGetOverloadedTypesAndArgs(object):
         array = np.array(1)
         other = Other()
 
-        types, args = get_overloaded_types_and_args([other, array])
-        assert_equal(set(types), {np.ndarray, Other})
+        args = _get_implementing_args([other, array])
         assert_equal(list(args), [other, array])
 
-        types, args = get_overloaded_types_and_args([array, other])
-        assert_equal(set(types), {np.ndarray, Other})
+        args = _get_implementing_args([array, other])
         assert_equal(list(args), [array, other])
 
     def test_ndarray_subclass_and_duck_array(self):
@@ -116,9 +101,9 @@ class TestGetOverloadedTypesAndArgs(object):
         subarray = np.array(1).view(OverrideSub)
         other = Other()
 
-        assert_equal(_get_overloaded_args([array, subarray, other]),
+        assert_equal(_get_implementing_args([array, subarray, other]),
                      [subarray, array, other])
-        assert_equal(_get_overloaded_args([array, other, subarray]),
+        assert_equal(_get_implementing_args([array, other, subarray]),
                      [subarray, array, other])
 
     def test_many_duck_arrays(self):
@@ -140,15 +125,26 @@ class TestGetOverloadedTypesAndArgs(object):
         c = C()
         d = D()
 
-        assert_equal(_get_overloaded_args([1]), [])
-        assert_equal(_get_overloaded_args([a]), [a])
-        assert_equal(_get_overloaded_args([a, 1]), [a])
-        assert_equal(_get_overloaded_args([a, a, a]), [a])
-        assert_equal(_get_overloaded_args([a, d, a]), [a, d])
-        assert_equal(_get_overloaded_args([a, b]), [b, a])
-        assert_equal(_get_overloaded_args([b, a]), [b, a])
-        assert_equal(_get_overloaded_args([a, b, c]), [b, c, a])
-        assert_equal(_get_overloaded_args([a, c, b]), [c, b, a])
+        assert_equal(_get_implementing_args([1]), [])
+        assert_equal(_get_implementing_args([a]), [a])
+        assert_equal(_get_implementing_args([a, 1]), [a])
+        assert_equal(_get_implementing_args([a, a, a]), [a])
+        assert_equal(_get_implementing_args([a, d, a]), [a, d])
+        assert_equal(_get_implementing_args([a, b]), [b, a])
+        assert_equal(_get_implementing_args([b, a]), [b, a])
+        assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
+        assert_equal(_get_implementing_args([a, c, b]), [c, b, a])
+
+    def test_too_many_duck_arrays(self):
+        namespace = dict(__array_function__=_return_not_implemented)
+        types = [type('A' + str(i), (object,), namespace) for i in range(33)]
+        relevant_args = [t() for t in types]
+
+        actual = _get_implementing_args(relevant_args[:32])
+        assert_equal(actual, relevant_args[:32])
+
+        with assert_raises_regex(TypeError, 'distinct argument types'):
+            _get_implementing_args(relevant_args)
 
 
 @requires_array_function
@@ -201,6 +197,14 @@ class TestNDArrayArrayFunction(object):
         result = np.concatenate((array, override_sub))
         assert_equal(result, expected.view(OverrideSub))
 
+    def test_no_wrapper(self):
+        array = np.array(1)
+        func = dispatched_one_arg.__wrapped__
+        with assert_raises_regex(AttributeError, '__wrapped__'):
+            array.__array_function__(func=func,
+                                     types=(np.ndarray,),
+                                     args=(array,), kwargs={})
+
 
 @requires_array_function
 class TestArrayFunctionDispatch(object):
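
For orientation (an illustration, not part of the patch), the protocol these
tests exercise in miniature; note that in 1.16 dispatch is only active when
NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1 is set in the environment.

    import numpy as np

    class MyArray(object):
        def __array_function__(self, func, types, args, kwargs):
            # a real duck array would reimplement func; here we just tag it
            return 'intercepted ' + func.__name__

    np.concatenate([MyArray(), MyArray()])  # 'intercepted concatenate'
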
index 2421a11616ad7daaf992e3f6e864f2e402ffe427..472a83696a8f4d201aefeb342c5ba93330c8704d 100644 (file)
@@ -46,7 +46,7 @@ class TestRegression(object):
             assert_array_equal(a, b)
 
     def test_typeNA(self):
-        # Issue gh-515 
+        # Issue gh-515
         with suppress_warnings() as sup:
             sup.filter(np.VisibleDeprecationWarning)
             assert_equal(np.typeNA[np.int64], 'Int64')
@@ -2411,7 +2411,41 @@ class TestRegression(object):
             if HAS_REFCOUNT:
                 assert_(base <= sys.getrefcount(s))
 
+    @pytest.mark.parametrize('val', [
+        # arrays and scalars
+        np.ones((10, 10), dtype='int32'),
+        np.uint64(10),
+        ])
+    @pytest.mark.parametrize('protocol',
+        range(2, pickle.HIGHEST_PROTOCOL + 1)
+        )
+    def test_pickle_module(self, protocol, val):
+        # gh-12837
+        s = pickle.dumps(val, protocol)
+        assert b'_multiarray_umath' not in s
+        if protocol == 5 and len(val.shape) > 0:
+            # unpickling ndarray goes through _frombuffer for protocol 5
+            assert b'numpy.core.numeric' in s
+        else:
+            assert b'numpy.core.multiarray' in s
+
     def test_object_casting_errors(self):
         # gh-11993
         arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
         assert_raises(TypeError, arr.astype, 'c8')
+
+    def test_eff1d_casting(self):
+        # gh-12711
+        x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
+        res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+        assert_equal(res, [-99,   1,   2,   3,  -7,  88,  99])
+        assert_raises(ValueError, np.ediff1d, x, to_begin=(1<<20))
+        assert_raises(ValueError, np.ediff1d, x, to_end=(1<<20))
+
+    def test_pickle_datetime64_array(self):
+        # gh-12745 (would fail with pickle5 installed)
+        d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
+        arr = np.array([d])
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            dumped = pickle.dumps(arr, protocol=proto)
+            assert_equal(pickle.loads(dumped), arr)
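
The module-path property that test_pickle_module asserts, as a standalone
check (not part of the patch):

    import pickle
    import numpy as np

    s = pickle.dumps(np.uint64(10), protocol=2)
    assert b'numpy.core.multiarray' in s   # the stable, public path
    assert b'_multiarray_umath' not in s   # the private module must not leak
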
index ef5c118eccf33d4248e56510eb0932949ae56435..b996321c2ee4888ede6972f9bcb48dc895db0f29 100644 (file)
@@ -373,6 +373,10 @@ def test_stack():
     # empty arrays
     assert_(stack([[], [], []]).shape == (3, 0))
     assert_(stack([[], [], []], axis=1).shape == (0, 3))
+    # out
+    out = np.zeros_like(r1)
+    np.stack((a, b), out=out)
+    assert_array_equal(out, r1)
     # edge cases
     assert_raises_regex(ValueError, 'need at least one array', stack, [])
     assert_raises_regex(ValueError, 'must have the same shape',
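
The new ``out`` coverage for np.stack, spelled out (an illustration, not part
of the patch):

    import numpy as np

    a, b = np.array([1, 2, 3]), np.array([4, 5, 6])
    out = np.zeros((2, 3), dtype=int)
    np.stack((a, b), out=out)  # writes the stacked result into out
    assert (out == [[1, 2, 3], [4, 5, 6]]).all()
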
index 9eefbc9f4b7a4368ccf890303ede2510297c1a43..78aa59ddc7ec17b33b3dc641c7594802a2814246 100644 (file)
@@ -346,27 +346,157 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
     return klass
 
 
-def _get_typecodes():
-    """ Return a dictionary mapping __array_interface__ formats to ctypes types """
-    ct = ctypes
-    simple_types = [
-        ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
-        ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
-        ct.c_float, ct.c_double,
-    ]
+if ctypes is not None:
+    def _ctype_ndarray(element_type, shape):
+        """ Create an ndarray of the given element type and shape """
+        for dim in shape[::-1]:
+            element_type = dim * element_type
+            # prevent the type name from including np.ctypeslib
+            element_type.__module__ = None
+        return element_type
 
-    return {_dtype(ctype).str: ctype for ctype in simple_types}
 
+    def _get_scalar_type_map():
+        """
+        Return a dictionary mapping native endian scalar dtype to ctypes types
+        """
+        ct = ctypes
+        simple_types = [
+            ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
+            ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
+            ct.c_float, ct.c_double,
+            ct.c_bool,
+        ]
+        return {_dtype(ctype): ctype for ctype in simple_types}
 
-def _ctype_ndarray(element_type, shape):
-    """ Create an ndarray of the given element type and shape """
-    for dim in shape[::-1]:
-        element_type = element_type * dim
-    return element_type
 
+    _scalar_type_map = _get_scalar_type_map()
+
+
+    def _ctype_from_dtype_scalar(dtype):
+        # swapping twice ensures that `=` is promoted to <, >, or |
+        dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')
+        dtype_native = dtype.newbyteorder('=')
+        try:
+            ctype = _scalar_type_map[dtype_native]
+        except KeyError:
+            raise NotImplementedError(
+                "Converting {!r} to a ctypes type".format(dtype)
+            )
+
+        if dtype_with_endian.byteorder == '>':
+            ctype = ctype.__ctype_be__
+        elif dtype_with_endian.byteorder == '<':
+            ctype = ctype.__ctype_le__
+
+        return ctype
+
+
+    def _ctype_from_dtype_subarray(dtype):
+        element_dtype, shape = dtype.subdtype
+        ctype = _ctype_from_dtype(element_dtype)
+        return _ctype_ndarray(ctype, shape)
+
+
+    def _ctype_from_dtype_structured(dtype):
+        # extract offsets of each field
+        field_data = []
+        for name in dtype.names:
+            field_dtype, offset = dtype.fields[name][:2]
+            field_data.append((offset, name, _ctype_from_dtype(field_dtype)))
+
+        # ctypes doesn't care about field order
+        field_data = sorted(field_data, key=lambda f: f[0])
+
+        if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data):
+            # union, if multiple fields all at address 0
+            size = 0
+            _fields_ = []
+            for offset, name, ctype in field_data:
+                _fields_.append((name, ctype))
+                size = max(size, ctypes.sizeof(ctype))
+
+            # pad to the right size
+            if dtype.itemsize != size:
+                _fields_.append(('', ctypes.c_char * dtype.itemsize))
+
+            # we inserted manual padding, so always `_pack_`
+            return type('union', (ctypes.Union,), dict(
+                _fields_=_fields_,
+                _pack_=1,
+                __module__=None,
+            ))
+        else:
+            last_offset = 0
+            _fields_ = []
+            for offset, name, ctype in field_data:
+                padding = offset - last_offset
+                if padding < 0:
+                    raise NotImplementedError("Overlapping fields")
+                if padding > 0:
+                    _fields_.append(('', ctypes.c_char * padding))
+
+                _fields_.append((name, ctype))
+                last_offset = offset + ctypes.sizeof(ctype)
+
+
+            padding = dtype.itemsize - last_offset
+            if padding > 0:
+                _fields_.append(('', ctypes.c_char * padding))
+
+            # we inserted manual padding, so always `_pack_`
+            return type('struct', (ctypes.Structure,), dict(
+                _fields_=_fields_,
+                _pack_=1,
+                __module__=None,
+            ))
+
+
+    def _ctype_from_dtype(dtype):
+        if dtype.fields is not None:
+            return _ctype_from_dtype_structured(dtype)
+        elif dtype.subdtype is not None:
+            return _ctype_from_dtype_subarray(dtype)
+        else:
+            return _ctype_from_dtype_scalar(dtype)
+
+
+    def as_ctypes_type(dtype):
+        """
+        Convert a dtype into a ctypes type.
+
+        Parameters
+        ----------
+        dtype : dtype
+            The dtype to convert
+
+        Returns
+        -------
+        ctypes
+            A ctype scalar, union, array, or struct
+
+        Raises
+        ------
+        NotImplementedError
+            If the conversion is not possible
+
+        Notes
+        -----
+        This function does not losslessly round-trip in either direction.
+
+        ``np.dtype(as_ctypes_type(dt))`` will:
+         - insert padding fields
+         - reorder fields to be sorted by offset
+         - discard field titles
+
+        ``as_ctypes_type(np.dtype(ctype))`` will:
+         - discard the class names of ``Structure``s and ``Union``s
+         - convert single-element ``Union``s into single-element ``Structure``s
+         - insert padding fields
+
+        """
+        return _ctype_from_dtype(_dtype(dtype))
 
-if ctypes is not None:
-    _typecodes = _get_typecodes()
 
     def as_array(obj, shape=None):
         """
@@ -388,6 +518,7 @@ if ctypes is not None:
 
         return array(obj, copy=False)
 
+
     def as_ctypes(obj):
         """Create and return a ctypes object from a numpy array.  Actually
         anything that exposes the __array_interface__ is accepted."""
@@ -399,7 +530,8 @@ if ctypes is not None:
         addr, readonly = ai["data"]
         if readonly:
             raise TypeError("readonly arrays unsupported")
-        tp = _ctype_ndarray(_typecodes[ai["typestr"]], ai["shape"])
-        result = tp.from_address(addr)
+
+        dtype = _dtype((ai["typestr"], ai["shape"]))
+        result = as_ctypes_type(dtype).from_address(addr)
         result.__keep = obj
         return result
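
A quick usage sketch for the new np.ctypeslib.as_ctypes_type (not part of the
patch): a packed structured dtype becomes a ctypes.Structure subclass whose
size matches the dtype's itemsize.

    import ctypes
    import numpy as np

    dt = np.dtype([('x', np.int32), ('y', np.float64)])
    ctype = np.ctypeslib.as_ctypes_type(dt)
    assert issubclass(ctype, ctypes.Structure)
    assert ctypes.sizeof(ctype) == dt.itemsize  # 12, no alignment padding
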
index 12b32832ea8102c7ebcc6b0b7ebbe33aac8c94e5..001dea5ec609b02b1c56ec260afcc34708da6c25 100644 (file)
@@ -466,10 +466,8 @@ class FCompiler(CCompiler):
         noarch = self.distutils_vars.get('noarch', noopt)
         debug = self.distutils_vars.get('debug', False)
 
-        f77 = shlex.split(self.command_vars.compiler_f77,
-                          posix=(os.name == 'posix'))
-        f90 = shlex.split(self.command_vars.compiler_f90,
-                          posix=(os.name == 'posix'))
+        f77 = self.command_vars.compiler_f77
+        f90 = self.command_vars.compiler_f90
 
         f77flags = []
         f90flags = []
@@ -477,8 +475,10 @@ class FCompiler(CCompiler):
         fixflags = []
 
         if f77:
+            f77 = shlex.split(f77, posix=(os.name == 'posix'))
             f77flags = self.flag_vars.f77
         if f90:
+            f90 = shlex.split(f90, posix=(os.name == 'posix'))
             f90flags = self.flag_vars.f90
             freeflags = self.flag_vars.free
         # XXX Assuming that free format is default for f90 compiler.
@@ -490,8 +490,8 @@ class FCompiler(CCompiler):
         # environment variable has been customized by CI or a user
         # should perhaps eventually be more throughly tested and more
         # robustly handled
-        fix = shlex.split(fix, posix=(os.name == 'posix'))
         if fix:
+            fix = shlex.split(fix, posix=(os.name == 'posix'))
             fixflags = self.flag_vars.fix + f90flags
 
         oflags, aflags, dflags = [], [], []
index 2c3edfe023924932e97ecfe75af2d786287b033f..d14fee0e18e3aa8b85987abff2b4404e220c5b84 100644 (file)
@@ -66,7 +66,7 @@ class AbsoftFCompiler(FCompiler):
 
     def library_dir_option(self, dir):
         if os.name=='nt':
-            return ['-link', '/PATH:"%s"' % (dir)]
+            return ['-link', '/PATH:%s' % (dir)]
         return "-L" + dir
 
     def library_option(self, lib):
index 48978458045d8e848d82fb74f25ac7410665b5f4..4238f35cba6a03fe8e171165cc06d791097338ed 100644 (file)
@@ -1,6 +1,7 @@
 from __future__ import division, absolute_import, print_function
 
 import os
+import warnings
 from distutils.dist import Distribution
 
 __metaclass__ = type
@@ -54,8 +55,18 @@ class EnvironmentConfig(object):
         if envvar is not None:
             envvar_contents = os.environ.get(envvar)
             if envvar_contents is not None:
-                if var and append and os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
-                    var = var + [envvar_contents]
+                if var and append:
+                    if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
+                        var = var + [envvar_contents]
+                    else:
+                        var = envvar_contents
+                        if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys():
+                            msg = "{} is used as is, not appended ".format(envvar) + \
+                                  "to flags already defined " + \
+                                  "by numpy.distutils! Use NPY_DISTUTILS_APPEND_FLAGS=1 " + \
+                                  "to obtain appending behavior instead (this " + \
+                                  "behavior will become default in a future release)."
+                            warnings.warn(msg, UserWarning, stacklevel=3)
                 else:
                     var = envvar_contents
         if confvar is not None and self._conf:
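
In practice (a sketch, not part of the patch) the warning above means: to have
an environment flag variable appended to the flags numpy.distutils computes,
rather than replace them, opt in explicitly before building.

    import os

    os.environ['NPY_DISTUTILS_APPEND_FLAGS'] = '1'  # append instead of override
    os.environ['FFLAGS'] = '-mtune=native'          # hypothetical extra flag
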
index 81769e562c6a38e510903f2ace80c4eccda628f3..965c67041082b1ff0fdfcaca613c8c315186c1cd 100644 (file)
@@ -269,8 +269,11 @@ class GnuFCompiler(FCompiler):
             # Linux/Solaris/Unix support RPATH, Windows and AIX do not
             raise NotImplementedError
 
+        # TODO: could use -Xlinker here, if it's supported
+        assert "," not in dir
+
         sep = ',' if sys.platform == 'darwin' else '='
-        return '-Wl,-rpath%s"%s"' % (sep, dir)
+        return '-Wl,-rpath%s%s' % (sep, dir)
 
 
 class Gnu95FCompiler(GnuFCompiler):
index 217eac8fbea3d89d595411afc626fed3fd7b930e..51f6812743a28a47232e5d2f0cd4590328c4833a 100644 (file)
@@ -23,7 +23,10 @@ class BaseIntelFCompiler(FCompiler):
                                            f + '.f', '-o', f + '.o']
 
     def runtime_library_dir_option(self, dir):
-        return '-Wl,-rpath="%s"' % dir
+        # TODO: could use -Xlinker here, if it's supported
+        assert "," not in dir
+
+        return '-Wl,-rpath=%s' % dir
 
 
 class IntelFCompiler(BaseIntelFCompiler):
index 99071800adad3eee0a1eb52a0a387f3ac2890e7d..9c51947fd80335e4a33cbbcf82d4193f885f2629 100644 (file)
@@ -33,7 +33,7 @@ class PGroupFCompiler(FCompiler):
             'compiler_f77': ["pgfortran"],
             'compiler_fix': ["pgfortran", "-Mfixed"],
             'compiler_f90': ["pgfortran"],
-            'linker_so': ["pgfortran", "-shared", "-fpic"],
+            'linker_so': ["pgfortran"],
             'archiver': ["ar", "-cr"],
             'ranlib': ["ranlib"]
         }
@@ -56,8 +56,12 @@ class PGroupFCompiler(FCompiler):
         def get_flags_linker_so(self):
             return ["-dynamic", '-undefined', 'dynamic_lookup']
 
+    else:
+        def get_flags_linker_so(self):
+            return ["-shared", '-fpic']
+
     def runtime_library_dir_option(self, dir):
-        return '-R"%s"' % dir
+        return '-R%s' % dir
 
 
 if sys.version_info >= (3, 5):
index d477d33087b07d6dbfa0eb5629403634bf4eae5c..561ea854f9826a23cb4004d94e6d233c8c22262e 100644 (file)
@@ -44,7 +44,7 @@ class SunFCompiler(FCompiler):
         return opt
 
     def runtime_library_dir_option(self, dir):
-        return '-R"%s"' % dir
+        return '-R%s' % dir
 
 if __name__ == '__main__':
     from distutils import log
index cd63cc849a373ac161986cabb6aaadac0b84838b..befe53b3dd430f7c3a9c0c9dbbff5692280b9d67 100644 (file)
@@ -164,6 +164,17 @@ _bits = {'32bit': 32, '64bit': 64}
 platform_bits = _bits[platform.architecture()[0]]
 
 
+def _c_string_literal(s):
+    """
+    Convert a python string into a literal suitable for inclusion into C code
+    """
+    # only these three characters are forbidden in C strings
+    s = s.replace('\\', r'\\')
+    s = s.replace('"',  r'\"')
+    s = s.replace('\n', r'\n')
+    return '"{}"'.format(s)
+
+
 def libpaths(paths, bits):
     """Return a list of library paths valid on 32 or 64 bit systems.
 
@@ -1497,7 +1508,7 @@ Make sure that -lgfortran is used for C++ extensions.
             atlas_version = os.environ.get('ATLAS_VERSION', None)
         if atlas_version:
             dict_append(info, define_macros=[(
-                'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
+                'ATLAS_INFO', _c_string_literal(atlas_version))
             ])
         else:
             dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
@@ -1518,7 +1529,7 @@ Make sure that -lgfortran is used for C++ extensions.
         dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
     else:
         dict_append(info, define_macros=[(
-            'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
+            'ATLAS_INFO', _c_string_literal(atlas_version))
         ])
     result = _cached_atlas_version[key] = atlas_version, info
     return result
@@ -2063,7 +2074,7 @@ class _numpy_info(system_info):
             if vrs is None:
                 continue
             macros = [(self.modulename.upper() + '_VERSION',
-                      '"\\"%s\\""' % (vrs)),
+                      _c_string_literal(vrs)),
                       (self.modulename.upper(), None)]
             break
         dict_append(info, define_macros=macros)
@@ -2268,7 +2279,7 @@ class _pkg_config_info(system_info):
         version = self.get_config_output(config_exe, self.version_flag)
         if version:
             macros.append((self.__class__.__name__.split('.')[-1].upper(),
-                           '"\\"%s\\""' % (version)))
+                           _c_string_literal(version)))
             if self.version_macro_name:
                 macros.append((self.version_macro_name + '_%s'
                                % (version.replace('.', '_')), None))
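
The escaping rule that _c_string_literal centralizes, restated standalone (not
part of the patch): exactly backslash, double quote, and newline need escaping
inside a C string literal.

    def c_string_literal(s):
        # mirrors numpy.distutils.system_info._c_string_literal
        s = s.replace('\\', r'\\').replace('"', r'\"').replace('\n', r'\n')
        return '"{}"'.format(s)

    assert c_string_literal('3.10.3') == '"3.10.3"'
    assert c_string_literal('a"b') == r'"a\"b"'
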
index 95e44b051307e82d5aba3dce4698b32cd8ccb1db..ba19a97ea6968d49fc18db2b6841a258152240a1 100644 (file)
@@ -1,6 +1,8 @@
 from __future__ import division, absolute_import, print_function
 
-from numpy.testing import assert_
+import pytest
+
+from numpy.testing import assert_, suppress_warnings
 import numpy.distutils.fcompiler
 
 customizable_flags = [
@@ -25,6 +27,7 @@ def test_fcompiler_flags(monkeypatch):
 
         monkeypatch.setenv(envvar, new_flag)
         new_flags = getattr(flag_vars, opt)
+
         monkeypatch.delenv(envvar)
         assert_(new_flags == [new_flag])
 
@@ -33,12 +36,46 @@ def test_fcompiler_flags(monkeypatch):
     for opt, envvar in customizable_flags:
         new_flag = '-dummy-{}-flag'.format(opt)
         prev_flags = getattr(flag_vars, opt)
-
         monkeypatch.setenv(envvar, new_flag)
         new_flags = getattr(flag_vars, opt)
+
         monkeypatch.delenv(envvar)
         if prev_flags is None:
             assert_(new_flags == [new_flag])
         else:
             assert_(new_flags == prev_flags + [new_flag])
 
+
+def test_fcompiler_flags_append_warning(monkeypatch):
+    # Test that the warning about append behavior changing in a future
+    # release is triggered.  Need to use a real compiler instance so we have
+    # non-empty flags to start with (otherwise the "if var and append" check
+    # will always be false).
+    try:
+        with suppress_warnings() as sup:
+            sup.record()
+            fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
+            fc.customize()
+    except numpy.distutils.fcompiler.CompilerNotFound:
+        pytest.skip("gfortran not found, so can't execute this test")
+
+    # Ensure NPY_DISTUTILS_APPEND_FLAGS not defined
+    monkeypatch.delenv('NPY_DISTUTILS_APPEND_FLAGS', raising=False)
+
+    for opt, envvar in customizable_flags:
+        new_flag = '-dummy-{}-flag'.format(opt)
+        with suppress_warnings() as sup:
+            sup.record()
+            prev_flags = getattr(fc.flag_vars, opt)
+
+        monkeypatch.setenv(envvar, new_flag)
+        with suppress_warnings() as sup:
+            sup.record()
+            new_flags = getattr(fc.flag_vars, opt)
+            if prev_flags:
+                # Check that warning was issued
+                assert len(sup.log) == 1
+
+        monkeypatch.delenv(envvar)
+        assert_(new_flags == [new_flag])
+
index 23a4b7c41a76be289e8bb594b9ad39501e64ec9d..d146739bb2f1b8c5d3dfbfea25bf719037e77a70 100644 (file)
@@ -28,12 +28,16 @@ def compile(source,
             extension='.f'
            ):
     """
-    Build extension module from processing source with f2py.
+    Build extension module from a Fortran 77 source string with f2py.
 
     Parameters
     ----------
-    source : str
+    source : str or bytes
         Fortran source of module / subroutine to compile
+
+        .. versionchanged:: 1.16.0
+           Accept str as well as bytes
+
     modulename : str, optional
         The name of the compiled python module
     extra_args : str or list, optional
@@ -55,6 +59,16 @@ def compile(source,
 
         .. versionadded:: 1.11.0
 
+    Returns
+    -------
+    result : int
+        0 on success
+
+    Examples
+    --------
+    .. include:: compile_session.dat
+        :literal:
+
     """
     import tempfile
     import shlex
@@ -67,9 +81,11 @@ def compile(source,
     else:
         fname = source_fn
 
+    if not isinstance(source, str):
+        source = str(source, 'utf-8')
     try:
         with open(fname, 'w') as f:
-            f.write(str(source))
+            f.write(source)
 
         args = ['-c', '-m', modulename, f.name]
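
A minimal end-to-end use of numpy.f2py.compile with a plain str source (an
illustration, not part of the patch; requires a working Fortran compiler):

    import numpy.f2py

    fsource = '''
          subroutine hello
          print *, "hello from fortran"
          end
    '''
    # returns 0 on success; accepts str as well as bytes since 1.16
    numpy.f2py.compile(fsource, modulename='hello', extension='.f')
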
 
index 8750ed0b3d19e9052733af6e6ba01ad1f4455885..47223151f32fa524a810fda50c176fedf9b78fcd 100755 (executable)
@@ -396,8 +396,25 @@ def dict_append(d_out, d_in):
 
 
 def run_main(comline_list):
-    """Run f2py as if string.join(comline_list,' ') is used as a command line.
-    In case of using -h flag, return None.
+    """
+    Equivalent to running::
+
+        f2py <args>
+
+    where ``<args>=string.join(<list>,' ')``, but in Python.  Unless
+    ``-h`` is used, this function returns a dictionary containing
+    information on generated modules and their dependencies on source
+    files.  For example, the command ``f2py -m scalar scalar.f`` can be
+    executed from Python as shown in the Examples section below.
+
+    You cannot build extension modules with this function; that is,
+    using ``-c`` is not allowed. Use the ``compile`` function instead.
+
+    Examples
+    --------
+    .. include:: run_main_session.dat
+        :literal:
+
     """
     crackfortran.reset_global_f2py_vars()
     f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__))
index 74e0804e2144059aa26c2f64991020f62bcb5aa1..36abf05f9cc7a9ec7ec4d17546b8a88e83f29eb7 100644 (file)
@@ -106,3 +106,20 @@ def test_f2py_init_compile_bad_cmd():
         assert_equal(ret_val, 127)
     finally:
         sys.executable = temp
+
+
+@pytest.mark.parametrize('fsource',
+        ['program test_f2py\nend program test_f2py',
+         b'program test_f2py\nend program test_f2py',])
+def test_compile_from_strings(tmpdir, fsource):
+    # Make sure we can compile str and bytes gh-12796
+    cwd = os.getcwd()
+    try:
+        os.chdir(str(tmpdir))
+        ret_val = numpy.f2py.compile(
+                fsource,
+                modulename='test_compile_from_strings',
+                extension='.f90')
+        assert_equal(ret_val, 0)
+    finally:
+        os.chdir(cwd)
index 30237b76f9a3e10671dbedcf0aa8d0085825669b..463266e2810697f1f5d87db0ce3bea1a59322f07 100644 (file)
@@ -545,6 +545,11 @@ class DataSource(object):
         is accessible if it exists in either location.
 
         """
+
+        # First test for local path
+        if os.path.exists(path):
+            return True
+
         # We import this here because importing urllib2 is slow and
         # a significant fraction of numpy's total import time.
         if sys.version_info[0] >= 3:
@@ -554,10 +559,6 @@ class DataSource(object):
             from urllib2 import urlopen
             from urllib2 import URLError
 
-        # Test local path
-        if os.path.exists(path):
-            return True
-
         # Test cached url
         upath = self.abspath(path)
         if os.path.exists(upath):
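
The reordering above keeps purely local lookups cheap; a usage sketch (not
part of the patch, the path is illustrative):

    import numpy as np

    ds = np.DataSource(destpath=None)
    ds.exists('/tmp/somefile.txt')  # checked on disk first, before urllib loads
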
index fd64ecbd64b130b6253b4ad370bccf39e5849256..3356904ab0254c5aa9661ccd27bbba00891e685d 100644 (file)
@@ -94,8 +94,7 @@ def ediff1d(ary, to_end=None, to_begin=None):
     # force a 1d array
     ary = np.asanyarray(ary).ravel()
 
-    # we have unit tests enforcing
-    # propagation of the dtype of input
+    # enforce propagation of the dtype of input
     # ary to returned result
     dtype_req = ary.dtype
 
@@ -106,23 +105,22 @@ def ediff1d(ary, to_end=None, to_begin=None):
     if to_begin is None:
         l_begin = 0
     else:
-        to_begin = np.asanyarray(to_begin)
-        if not np.can_cast(to_begin, dtype_req):
-            raise TypeError("dtype of to_begin must be compatible "
-                            "with input ary")
-
-        to_begin = to_begin.ravel()
+        _to_begin = np.asanyarray(to_begin, dtype=dtype_req)
+        if not np.all(_to_begin == to_begin):
+            raise ValueError("cannot convert 'to_begin' to array with dtype "
+                            "'%r' as required for input ary" % dtype_req)
+        to_begin = _to_begin.ravel()
         l_begin = len(to_begin)
 
     if to_end is None:
         l_end = 0
     else:
-        to_end = np.asanyarray(to_end)
-        if not np.can_cast(to_end, dtype_req):
-            raise TypeError("dtype of to_end must be compatible "
-                            "with input ary")
-
-        to_end = to_end.ravel()
+        _to_end = np.asanyarray(to_end, dtype=dtype_req)
+        # check that casting has not overflowed
+        if not np.all(_to_end == to_end):
+            raise ValueError("cannot convert 'to_end' to array with dtype "
+                             "'%r' as required for input ary" % dtype_req)
+        to_end = _to_end.ravel()
         l_end = len(to_end)
 
     # do the calculation in place and copy to_begin and to_end
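
The behavioral change to np.ediff1d, condensed (an illustration, not part of
the patch): to_begin/to_end must now be losslessly representable in the input
dtype, and failures raise ValueError instead of TypeError.

    import numpy as np

    x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
    np.ediff1d(x, to_begin=-99, to_end=[88, 99])  # array([-99, 1, 2, 3, -7, 88, 99])
    try:
        np.ediff1d(x, to_begin=1 << 20)  # does not fit in int16
    except ValueError:
        pass
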
index 5f87c8b2c7fa38c484126f4f699c10671c952dac..d9ce3f8a457b3e6489312faa2b65726d0c08c496 100644 (file)
@@ -2590,6 +2590,7 @@ def blackman(M):
 
     Examples
     --------
+    >>> import matplotlib.pyplot as plt
     >>> np.blackman(12)
     array([ -1.38777878e-17,   3.26064346e-02,   1.59903635e-01,
              4.14397981e-01,   7.36045180e-01,   9.67046769e-01,
@@ -2807,6 +2808,7 @@ def hanning(M):
 
     Plot the window and its frequency response:
 
+    >>> import matplotlib.pyplot as plt
     >>> from numpy.fft import fft, fftshift
     >>> window = np.hanning(51)
     >>> plt.plot(window)
@@ -2906,6 +2908,7 @@ def hamming(M):
 
     Plot the window and the frequency response:
 
+    >>> import matplotlib.pyplot as plt
     >>> from numpy.fft import fft, fftshift
     >>> window = np.hamming(51)
     >>> plt.plot(window)
@@ -3180,6 +3183,7 @@ def kaiser(M, beta):
 
     Examples
     --------
+    >>> import matplotlib.pyplot as plt
     >>> np.kaiser(12, 14)
     array([  7.72686684e-06,   3.46009194e-03,   4.65200189e-02,
              2.29737120e-01,   5.99885316e-01,   9.45674898e-01,
@@ -3273,6 +3277,7 @@ def sinc(x):
 
     Examples
     --------
+    >>> import matplotlib.pyplot as plt
     >>> x = np.linspace(-4, 4, 41)
     >>> np.sinc(x)
     array([ -3.89804309e-17,  -4.92362781e-02,  -8.40918587e-02,
index a17fc66e5daf8031344d8aac855ebaf036f0f544..93d4b279f30a74bd4bf206ef0259e0924c8c36e7 100644 (file)
@@ -136,8 +136,8 @@ class TestSetOps(object):
          np.nan),
         # should fail because attempting
         # to downcast to smaller int type:
-        (np.array([1, 2, 3], dtype=np.int32),
-         np.array([5, 7, 2], dtype=np.int64),
+        (np.array([1, 2, 3], dtype=np.int16),
+         np.array([5, 1<<20, 2], dtype=np.int32),
          None),
         # should fail because attempting to cast
         # two special floating point values
@@ -152,8 +152,8 @@ class TestSetOps(object):
         # specifically, raise an appropriate
         # Exception when attempting to append or
         # prepend with an incompatible type
-        msg = 'must be compatible'
-        with assert_raises_regex(TypeError, msg):
+        msg = 'cannot convert'
+        with assert_raises_regex(ValueError, msg):
             ediff1d(ary=ary,
                     to_end=append,
                     to_begin=prepend)
index 27d84860802322f4c7a3ca1b5ba8189d130cdf93..5c840b1112d1a668056176bb7d56852076ce4f97 100644 (file)
@@ -644,7 +644,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
 
     Examples
     --------
-    >>> import matplotlib as mpl
+    >>> from matplotlib.image import NonUniformImage
     >>> import matplotlib.pyplot as plt
 
     Construct a 2-D histogram with variable bin width. First define the bin
@@ -679,7 +679,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
 
     >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
     ...         aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
-    >>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
+    >>> im = NonUniformImage(ax, interpolation='bilinear')
     >>> xcenters = (xedges[:-1] + xedges[1:]) / 2
     >>> ycenters = (yedges[:-1] + yedges[1:]) / 2
     >>> im.set_data(xcenters, ycenters, H)
index 21bc73e544beeb9aac50f7233389e71e6ec91c5a..c0561137bf81024032f0c07c06e114705df39c2d 100644 (file)
@@ -1139,9 +1139,12 @@ cdef class RandomState:
                 raise ValueError("'p' must be 1-dimensional")
             if p.size != pop_size:
                 raise ValueError("'a' and 'p' must have same size")
+            p_sum = kahan_sum(pix, d)
+            if np.isnan(p_sum):
+                raise ValueError("probabilities contain NaN")
             if np.logical_or.reduce(p < 0):
                 raise ValueError("probabilities are not non-negative")
-            if abs(kahan_sum(pix, d) - 1.) > atol:
+            if abs(p_sum - 1.) > atol:
                 raise ValueError("probabilities do not sum to 1")
 
         shape = size
@@ -2166,6 +2169,7 @@ cdef class RandomState:
         >>> NF = np.histogram(nc_vals, bins=50, density=True)
         >>> c_vals = np.random.f(dfnum, dfden, 1000000)
         >>> F = np.histogram(c_vals, bins=50, density=True)
+        >>> import matplotlib.pyplot as plt
         >>> plt.plot(F[1][1:], F[0])
         >>> plt.plot(NF[1][1:], NF[0])
         >>> plt.show()
@@ -2443,6 +2447,7 @@ cdef class RandomState:
         --------
         Draw samples and plot the distribution:
 
+        >>> import matplotlib.pyplot as plt
         >>> s = np.random.standard_cauchy(1000000)
         >>> s = s[(s>-25) & (s<25)]  # truncate distribution so it plots well
         >>> plt.hist(s, bins=100)
@@ -3279,6 +3284,7 @@ cdef class RandomState:
 
         >>> loc, scale = 10, 1
         >>> s = np.random.logistic(loc, scale, 10000)
+        >>> import matplotlib.pyplot as plt
         >>> count, bins, ignored = plt.hist(s, bins=50)
 
         #   plot against distribution
@@ -3479,6 +3485,7 @@ cdef class RandomState:
         --------
         Draw values from the distribution and plot the histogram
 
+        >>> from matplotlib.pyplot import hist
         >>> values = hist(np.random.rayleigh(3, 100000), bins=200, density=True)
 
         Wave heights tend to follow a Rayleigh distribution. If the mean wave
@@ -4233,6 +4240,7 @@ cdef class RandomState:
         >>> ngood, nbad, nsamp = 100, 2, 10
         # number of good, number of bad, and number of samples
         >>> s = np.random.hypergeometric(ngood, nbad, nsamp, 1000)
+        >>> from matplotlib.pyplot import hist
         >>> hist(s)
         #   note that it is very unlikely to grab both bad items
 
@@ -4342,6 +4350,7 @@ cdef class RandomState:
 
         >>> a = .6
         >>> s = np.random.logseries(a, 10000)
+        >>> import matplotlib.pyplot as plt
         >>> count, bins, ignored = plt.hist(s)
 
         #   plot against distribution
@@ -4710,6 +4719,7 @@ cdef class RandomState:
 
         >>> s = np.random.dirichlet((10, 5, 3), 20).transpose()
 
+        >>> import matplotlib.pyplot as plt
         >>> plt.barh(range(20), s[0])
         >>> plt.barh(range(20), s[1], left=s[0], color='g')
         >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')
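
The ``mtrand.pyx`` change computes the probability sum once and rejects NaN
before the other validity checks. Previously NaN probabilities could slip
through entirely, because both ``p < 0`` and ``abs(p_sum - 1.) > atol``
evaluate to False when NaN is involved. With the fix::

    import numpy as np

    np.random.choice([42, 1, 2], p=[np.nan, np.nan, np.nan])
    # ValueError: probabilities contain NaN
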
index d0bb92a738954a259836a46691711c4c1a20d2d6..d4721bc62c9263e81dd5a861ee3246a50baab258 100644 (file)
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -448,6 +448,11 @@ class TestRandomDist(object):
         assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape, (3, 0, 4))
         assert_raises(ValueError, np.random.choice, [], 10)
 
+    def test_choice_nan_probabilities(self):
+        a = np.array([42, 1, 2])
+        p = [None, None, None]
+        assert_raises(ValueError, np.random.choice, a, p=p)
+
     def test_bytes(self):
         np.random.seed(self.seed)
         actual = np.random.bytes(10)
index 1728d9d1f5b34ab7fb092c0f74f11104fc22117f..19569a5098a1b745c2859c9137248c7d652b3e86 100644 (file)
--- a/numpy/testing/_private/nosetester.py
+++ b/numpy/testing/_private/nosetester.py
@@ -92,7 +92,7 @@ def run_module_suite(file_to_run=None, argv=None):
 
     Alternatively, calling::
 
-    >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
+    >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")  # doctest: +SKIP
 
     from an interpreter will run all the test routines in 'test_matlib.py'.
     """
index 55306e49938bfad834bd586f387f3af8c7e88db2..36f5c3cfe537bf17d695f9e120e454d652fc0cd2 100644 (file)
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -318,8 +318,9 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
     Examples
     --------
     >>> np.testing.assert_equal([4,5], [4,6])
-    ...
-    <type 'exceptions.AssertionError'>:
+    Traceback (most recent call last):
+        ...
+    AssertionError:
     Items are not equal:
     item=1
      ACTUAL: 5
@@ -510,21 +511,24 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
     >>> import numpy.testing as npt
     >>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
     >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
-    ...
-    <type 'exceptions.AssertionError'>:
-    Items are not equal:
-     ACTUAL: 2.3333333333333002
-     DESIRED: 2.3333333399999998
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not almost equal to 10 decimals
+     ACTUAL: 2.3333333333333
+     DESIRED: 2.33333334
 
     >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
     ...                         np.array([1.0,2.33333334]), decimal=9)
-    ...
-    <type 'exceptions.AssertionError'>:
-    Arrays are not almost equal
-    <BLANKLINE>
-    (mismatch 50.0%)
-     x: array([ 1.        ,  2.33333333])
-     y: array([ 1.        ,  2.33333334])
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not almost equal to 9 decimals
+    Mismatch: 50%
+    Max absolute difference: 6.66669964e-09
+    Max relative difference: 2.85715698e-09
+     x: array([1.         , 2.333333333])
+     y: array([1.        , 2.33333334])
 
     """
     __tracebackhide__ = True  # Hide traceback for py.test
@@ -626,14 +630,15 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
     --------
     >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
     >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
-                                       significant=8)
+    ...                                significant=8)
     >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
-                                       significant=8)
-    ...
-    <type 'exceptions.AssertionError'>:
+    ...                                significant=8)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
     Items are not equal to 8 significant digits:
-     ACTUAL: 1.234567e-021
-     DESIRED: 1.2345672000000001e-021
+     ACTUAL: 1.234567e-21
+     DESIRED: 1.2345672e-21
 
     the evaluated condition that raises the exception is
 
@@ -660,10 +665,10 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
         sc_actual = actual/scale
     except ZeroDivisionError:
         sc_actual = 0.0
-    msg = build_err_msg([actual, desired], err_msg,
-                header='Items are not equal to %d significant digits:' %
-                                 significant,
-                verbose=verbose)
+    msg = build_err_msg(
+        [actual, desired], err_msg,
+        header='Items are not equal to %d significant digits:' % significant,
+        verbose=verbose)
     try:
         # If one of desired/actual is not finite, handle it specially here:
         # check that both are nan if any is a nan, and test for equality
@@ -686,7 +691,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
                          header='', precision=6, equal_nan=True,
                          equal_inf=True):
     __tracebackhide__ = True  # Hide traceback for py.test
-    from numpy.core import array, isnan, inf, bool_
+    from numpy.core import array, array2string, isnan, inf, bool_, errstate
 
     x = array(x, copy=False, subok=True)
     y = array(y, copy=False, subok=True)
@@ -782,15 +787,33 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
             reduced = val.ravel()
             cond = reduced.all()
             reduced = reduced.tolist()
+
         # The below comparison is a hack to ensure that fully masked
         # results, for which val.ravel().all() returns np.ma.masked,
         # do not trigger a failure (np.ma.masked != True evaluates as
         # np.ma.masked, which is falsy).
         if cond != True:
             mismatch = 100.0 * reduced.count(0) / ox.size
-            msg = build_err_msg([ox, oy],
-                                err_msg
-                                + '\n(mismatch %s%%)' % (mismatch,),
+            remarks = ['Mismatch: {:.3g}%'.format(mismatch)]
+
+            with errstate(invalid='ignore', divide='ignore'):
+                # ignore errors for non-numeric types
+                try:
+                    error = abs(x - y)
+                    max_abs_error = error.max()
+                    remarks.append('Max absolute difference: '
+                                   + array2string(max_abs_error))
+
+                    # note: this definition of relative error matches that one
+                    # used by assert_allclose (found in np.isclose)
+                    max_rel_error = (error / abs(y)).max()
+                    remarks.append('Max relative difference: '
+                                   + array2string(max_rel_error))
+                except TypeError:
+                    pass
+
+            err_msg += '\n' + '\n'.join(remarks)
+            msg = build_err_msg([ox, oy], err_msg,
                                 verbose=verbose, header=header,
                                 names=('x', 'y'), precision=precision)
             raise AssertionError(msg)
@@ -850,14 +873,15 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
 
     >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
     ...                               [1, np.sqrt(np.pi)**2, np.nan])
-    ...
-    <type 'exceptions.ValueError'>:
+    Traceback (most recent call last):
+        ...
     AssertionError:
     Arrays are not equal
-    <BLANKLINE>
-    (mismatch 50.0%)
-     x: array([ 1.        ,  3.14159265,         NaN])
-     y: array([ 1.        ,  3.14159265,         NaN])
+    Mismatch: 33.3%
+    Max absolute difference: 4.4408921e-16
+    Max relative difference: 1.41357986e-16
+     x: array([1.      , 3.141593,      nan])
+     y: array([1.      , 3.141593,      nan])
 
     Use `assert_allclose` or one of the nulp (number of floating point values)
     functions for these cases instead:
@@ -922,26 +946,29 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
     the first assert does not raise an exception
 
     >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
-                                             [1.0,2.333,np.nan])
+    ...                                      [1.0,2.333,np.nan])
 
     >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
     ...                                      [1.0,2.33339,np.nan], decimal=5)
-    ...
-    <type 'exceptions.AssertionError'>:
+    Traceback (most recent call last):
+        ...
     AssertionError:
-    Arrays are not almost equal
-    <BLANKLINE>
-    (mismatch 50.0%)
-     x: array([ 1.     ,  2.33333,      NaN])
-     y: array([ 1.     ,  2.33339,      NaN])
+    Arrays are not almost equal to 5 decimals
+    Mismatch: 33.3%
+    Max absolute difference: 6.e-05
+    Max relative difference: 2.57136612e-05
+     x: array([1.     , 2.33333,     nan])
+     y: array([1.     , 2.33339,     nan])
 
     >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
     ...                                      [1.0,2.33333, 5], decimal=5)
-    <type 'exceptions.ValueError'>:
-    ValueError:
-    Arrays are not almost equal
-     x: array([ 1.     ,  2.33333,      NaN])
-     y: array([ 1.     ,  2.33333,  5.     ])
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not almost equal to 5 decimals
+    x and y nan location mismatch:
+     x: array([1.     , 2.33333,     nan])
+     y: array([1.     , 2.33333, 5.     ])
 
     """
     __tracebackhide__ = True  # Hide traceback for py.test
@@ -1022,27 +1049,34 @@ def assert_array_less(x, y, err_msg='', verbose=True):
     --------
     >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
     >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
-    ...
-    <type 'exceptions.ValueError'>:
+    Traceback (most recent call last):
+        ...
+    AssertionError:
     Arrays are not less-ordered
-    (mismatch 50.0%)
-     x: array([  1.,   1.,  NaN])
-     y: array([  1.,   2.,  NaN])
+    Mismatch: 33.3%
+    Max absolute difference: 1.
+    Max relative difference: 0.5
+     x: array([ 1.,  1., nan])
+     y: array([ 1.,  2., nan])
 
     >>> np.testing.assert_array_less([1.0, 4.0], 3)
-    ...
-    <type 'exceptions.ValueError'>:
+    Traceback (most recent call last):
+        ...
+    AssertionError:
     Arrays are not less-ordered
-    (mismatch 50.0%)
-     x: array([ 1.,  4.])
+    Mismatch: 50%
+    Max absolute difference: 2.
+    Max relative difference: 0.66666667
+     x: array([1., 4.])
      y: array(3)
 
     >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
-    ...
-    <type 'exceptions.ValueError'>:
+    Traceback (most recent call last):
+        ...
+    AssertionError:
     Arrays are not less-ordered
     (shapes (3,), (1,) mismatch)
-     x: array([ 1.,  2.,  3.])
+     x: array([1., 2., 3.])
      y: array([4])
 
     """
@@ -1147,7 +1181,7 @@ def rundocs(filename=None, raise_on_error=True):
     argument to the ``test()`` call. For example, to run all tests (including
     doctests) for `numpy.lib`:
 
-    >>> np.lib.test(doctests=True) #doctest: +SKIP
+    >>> np.lib.test(doctests=True)  # doctest: +SKIP
     """
     from numpy.compat import npy_load_module
     import doctest
@@ -1329,7 +1363,7 @@ def decorate_methods(cls, decorator, testmatch=None):
     return
 
 
-def measure(code_str,times=1,label=None):
+def measure(code_str, times=1, label=None):
     """
     Return elapsed time for executing code in the namespace of the caller.
 
@@ -1356,9 +1390,9 @@ def measure(code_str,times=1,label=None):
 
     Examples
     --------
-    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
-    ...                            times=times)
-    >>> print("Time for a single execution : ", etime / times, "s")
+    >>> times = 10
+    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times)
+    >>> print("Time for a single execution : ", etime / times, "s")  # doctest: +SKIP
     Time for a single execution :  0.005 s
 
     """
@@ -1443,7 +1477,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
     --------
     >>> x = [1e-5, 1e-3, 1e-1]
     >>> y = np.arccos(np.cos(x))
-    >>> assert_allclose(x, y, rtol=1e-5, atol=0)
+    >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0)
 
     """
     __tracebackhide__ = True  # Hide traceback for py.test
@@ -1897,7 +1931,8 @@ class clear_and_catch_warnings(warnings.catch_warnings):
     Examples
     --------
     >>> import warnings
-    >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
+    >>> with np.testing.clear_and_catch_warnings(
+    ...         modules=[np.core.fromnumeric]):
     ...     warnings.simplefilter('always')
     ...     warnings.filterwarnings('ignore', module='np.core.fromnumeric')
     ...     # do something that raises a warning but ignore those in
@@ -1978,25 +2013,28 @@ class suppress_warnings(object):
 
     Examples
     --------
-    >>> with suppress_warnings() as sup:
-    ...     sup.filter(DeprecationWarning, "Some text")
-    ...     sup.filter(module=np.ma.core)
-    ...     log = sup.record(FutureWarning, "Does this occur?")
-    ...     command_giving_warnings()
-    ...     # The FutureWarning was given once, the filtered warnings were
-    ...     # ignored. All other warnings abide outside settings (may be
-    ...     # printed/error)
-    ...     assert_(len(log) == 1)
-    ...     assert_(len(sup.log) == 1)  # also stored in log attribute
-
-    Or as a decorator:
-
-    >>> sup = suppress_warnings()
-    >>> sup.filter(module=np.ma.core)  # module must match exact
-    >>> @sup
-    >>> def some_function():
-    ...     # do something which causes a warning in np.ma.core
-    ...     pass
+
+    With a context manager::
+
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Some text")
+            sup.filter(module=np.ma.core)
+            log = sup.record(FutureWarning, "Does this occur?")
+            command_giving_warnings()
+            # The FutureWarning was given once, the filtered warnings were
+            # ignored. All other warnings abide outside settings (may be
+            # printed/error)
+            assert_(len(log) == 1)
+            assert_(len(sup.log) == 1)  # also stored in log attribute
+
+    Or as a decorator::
+
+        sup = np.testing.suppress_warnings()
+        sup.filter(module=np.ma.core)  # module must match exactly
+        @sup
+        def some_function():
+            # do something which causes a warning in np.ma.core
+            pass
     """
     def __init__(self, forwarding_rule="always"):
         self._entered = False
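
With the new ``assert_array_compare`` reporting, failures include the mismatch
percentage plus the maximum absolute and relative differences, where relative
error is ``abs(x - y) / abs(y)`` as in ``np.isclose``. A small sketch of the
output (formatting approximate)::

    import numpy as np

    np.testing.assert_allclose([1.0, 2.0], [1.0, 2.5])
    # AssertionError:
    # Not equal to tolerance rtol=1e-07, atol=0
    # Mismatch: 50%
    # Max absolute difference: 0.5
    # Max relative difference: 0.2
    #  x: array([1., 2.])
    #  y: array([1. , 2.5])
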
index 43afafaa8afecb1ca44a9e33182dc409627d9d4c..c376a3852b3ecb2e40c38e8a167fc66dc4226256 100644 (file)
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -327,24 +327,22 @@ class TestEqual(TestArrayEqual):
         self._test_not_equal(x, y)
 
     def test_error_message(self):
-        try:
+        with pytest.raises(AssertionError) as exc_info:
             self._assert_func(np.array([1, 2]), np.array([[1, 2]]))
-        except AssertionError as e:
-            msg = str(e)
-            msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
-            msg_reference = textwrap.dedent("""\
+        msg = str(exc_info.value)
+        msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
+        msg_reference = textwrap.dedent("""\
 
-            Arrays are not equal
+        Arrays are not equal
 
-            (shapes (2,), (1, 2) mismatch)
-             x: array([1, 2])
-             y: array([[1, 2]])""")
-            try:
-                assert_equal(msg, msg_reference)
-            except AssertionError:
-                assert_equal(msg2, msg_reference)
-        else:
-            raise AssertionError("Did not raise")
+        (shapes (2,), (1, 2) mismatch)
+         x: array([1, 2])
+         y: array([[1, 2]])""")
+
+        try:
+            assert_equal(msg, msg_reference)
+        except AssertionError:
+            assert_equal(msg2, msg_reference)
 
 
 class TestArrayAlmostEqual(_GenericTest):
@@ -509,38 +507,53 @@ class TestAlmostEqual(_GenericTest):
         x = np.array([1.00000000001, 2.00000000002, 3.00003])
         y = np.array([1.00000000002, 2.00000000003, 3.00004])
 
-        # test with a different amount of decimal digits
-        # note that we only check for the formatting of the arrays themselves
-        b = ('x: array([1.00000000001, 2.00000000002, 3.00003     '
-             ' ])\n y: array([1.00000000002, 2.00000000003, 3.00004      ])')
-        try:
+        # Test with a different amount of decimal digits
+        with pytest.raises(AssertionError) as exc_info:
             self._assert_func(x, y, decimal=12)
-        except AssertionError as e:
-            # remove anything that's not the array string
-            assert_equal(str(e).split('%)\n ')[1], b)
-
-        # with the default value of decimal digits, only the 3rd element differs
-        # note that we only check for the formatting of the arrays themselves
-        b = ('x: array([1.     , 2.     , 3.00003])\n y: array([1.     , '
-             '2.     , 3.00004])')
-        try:
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatch: 100%')
+        assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
+        assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
+        assert_equal(
+            msgs[6],
+            ' x: array([1.00000000001, 2.00000000002, 3.00003      ])')
+        assert_equal(
+            msgs[7],
+            ' y: array([1.00000000002, 2.00000000003, 3.00004      ])')
+
+        # With the default value of decimal digits, only the 3rd element
+        # differs. Note that we only check for the formatting of the arrays
+        # themselves.
+        with pytest.raises(AssertionError) as exc_info:
             self._assert_func(x, y)
-        except AssertionError as e:
-            # remove anything that's not the array string
-            assert_equal(str(e).split('%)\n ')[1], b)
-
-        # Check the error message when input includes inf or nan
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatch: 33.3%')
+        assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
+        assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
+        assert_equal(msgs[6], ' x: array([1.     , 2.     , 3.00003])')
+        assert_equal(msgs[7], ' y: array([1.     , 2.     , 3.00004])')
+
+        # Check the error message when input includes inf
         x = np.array([np.inf, 0])
         y = np.array([np.inf, 1])
-        try:
+        with pytest.raises(AssertionError) as exc_info:
+            self._assert_func(x, y)
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatch: 50%')
+        assert_equal(msgs[4], 'Max absolute difference: 1.')
+        assert_equal(msgs[5], 'Max relative difference: 1.')
+        assert_equal(msgs[6], ' x: array([inf,  0.])')
+        assert_equal(msgs[7], ' y: array([inf,  1.])')
+
+        # Check the error message when dividing by zero
+        x = np.array([1, 2])
+        y = np.array([0, 0])
+        with pytest.raises(AssertionError) as exc_info:
             self._assert_func(x, y)
-        except AssertionError as e:
-            msgs = str(e).split('\n')
-            # assert error percentage is 50%
-            assert_equal(msgs[3], '(mismatch 50.0%)')
-            # assert output array contains inf
-            assert_equal(msgs[4], ' x: array([inf,  0.])')
-            assert_equal(msgs[5], ' y: array([inf,  1.])')
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatch: 100%')
+        assert_equal(msgs[4], 'Max absolute difference: 2')
+        assert_equal(msgs[5], 'Max relative difference: inf')
 
     def test_subclass_that_cannot_be_bool(self):
         # While we cannot guarantee testing functions will always work for
@@ -829,12 +842,12 @@ class TestAssertAllclose(object):
     def test_report_fail_percentage(self):
         a = np.array([1, 1, 1, 1])
         b = np.array([1, 1, 1, 2])
-        try:
+
+        with pytest.raises(AssertionError) as exc_info:
             assert_allclose(a, b)
-            msg = ''
-        except AssertionError as exc:
-            msg = exc.args[0]
-        assert_("mismatch 25.0%" in msg)
+        msg = str(exc_info.value)
+        assert_('Mismatch: 25%\nMax absolute difference: 1\n'
+                'Max relative difference: 0.5' in msg)
 
     def test_equal_nan(self):
         a = np.array([np.nan])
@@ -1117,12 +1130,10 @@ class TestStringEqual(object):
         assert_string_equal("hello", "hello")
         assert_string_equal("hello\nmultiline", "hello\nmultiline")
 
-        try:
+        with pytest.raises(AssertionError) as exc_info:
             assert_string_equal("foo\nbar", "hello\nbar")
-        except AssertionError as exc:
-            assert_equal(str(exc), "Differences in strings:\n- foo\n+ hello")
-        else:
-            raise AssertionError("exception not raised")
+        msg = str(exc_info.value)
+        assert_equal(msg, "Differences in strings:\n- foo\n+ hello")
 
         assert_raises(AssertionError,
                       lambda: assert_string_equal("foo", "hello"))
index d389b37a876043d8bce19bab1b5a7d7fe17b1fde..521208c36d3d20f62fd41cbff0e55c41eb6b9820 100644 (file)
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -273,3 +273,95 @@ class TestAsArray(object):
 
         # check we avoid the segfault
         c_arr[0][0][0]
+
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available on this python installation")
+class TestAsCtypesType(object):
+    """ Test conversion from dtypes to ctypes types """
+    def test_scalar(self):
+        dt = np.dtype('<u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16.__ctype_le__)
+
+        dt = np.dtype('>u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16.__ctype_be__)
+
+        dt = np.dtype('u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16)
+
+    def test_subarray(self):
+        dt = np.dtype((np.int32, (2, 3)))
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, 2 * (3 * ctypes.c_int32))
+
+    def test_structure(self):
+        dt = np.dtype([
+            ('a', np.uint16),
+            ('b', np.uint32),
+        ])
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Structure))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_structure_aligned(self):
+        dt = np.dtype([
+            ('a', np.uint16),
+            ('b', np.uint32),
+        ], align=True)
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Structure))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('', ctypes.c_char * 2),  # padding
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_union(self):
+        dt = np.dtype(dict(
+            names=['a', 'b'],
+            offsets=[0, 0],
+            formats=[np.uint16, np.uint32]
+        ))
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Union))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_padded_union(self):
+        dt = np.dtype(dict(
+            names=['a', 'b'],
+            offsets=[0, 0],
+            formats=[np.uint16, np.uint32],
+            itemsize=5,
+        ))
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Union))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+            ('', ctypes.c_char * 5),  # padding
+        ])
+
+    def test_overlapping(self):
+        dt = np.dtype(dict(
+            names=['a', 'b'],
+            offsets=[0, 2],
+            formats=[np.uint32, np.uint32]
+        ))
+        assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt)
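
The new tests exercise ``np.ctypeslib.as_ctypes_type``, which maps a dtype to
an equivalent ctypes type: scalars become (possibly endian-tagged) ctypes
scalars, subarrays become nested array types, and structured dtypes become
``Structure`` or ``Union`` subclasses with matching layout. For example::

    import ctypes
    import numpy as np

    dt = np.dtype([('a', np.uint16), ('b', np.uint32)])
    ct = np.ctypeslib.as_ctypes_type(dt)

    assert issubclass(ct, ctypes.Structure)
    assert ctypes.sizeof(ct) == dt.itemsize  # padding follows the dtype layout
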
index 9e27cc6cebc68aa075b7c1f3ff0507898706cf49..e42dc25f98e93eb1a28bc53e7d1c23de71a39992 100644 (file)
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -8,7 +8,7 @@ import sys
 import os
 import pytest
 from os.path import join as pathjoin, isfile, dirname
-from subprocess import Popen, PIPE
+import subprocess
 
 import numpy as np
 from numpy.compat.py3k import basestring
@@ -17,74 +17,13 @@ from numpy.testing import assert_, assert_equal
 is_inplace = isfile(pathjoin(dirname(np.__file__),  '..', 'setup.py'))
 
 
-def run_command(cmd, check_code=True):
-    """ Run command sequence `cmd` returning exit code, stdout, stderr
-
-    Parameters
-    ----------
-    cmd : str or sequence
-        string with command name or sequence of strings defining command
-    check_code : {True, False}, optional
-        If True, raise error for non-zero return code
-
-    Returns
-    -------
-    returncode : int
-        return code from execution of `cmd`
-    stdout : bytes (python 3) or str (python 2)
-        stdout from `cmd`
-    stderr : bytes (python 3) or str (python 2)
-        stderr from `cmd`
-
-    Raises
-    ------
-    RuntimeError
-        If `check_code` is True, and return code !=0
-    """
-    cmd = [cmd] if isinstance(cmd, basestring) else list(cmd)
-    if os.name == 'nt':
-        # Quote any arguments with spaces. The quotes delimit the arguments
-        # on Windows, and the arguments might be file paths with spaces.
-        # On Unix the list elements are each separate arguments.
-        cmd = ['"{0}"'.format(c) if ' ' in c else c for c in cmd]
-    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
-    stdout, stderr = proc.communicate()
-    if proc.poll() is None:
-        proc.terminate()
-    if check_code and proc.returncode != 0:
-        raise RuntimeError('\n'.join(
-            ['Command "{0}" failed with',
-             'stdout', '------', '{1}', '',
-             'stderr', '------', '{2}']).format(cmd, stdout, stderr))
-    return proc.returncode, stdout, stderr
-
-
-@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
-@pytest.mark.xfail(reason="Test is unreliable")
-def test_f2py():
-    # test that we can run f2py script
-
-    def try_f2py_commands(cmds):
-        success = 0
-        for f2py_cmd in cmds:
-            try:
-                code, stdout, stderr = run_command([f2py_cmd, '-v'])
-                assert_equal(stdout.strip(), b'2')
-                success += 1
-            except Exception:
-                pass
-        return success
-
+def find_f2py_commands():
     if sys.platform == 'win32':
-        # Only the single 'f2py' script is installed in windows.
         exe_dir = dirname(sys.executable)
         if exe_dir.endswith('Scripts'): # virtualenv
-            f2py_cmds = [os.path.join(exe_dir, 'f2py')]
+            return [os.path.join(exe_dir, 'f2py')]
         else:
-            f2py_cmds = [os.path.join(exe_dir, "Scripts", 'f2py')]
-        success = try_f2py_commands(f2py_cmds)
-        msg = "Warning: f2py not found in path"
-        assert_(success == 1, msg)
+            return [os.path.join(exe_dir, "Scripts", 'f2py')]
     else:
         # Three scripts are installed in Unix-like systems:
         # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example,
@@ -93,7 +32,18 @@ def test_f2py():
         version = sys.version_info
         major = str(version.major)
         minor = str(version.minor)
-        f2py_cmds = ('f2py', 'f2py' + major, 'f2py' + major + '.' + minor)
-        success = try_f2py_commands(f2py_cmds)
-        msg = "Warning: not all of %s, %s, and %s are found in path" % f2py_cmds
-        assert_(success == 3, msg)
+        return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor]
+
+
+@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
+@pytest.mark.xfail(reason="Test is unreliable")
+@pytest.mark.parametrize('f2py_cmd', find_f2py_commands())
+def test_f2py(f2py_cmd):
+    # test that we can run f2py script
+    stdout = subprocess.check_output([f2py_cmd, '-v'])
+    assert_equal(stdout.strip(), b'2')
+
+
+def test_pep338():
+    stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v'])
+    assert_equal(stdout.strip(), b'2')
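
The rewritten script test parametrizes over the installed ``f2py`` entry
points and, per PEP 338, also checks that the module runs via ``-m``. A quick
check mirroring the new test (assuming NumPy is installed)::

    import subprocess
    import sys

    out = subprocess.check_output([sys.executable, '-m', 'numpy.f2py', '-v'])
    assert out.strip() == b'2'  # f2py's version string at the time
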
index f2c56883b3001b7854ebe67becb76bb20098cf35..f3eb2847eabb9d6832ad799820f9efadf480a9d8 100644 (file)
--- a/pavement.py
+++ b/pavement.py
@@ -42,7 +42,7 @@ from paver.easy import Bunch, options, task, sh
 #-----------------------------------
 
 # Path to the release notes
-RELEASE_NOTES = 'doc/release/1.16.0-notes.rst'
+RELEASE_NOTES = 'doc/release/1.16.1-notes.rst'
 
 
 #-------------------------------------------------------
index 12d3010b37394c8e3b9a64c744e1054d1d22f2ec..c61afa20e228726e26c380a72ced25199c99073c 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@ Operating System :: MacOS
 
 MAJOR               = 1
 MINOR               = 16
-MICRO               = 0
+MICRO               = 1
 ISRELEASED          = True
 VERSION             = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
 
index fa83606b225d7e4ad8234b77d6c171eaa01de750..bdbbd8024526d14014169dbaae11cd68e9a9766e 100755 (executable)
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -113,7 +113,7 @@ run_test()
 
   if [ -n "$RUN_COVERAGE" ]; then
     $PIP install pytest-cov
-    NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1
+    export NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1
     COVERAGE_FLAG=--coverage
   fi