Imported Upstream version 1.16.6 upstream/1.16.6
authorDongHun Kwak <dh0128.kwak@samsung.com>
Thu, 31 Dec 2020 00:48:47 +0000 (09:48 +0900)
committerDongHun Kwak <dh0128.kwak@samsung.com>
Thu, 31 Dec 2020 00:48:47 +0000 (09:48 +0900)
34 files changed:
doc/changelog/1.16.6-changelog.rst [new file with mode: 0644]
doc/release/1.15.6-notes.rst [new file with mode: 0644]
doc/release/1.16.6-notes.rst [new file with mode: 0644]
doc/source/release.rst
numpy/core/_internal.py
numpy/core/arrayprint.py
numpy/core/code_generators/generate_umath.py
numpy/core/include/numpy/ndarrayobject.h
numpy/core/records.py
numpy/core/setup_common.py
numpy/core/src/multiarray/descriptor.c
numpy/core/src/multiarray/einsum.c.src
numpy/core/src/umath/matmul.c.src
numpy/core/src/umath/matmul.h.src
numpy/core/tests/test_einsum.py
numpy/core/tests/test_multiarray.py
numpy/core/tests/test_records.py
numpy/core/tests/test_regression.py
numpy/ctypeslib.py
numpy/lib/_iotools.py
numpy/lib/arraypad.py
numpy/lib/histograms.py
numpy/lib/npyio.py
numpy/lib/recfunctions.py
numpy/lib/tests/test_arraypad.py
numpy/lib/tests/test_histograms.py
numpy/lib/tests/test_io.py
numpy/lib/tests/test_recfunctions.py
numpy/ma/mrecords.py
numpy/testing/_private/utils.py
numpy/testing/tests/test_utils.py
pavement.py
setup.py
shippable.yml

diff --git a/doc/changelog/1.16.6-changelog.rst b/doc/changelog/1.16.6-changelog.rst
new file mode 100644 (file)
index 0000000..62ff46c
--- /dev/null
@@ -0,0 +1,36 @@
+
+Contributors
+============
+
+A total of 10 people contributed to this release.
+
+* CakeWithSteak
+* Charles Harris
+* Chris Burr
+* Eric Wieser
+* Fernando Saravia
+* Lars Grueter
+* Matti Picus
+* Maxwell Aladago
+* Qiming Sun
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 14 pull requests were merged for this release.
+
+* `#14211 <https://github.com/numpy/numpy/pull/14211>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative...
+* `#14275 <https://github.com/numpy/numpy/pull/14275>`__: BUG: fixing to allow unpickling of PY3 pickles from PY2
+* `#14340 <https://github.com/numpy/numpy/pull/14340>`__: BUG: Fix misuse of .names and .fields in various places (backport...
+* `#14423 <https://github.com/numpy/numpy/pull/14423>`__: BUG: test, fix regression in converting to ctypes.
+* `#14434 <https://github.com/numpy/numpy/pull/14434>`__: BUG: Fixed maximum relative error reporting in assert_allclose
+* `#14509 <https://github.com/numpy/numpy/pull/14509>`__: BUG: Fix regression in boolean matmul.
+* `#14686 <https://github.com/numpy/numpy/pull/14686>`__: BUG: properly define PyArray_DescrCheck
+* `#14853 <https://github.com/numpy/numpy/pull/14853>`__: BLD: add 'apt update' to shippable
+* `#14854 <https://github.com/numpy/numpy/pull/14854>`__: BUG: Fix _ctypes class circular reference. (#13808)
+* `#14856 <https://github.com/numpy/numpy/pull/14856>`__: BUG: Fix `np.einsum` errors on Power9 Linux and z/Linux
+* `#14863 <https://github.com/numpy/numpy/pull/14863>`__: BLD: Prevent -flto from optimising long double representation...
+* `#14864 <https://github.com/numpy/numpy/pull/14864>`__: BUG: lib: Fix histogram problem with signed integer arrays.
+* `#15172 <https://github.com/numpy/numpy/pull/15172>`__: ENH: Backport improvements to testing functions.
+* `#15191 <https://github.com/numpy/numpy/pull/15191>`__: REL: Prepare for 1.16.6 release.
diff --git a/doc/release/1.15.6-notes.rst b/doc/release/1.15.6-notes.rst
new file mode 100644 (file)
index 0000000..863f4b4
--- /dev/null
@@ -0,0 +1,52 @@
+==========================
+NumPy 1.16.6 Release Notes
+==========================
+
+The NumPy 1.16.6 release fixes bugs reported against the 1.16.5 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.7-dev, which should fix errors on Skylake series
+CPUs.
+
+Downstream developers building this release should use Cython >= 0.29.2 and,
+if using OpenBLAS, OpenBLAS >= v0.3.7. The supported Python versions are 2.7
+and 3.5-3.7.
+
+Highlights
+==========
+
+
+New functions
+=============
+
+
+New deprecations
+================
+
+
+Expired deprecations
+====================
+
+
+Future changes
+==============
+
+
+Compatibility notes
+===================
+
+
+C API changes
+=============
+
+
+New Features
+============
+
+
+Improvements
+============
+
+
+Changes
+=======
diff --git a/doc/release/1.16.6-notes.rst b/doc/release/1.16.6-notes.rst
new file mode 100644 (file)
index 0000000..cda3449
--- /dev/null
@@ -0,0 +1,85 @@
+==========================
+NumPy 1.16.6 Release Notes
+==========================
+
+The NumPy 1.16.6 release fixes bugs reported against the 1.16.5 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.7, which should fix errors on Skylake series
+CPUs.
+
+Downstream developers building this release should use Cython >= 0.29.2 and, if
+using OpenBLAS, OpenBLAS >= v0.3.7. The supported Python versions are 2.7 and
+3.5-3.7.
+
+Highlights
+==========
+
+- The ``np.testing.utils`` functions have been updated from 1.19.0-dev0.
+  This improves the function documentation and error messages as well
+  as extending the ``assert_array_compare`` function to additional types.
+
+
+New functions
+=============
+
+Allow matmul (`@` operator) to work with object arrays.
+-------------------------------------------------------
+This is an enhancement that was added in NumPy 1.17 and seems reasonable to
+include in the LTS 1.16 release series.
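+
+A minimal sketch of the new behavior (``Fraction`` is just an illustrative
+object type; the exact ``repr`` formatting may differ)::
+
+    >>> import numpy as np
+    >>> from fractions import Fraction
+    >>> a = np.array([[Fraction(1, 2), Fraction(1, 3)],
+    ...               [Fraction(1, 4), Fraction(1, 5)]], dtype=object)
+    >>> a @ a
+    array([[Fraction(1, 3), Fraction(7, 30)],
+           [Fraction(7, 40), Fraction(37, 300)]], dtype=object)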
+
+
+Compatibility notes
+===================
+
+Fix regression in matmul (`@` operator) for boolean types
+---------------------------------------------------------
+Booleans were being treated as integers rather than booleans,
+which was a regression from previous behavior.
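+
+For example (a quick sketch; during the regression the result could contain
+summed integer counts instead of booleans)::
+
+    >>> import numpy as np
+    >>> a = np.array([[True, False], [True, True]])
+    >>> np.matmul(a, a)
+    array([[ True, False],
+           [ True,  True]])
+    >>> np.matmul(a, a).view(np.uint8).max()  # stays 0/1
+    1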
+
+
+Improvements
+============
+
+Array comparison assertions include maximum differences
+-------------------------------------------------------
+Error messages from array comparison tests such as ``testing.assert_allclose``
+now include "max absolute difference" and "max relative difference," in
+addition to the previous "mismatch" percentage.  This information makes it
+easier to update absolute and relative error tolerances.
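+
+A sketch of the new failure output (values chosen for illustration; the
+message is abridged and its exact formatting may differ)::
+
+    >>> import numpy as np
+    >>> np.testing.assert_allclose(np.array([1.0, 2.0]), np.array([1.0, 2.1]))
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Not equal to tolerance rtol=1e-07, atol=0
+    Mismatched elements: 1 / 2 (50%)
+    Max absolute difference: 0.1
+    Max relative difference: 0.04761905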
+
+Contributors
+============
+
+A total of 10 people contributed to this release.
+
+* CakeWithSteak
+* Charles Harris
+* Chris Burr
+* Eric Wieser
+* Fernando Saravia
+* Lars Grueter
+* Matti Picus
+* Maxwell Aladago
+* Qiming Sun
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 14 pull requests were merged for this release.
+
+* `#14211 <https://github.com/numpy/numpy/pull/14211>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative...
+* `#14275 <https://github.com/numpy/numpy/pull/14275>`__: BUG: fixing to allow unpickling of PY3 pickles from PY2
+* `#14340 <https://github.com/numpy/numpy/pull/14340>`__: BUG: Fix misuse of .names and .fields in various places (backport...
+* `#14423 <https://github.com/numpy/numpy/pull/14423>`__: BUG: test, fix regression in converting to ctypes.
+* `#14434 <https://github.com/numpy/numpy/pull/14434>`__: BUG: Fixed maximum relative error reporting in assert_allclose
+* `#14509 <https://github.com/numpy/numpy/pull/14509>`__: BUG: Fix regression in boolean matmul.
+* `#14686 <https://github.com/numpy/numpy/pull/14686>`__: BUG: properly define PyArray_DescrCheck
+* `#14853 <https://github.com/numpy/numpy/pull/14853>`__: BLD: add 'apt update' to shippable
+* `#14854 <https://github.com/numpy/numpy/pull/14854>`__: BUG: Fix _ctypes class circular reference. (#13808)
+* `#14856 <https://github.com/numpy/numpy/pull/14856>`__: BUG: Fix `np.einsum` errors on Power9 Linux and z/Linux
+* `#14863 <https://github.com/numpy/numpy/pull/14863>`__: BLD: Prevent -flto from optimising long double representation...
+* `#14864 <https://github.com/numpy/numpy/pull/14864>`__: BUG: lib: Fix histogram problem with signed integer arrays.
+* `#15172 <https://github.com/numpy/numpy/pull/15172>`__: ENH: Backport improvements to testing functions.
+* `#15191 <https://github.com/numpy/numpy/pull/15191>`__: REL: Prepare for 1.16.6 release.
index e5b3d5d1386d264f523c4443eaf936e4173401e2..10a8caabdd2a829a267b5a97f0feb7c94a8df7f1 100644 (file)
@@ -2,6 +2,7 @@
 Release Notes
 *************
 
+.. include:: ../release/1.16.6-notes.rst
 .. include:: ../release/1.16.5-notes.rst
 .. include:: ../release/1.16.4-notes.rst
 .. include:: ../release/1.16.3-notes.rst
index c7c18fbfc38a352f5dbdbc33d05965b2f312eb9b..2a49067678dba136be7b9f21e12c710de2d2e9f4 100644 (file)
@@ -248,55 +248,13 @@ class _missing_ctypes(object):
             self.value = ptr
 
 
-class _unsafe_first_element_pointer(object):
-    """
-    Helper to allow viewing an array as a ctypes pointer to the first element
-
-    This avoids:
-      * dealing with strides
-      * `.view` rejecting object-containing arrays
-      * `memoryview` not supporting overlapping fields
-    """
-    def __init__(self, arr):
-        self.base = arr
-
-    @property
-    def __array_interface__(self):
-        i = dict(
-            shape=(),
-            typestr='|V0',
-            data=(self.base.__array_interface__['data'][0], False),
-            strides=(),
-            version=3,
-        )
-        return i
-
-
-def _get_void_ptr(arr):
-    """
-    Get a `ctypes.c_void_p` to arr.data, that keeps a reference to the array
-    """
-    import numpy as np
-    # convert to a 0d array that has a data pointer referrign to the start
-    # of arr. This holds a reference to arr.
-    simple_arr = np.asarray(_unsafe_first_element_pointer(arr))
-
-    # create a `char[0]` using the same memory.
-    c_arr = (ctypes.c_char * 0).from_buffer(simple_arr)
-
-    # finally cast to void*
-    return ctypes.cast(ctypes.pointer(c_arr), ctypes.c_void_p)
-
-
 class _ctypes(object):
     def __init__(self, array, ptr=None):
         self._arr = array
 
         if ctypes:
             self._ctypes = ctypes
-            # get a void pointer to the buffer, which keeps the array alive
-            self._data = _get_void_ptr(array)
-            assert self._data.value == ptr
+            self._data = self._ctypes.c_void_p(ptr)
         else:
             # fake a pointer-like object that holds onto the reference
             self._ctypes = _missing_ctypes()
@@ -318,7 +276,14 @@ class _ctypes(object):
 
         The returned pointer will keep a reference to the array.
         """
-        return self._ctypes.cast(self._data, obj)
+        # The ctypes cast function creates a circular reference of self._data
+        # in self._data._objects, so attributes of self._data cannot be
+        # released until gc.collect is called. Make a copy of the pointer
+        # first, then let the copy hold the array reference. This works
+        # around the CPython bug https://bugs.python.org/issue12836
+        ptr = self._ctypes.cast(self._data, obj)
+        ptr._arr = self._arr
+        return ptr
 
     def shape_as(self, obj):
         """
@@ -386,7 +351,7 @@ class _ctypes(object):
 
         Enables `c_func(some_array.ctypes)`
         """
-        return self._data
+        return self.data_as(ctypes.c_void_p)
 
     # kept for compatibility
     get_data = data.fget
@@ -460,7 +425,7 @@ def _getfield_is_safe(oldtype, newtype, offset):
     if newtype.hasobject or oldtype.hasobject:
         if offset == 0 and newtype == oldtype:
             return
-        if oldtype.names:
+        if oldtype.names is not None:
             for name in oldtype.names:
                 if (oldtype.fields[name][1] == offset and
                         oldtype.fields[name][0] == newtype):
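
The effect of the ``data_as`` change is observable from Python; a minimal
sketch (mirroring the ctypes reference-holding tests added to
test_multiarray.py later in this patch) showing that the returned pointer
keeps the array alive without the old reference cycle:

    import ctypes
    import weakref

    import numpy as np

    arr = np.arange(3, dtype=np.float64)
    ref = weakref.ref(arr)

    # data_as now returns a fresh pointer object that itself holds the array
    # reference, instead of handing out self._data (which previously ended up
    # in a cycle that only gc.collect could break).
    ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
    del arr
    assert ref() is not None    # the pointer keeps the array alive
    assert ptr[0] == 0.0
    del ptr                     # on CPython, dropping the pointer frees the array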
index 6a71de226d3558ed12427271b081ef67f2ae7cf6..a305552ee9ff470ede2be281662d23dc762d8ff8 100644 (file)
@@ -672,7 +672,7 @@ def array2string(a, max_line_width=None, precision=None,
         if style is np._NoValue:
             style = repr
 
-        if a.shape == () and not a.dtype.names:
+        if a.shape == () and a.dtype.names is None:
             return style(a.item())
     elif style is not np._NoValue:
         # Deprecation 11-9-2017  v1.14
index 0fac9b05eeffb5c0036be1260a226302e0f030f7..daf5949d06256a43fe6044d9c6d823fc60448e40 100644 (file)
@@ -911,6 +911,7 @@ defdict = {
           docstrings.get('numpy.core.umath.matmul'),
           "PyUFunc_SimpleBinaryOperationTypeResolver",
           TD(notimes_or_obj),
+          TD(O),
           signature='(n?,k),(k,m?)->(n?,m?)',
           ),
 }
index 2cc7ced3584d0040b95b889cd33b33a7b8eac844..95e9cb0603bb9832a1d3903b1074bcbd9363511a 100644 (file)
@@ -23,7 +23,7 @@ extern "C" {
 
 /* C-API that requires previous API to be defined */
 
-#define PyArray_DescrCheck(op) (((PyObject*)(op))->ob_type==&PyArrayDescr_Type)
+#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type)
 
 #define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type)
 #define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type)
index 5898bb1631f6e792ec5e4640ae60a40ee4bea273..79b55fec179756200cf14aa07acd3c2863dc55a4 100644 (file)
@@ -254,8 +254,8 @@ class record(nt.void):
             except AttributeError:
                 #happens if field is Object type
                 return obj
-            if dt.fields:
-                return obj.view((self.__class__, obj.dtype.fields))
+            if dt.names is not None:
+                return obj.view((self.__class__, obj.dtype))
             return obj
         else:
             raise AttributeError("'record' object has no "
@@ -279,8 +279,8 @@ class record(nt.void):
         obj = nt.void.__getitem__(self, indx)
 
         # copy behavior of record.__getattribute__,
-        if isinstance(obj, nt.void) and obj.dtype.fields:
-            return obj.view((self.__class__, obj.dtype.fields))
+        if isinstance(obj, nt.void) and obj.dtype.names is not None:
+            return obj.view((self.__class__, obj.dtype))
         else:
             # return a single element
             return obj
@@ -431,7 +431,7 @@ class recarray(ndarray):
         return self
 
     def __array_finalize__(self, obj):
-        if self.dtype.type is not record and self.dtype.fields:
+        if self.dtype.type is not record and self.dtype.names is not None:
             # if self.dtype is not np.record, invoke __setattr__ which will
             # convert it to a record if it is a void dtype.
             self.dtype = self.dtype
@@ -459,7 +459,7 @@ class recarray(ndarray):
         # with void type convert it to the same dtype.type (eg to preserve
         # numpy.record type if present), since nested structured fields do not
         # inherit type. Don't do this for non-void structures though.
-        if obj.dtype.fields:
+        if obj.dtype.names is not None:
             if issubclass(obj.dtype.type, nt.void):
                 return obj.view(dtype=(self.dtype.type, obj.dtype))
             return obj
@@ -474,7 +474,7 @@ class recarray(ndarray):
 
         # Automatically convert (void) structured types to records
         # (but not non-void structures, subarrays, or non-structured voids)
-        if attr == 'dtype' and issubclass(val.type, nt.void) and val.fields:
+        if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None:
             val = sb.dtype((record, val))
 
         newattr = attr not in self.__dict__
@@ -508,7 +508,7 @@ class recarray(ndarray):
         # copy behavior of getattr, except that here
         # we might also be returning a single element
         if isinstance(obj, ndarray):
-            if obj.dtype.fields:
+            if obj.dtype.names is not None:
                 obj = obj.view(type(self))
                 if issubclass(obj.dtype.type, nt.void):
                     return obj.view(dtype=(self.dtype.type, obj.dtype))
@@ -564,7 +564,7 @@ class recarray(ndarray):
 
         if val is None:
             obj = self.getfield(*res)
-            if obj.dtype.fields:
+            if obj.dtype.names is not None:
                 return obj
             return obj.view(ndarray)
         else:
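
The recurring switch from truthiness tests on ``.names``/``.fields`` to
``is not None`` matters because a structured dtype with zero fields has an
empty, falsy ``names`` tuple; a quick illustration:

    import numpy as np

    print(np.dtype([]).names)       # () -- structured, but falsy
    print(np.dtype(float).names)    # None -- genuinely unstructured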
index f837df11217b3eff8af1b878c9495c6142d3713f..85b863e50ea599230658fcadc97c878298e94995 100644 (file)
@@ -243,8 +243,9 @@ def check_long_double_representation(cmd):
     except ValueError:
         # try linking to support CC="gcc -flto" or icc -ipo
         # struct needs to be volatile so it isn't optimized away
+        # additionally "clang -flto" requires the foo struct to be used
         body = body.replace('struct', 'volatile struct')
-        body += "int main(void) { return 0; }\n"
+        body += "int main(void) { return foo.before[0]; }\n"
         src, obj = cmd._compile(body, None, None, 'c')
         cmd.temp_files.append("_configtest")
         cmd.compiler.link_executable([obj], "_configtest")
index e7a4b6c72a9ac95f473b831e39b9f9ecef7ddbc7..53d74512c21b8947335b25b1c7c487a72fef0c45 100644 (file)
@@ -2751,11 +2751,11 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
             }
         }
         else {
-#if defined(NPY_PY3K)
             /*
-             * To support pickle.load(f, encoding='bytes') for loading Py2
-             * generated pickles on Py3, we need to be more lenient and convert
-             * field names from byte strings to unicode.
+             * At least one of the names is not of the expected type.
+             * The mismatch may come from a pickle created on another
+             * Python version (PY2/PY3). Go through the names and
+             * convert them where possible, for compatibility.
              */
             PyObject *tmp, *new_name, *field;
 
@@ -2780,7 +2780,14 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
                     return NULL;
                 }
 
+#if defined(NPY_PY3K)
+                /*
+                 * To support pickle.load(f, encoding='bytes') for loading Py2
+                 * generated pickles on Py3, we need to be more lenient and convert
+                 * field names from byte strings to unicode.
+                 */
                 if (PyUnicode_Check(name)) {
+                    // no transformation needed, keep it as is
                     new_name = name;
                     Py_INCREF(new_name);
                 }
@@ -2790,17 +2797,35 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
                         return NULL;
                     }
                 }
+#else
+                // PY2 should be able to read PY3 pickles. See gh-2407
+                if (PyString_Check(name)) {
+                    // It is a PY2 string, no transformation is needed
+                    new_name = name;
+                    Py_INCREF(new_name);
+                }
+                else if (PyUnicode_Check(name)) {
+                    // The field names of a structured dtype were pickled in
+                    // PY3 as unicode strings, so to unpickle them in PY2 we
+                    // need to convert them to PY2 strings
+                    new_name = PyUnicode_AsEncodedString(name, "ASCII", "strict");
+                    if (new_name == NULL) {
+                        return NULL;
+                    }
+                }
+                else {
+                    // The field name is neither a string nor a unicode object;
+                    // we cannot process it
+                    PyErr_Format(PyExc_ValueError,
+                        "non-string/non-unicode names in Numpy dtype unpickling");
+                    return NULL;
+                }
+
+#endif
 
                 PyTuple_SET_ITEM(self->names, i, new_name);
                 if (PyDict_SetItem(self->fields, new_name, field) != 0) {
                     return NULL;
                 }
             }
-#else
-            PyErr_Format(PyExc_ValueError,
-                "non-string names in Numpy dtype unpickling");
-            return NULL;
-#endif
         }
     }
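
In Python terms, this change makes structured-dtype pickles portable in both
directions; a sketch (mirroring the new round-trip tests in
test_multiarray.py):

    import pickle

    import numpy as np

    data = np.array([(6.0,)], dtype=np.dtype([('SPOT', np.float64)]))
    # Protocol 2 pickles can now be loaded on both PY2 and PY3, regardless of
    # which version wrote them.
    blob = pickle.dumps(data, protocol=2)
    assert pickle.loads(blob) == data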
 
index 1765982a0a7b1fa779acd13f8ee02247b392589e..58af4409192148d9c49f4eba80c058b5b8dcba6a 100644 (file)
@@ -1876,7 +1876,7 @@ parse_operand_subscripts(char *subscripts, int length,
      * later where it matters the char is cast to a signed char.
      */
     for (idim = 0; idim < ndim - 1; ++idim) {
-        int label = op_labels[idim];
+        int label = (signed char)op_labels[idim];
         /* If it is a proper label, find any duplicates of it. */
         if (label > 0) {
             /* Search for the next matching label. */
@@ -1992,12 +1992,13 @@ parse_output_subscripts(char *subscripts, int length,
 
 
 /*
- * When there's just one operand and no reduction, we
- * can return a view into op.  This calculates the view
- * if possible.
+ * When there's just one operand and no reduction we can return a view
+ * into 'op'.  This calculates the view and stores it in 'ret', if
+ * possible.  Returns -1 on error, 0 otherwise.  Note that a 0 return
+ * does not mean that a view was successfully created.
  */
 static int
-get_single_op_view(PyArrayObject *op, int  iop, char *labels,
+get_single_op_view(PyArrayObject *op, char *labels,
                    int ndim_output, char *output_labels,
                    PyArrayObject **ret)
 {
@@ -2052,13 +2053,11 @@ get_single_op_view(PyArrayObject *op, int  iop, char *labels,
             }
             /* Update the dimensions and strides of the output */
             i = out_label - output_labels;
-            if (new_dims[i] != 0 &&
-                    new_dims[i] != PyArray_DIM(op, idim)) {
+            if (new_dims[i] != 0 && new_dims[i] != PyArray_DIM(op, idim)) {
                 PyErr_Format(PyExc_ValueError,
-                        "dimensions in operand %d for collapsing "
+                        "dimensions in single operand for collapsing "
                         "index '%c' don't match (%d != %d)",
-                        iop, label, (int)new_dims[i],
-                        (int)PyArray_DIM(op, idim));
+                        label, (int)new_dims[i], (int)PyArray_DIM(op, idim));
                 return -1;
             }
             new_dims[i] = PyArray_DIM(op, idim);
@@ -2086,80 +2085,108 @@ get_single_op_view(PyArrayObject *op, int  iop, char *labels,
     return 0;
 }
 
+
+/*
+ * The char type may be either signed or unsigned, we need it to be
+ * signed here.
+ */
+static int
+_any_labels_are_negative(signed char *labels, int ndim)
+{
+    int idim;
+
+    for (idim = 0; idim < ndim; ++idim) {
+        if (labels[idim] < 0) {
+            return 1;
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Given the labels for an operand array, returns a view of the array
+ * with all repeated labels collapsed into a single dimension along
+ * the corresponding diagonal. The labels are also updated to match
+ * the dimensions of the new array. If no label is repeated, the
+ * original array's reference count is incremented and it is returned
+ * unchanged.
+ */
 static PyArrayObject *
 get_combined_dims_view(PyArrayObject *op, int iop, char *labels)
 {
     npy_intp new_strides[NPY_MAXDIMS];
     npy_intp new_dims[NPY_MAXDIMS];
-    int idim, ndim, icombine, combineoffset;
+    int idim, icombine;
     int icombinemap[NPY_MAXDIMS];
-
+    int ndim = PyArray_NDIM(op);
     PyArrayObject *ret = NULL;
 
-    ndim = PyArray_NDIM(op);
+    /* A fast path to avoid unnecessary calculations. */
+    if (!_any_labels_are_negative((signed char *)labels, ndim)) {
+        Py_INCREF(op);
 
-    /* Initialize the dimensions and strides to zero */
-    for (idim = 0; idim < ndim; ++idim) {
-        new_dims[idim] = 0;
-        new_strides[idim] = 0;
+        return op;
     }
 
-    /* Copy the dimensions and strides, except when collapsing */
+    /* Combine repeated labels. */
     icombine = 0;
-    for (idim = 0; idim < ndim; ++idim) {
+    for (idim = 0; idim < ndim; ++idim) {
         /*
          * The char type may be either signed or unsigned, we
          * need it to be signed here.
          */
         int label = (signed char)labels[idim];
-        /* If this label says to merge axes, get the actual label */
-        if (label < 0) {
-            combineoffset = label;
-            label = labels[idim+label];
-        }
-        else {
-            combineoffset = 0;
-            if (icombine != idim) {
-                labels[icombine] = labels[idim];
-            }
+        npy_intp dim = PyArray_DIM(op, idim);
+        npy_intp stride = PyArray_STRIDE(op, idim);
+
+        /* A label seen for the first time, add it to the op view. */
+        if (label >= 0) {
+            /*
+             * icombinemap maps dimensions in the original array to
+             * their position in the combined dimensions view.
+             */
             icombinemap[idim] = icombine;
+            new_dims[icombine] = dim;
+            new_strides[icombine] = stride;
+            ++icombine;
         }
-        /* If the label is 0, it's an unlabeled broadcast dimension */
-        if (label == 0) {
-            new_dims[icombine] = PyArray_DIM(op, idim);
-            new_strides[icombine] = PyArray_STRIDE(op, idim);
-        }
+        /* A repeated label, find the original one and merge them. */
         else {
-            /* Update the combined axis dimensions and strides */
-            int i = icombinemap[idim + combineoffset];
-            if (combineoffset < 0 && new_dims[i] != 0 &&
-                        new_dims[i] != PyArray_DIM(op, idim)) {
+            int i = icombinemap[idim + label];
+
+            icombinemap[idim] = -1;
+            if (new_dims[i] != dim) {
+                char orig_label = labels[idim + label];
                 PyErr_Format(PyExc_ValueError,
-                        "dimensions in operand %d for collapsing "
-                        "index '%c' don't match (%d != %d)",
-                        iop, label, (int)new_dims[i],
-                        (int)PyArray_DIM(op, idim));
+                             "dimensions in operand %d for collapsing "
+                             "index '%c' don't match (%d != %d)",
+                             iop, orig_label, (int)new_dims[i], (int)dim);
                 return NULL;
             }
-            new_dims[i] = PyArray_DIM(op, idim);
-            new_strides[i] += PyArray_STRIDE(op, idim);
+            new_strides[i] += stride;
         }
+    }
 
-        /* If the label didn't say to combine axes, increment dest i */
-        if (combineoffset == 0) {
-            icombine++;
+    /* Overwrite labels to match the new operand view. */
+    for (idim = 0; idim < ndim; ++idim) {
+        int i = icombinemap[idim];
+
+        if (i >= 0) {
+            labels[i] = labels[idim];
         }
     }
 
-    /* The compressed number of dimensions */
+    /* The number of dimensions of the combined view. */
     ndim = icombine;
 
+    /* Create a view of the operand with the compressed dimensions. */
     Py_INCREF(PyArray_DESCR(op));
     ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
             Py_TYPE(op), PyArray_DESCR(op),
             ndim, new_dims, new_strides, PyArray_DATA(op),
             PyArray_ISWRITEABLE(op) ? NPY_ARRAY_WRITEABLE : 0,
             (PyObject *)op, (PyObject *)op);
+
     return ret;
 }
 
@@ -2620,6 +2647,24 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
         return NULL;
     }
 
+    /*
+     * If there's just one operand and no output parameter,
+     * first try remapping the axes to the output to return
+     * a view instead of a copy.
+     */
+    if (nop == 1 && out == NULL) {
+        ret = NULL;
+
+        if (get_single_op_view(op_in[0], op_labels[0], ndim_output,
+                               output_labels, &ret) < 0) {
+            return NULL;
+        }
+
+        if (ret != NULL) {
+            return ret;
+        }
+    }
+
     /* Set all the op references to NULL */
     for (iop = 0; iop < nop; ++iop) {
         op[iop] = NULL;
@@ -2631,53 +2676,10 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
      */
     for (iop = 0; iop < nop; ++iop) {
         char *labels = op_labels[iop];
-        int combine, ndim;
-
-        ndim = PyArray_NDIM(op_in[iop]);
 
-        /*
-         * If there's just one operand and no output parameter,
-         * first try remapping the axes to the output to return
-         * a view instead of a copy.
-         */
-        if (iop == 0 && nop == 1 && out == NULL) {
-            ret = NULL;
-
-            if (get_single_op_view(op_in[iop], iop, labels,
-                                   ndim_output, output_labels,
-                                   &ret) < 0) {
-                return NULL;
-            }
-
-            if (ret != NULL) {
-                return ret;
-            }
-        }
-
-        /*
-         * Check whether any dimensions need to be combined
-         *
-         * The char type may be either signed or unsigned, we
-         * need it to be signed here.
-         */
-        combine = 0;
-        for (idim = 0; idim < ndim; ++idim) {
-            if ((signed char)labels[idim] < 0) {
-                combine = 1;
-            }
-        }
-
-        /* If any dimensions are combined, create a view which combines them */
-        if (combine) {
-            op[iop] = get_combined_dims_view(op_in[iop], iop, labels);
-            if (op[iop] == NULL) {
-                goto fail;
-            }
-        }
-        /* No combining needed */
-        else {
-            Py_INCREF(op_in[iop]);
-            op[iop] = op_in[iop];
+        op[iop] = get_combined_dims_view(op_in[iop], iop, labels);
+        if (op[iop] == NULL) {
+            goto fail;
         }
     }
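
The signed-char fix above is what the new test_einsum_failed_on_p9_and_s390x
test exercises; the behavior it guards (a repeated-label reduction matching a
double trace) can be checked directly:

    import numpy as np

    tensor = np.random.random_sample((10, 10, 10, 10))
    x = np.einsum('ijij->', tensor)
    y = tensor.trace(axis1=0, axis2=2).trace()
    assert np.allclose(x, y)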
 
index 0cb3c82ad228601b530294f6b0c9282169d6efbc..bc00d3562d0f533b7581cd9d1ddb2d999d46e523 100644 (file)
@@ -196,16 +196,14 @@ NPY_NO_EXPORT void
  *          FLOAT, DOUBLE, HALF,
  *          CFLOAT, CDOUBLE, CLONGDOUBLE,
  *          UBYTE, USHORT, UINT, ULONG, ULONGLONG,
- *          BYTE, SHORT, INT, LONG, LONGLONG,
- *          BOOL#
+ *          BYTE, SHORT, INT, LONG, LONGLONG#
  *  #typ = npy_longdouble,
  *         npy_float,npy_double,npy_half,
  *         npy_cfloat, npy_cdouble, npy_clongdouble,
  *         npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
- *         npy_byte, npy_short, npy_int, npy_long, npy_longlong,
- *         npy_bool#
- * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*11#
- * #IS_HALF = 0, 0, 0, 1, 0*14#
+ *         npy_byte, npy_short, npy_int, npy_long, npy_longlong#
+ * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*10#
+ * #IS_HALF = 0, 0, 0, 1, 0*13#
  */
 
 NPY_NO_EXPORT void
@@ -213,7 +211,6 @@ NPY_NO_EXPORT void
                            void *_ip2, npy_intp is2_n, npy_intp is2_p,
                            void *_op, npy_intp os_m, npy_intp os_p,
                            npy_intp dm, npy_intp dn, npy_intp dp)
-                           
 {
     npy_intp m, n, p;
     npy_intp ib1_n, ib2_n, ib2_p, ob_p;
@@ -266,20 +263,126 @@ NPY_NO_EXPORT void
 }
 
 /**end repeat**/
+NPY_NO_EXPORT void
+BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n,
+                           void *_ip2, npy_intp is2_n, npy_intp is2_p,
+                           void *_op, npy_intp os_m, npy_intp os_p,
+                           npy_intp dm, npy_intp dn, npy_intp dp)
+{
+    npy_intp m, n, p;
+    npy_intp ib2_p, ob_p;
+    char *ip1 = (char *)_ip1, *ip2 = (char *)_ip2, *op = (char *)_op;
+
+    ib2_p = is2_p * dp;
+    ob_p  = os_p * dp;
+
+    for (m = 0; m < dm; m++) {
+        for (p = 0; p < dp; p++) {
+            char *ip1tmp = ip1;
+            char *ip2tmp = ip2;
+            *(npy_bool *)op = NPY_FALSE;
+            for (n = 0; n < dn; n++) {
+                npy_bool val1 = (*(npy_bool *)ip1tmp);
+                npy_bool val2 = (*(npy_bool *)ip2tmp);
+                if (val1 != 0 && val2 != 0) {
+                    *(npy_bool *)op = NPY_TRUE;
+                    break;
+                }
+                ip2tmp += is2_n;
+                ip1tmp += is1_n;
+            }
+            op  +=  os_p;
+            ip2 += is2_p;
+        }
+        op -= ob_p;
+        ip2 -= ib2_p;
+        ip1 += is1_m;
+        op  +=  os_m;
+    }
+}
+
+NPY_NO_EXPORT void
+OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n,
+                           void *_ip2, npy_intp is2_n, npy_intp is2_p,
+                           void *_op, npy_intp os_m, npy_intp os_p,
+                           npy_intp dm, npy_intp dn, npy_intp dp)
+{
+    char *ip1 = (char *)_ip1, *ip2 = (char *)_ip2, *op = (char *)_op;
+
+    npy_intp ib1_n = is1_n * dn;
+    npy_intp ib2_n = is2_n * dn;
+    npy_intp ib2_p = is2_p * dp;
+    npy_intp ob_p  = os_p * dp;
+    npy_intp m, p, n;
+
+    PyObject *product, *sum_of_products = NULL;
+
+    for (m = 0; m < dm; m++) {
+        for (p = 0; p < dp; p++) {
+            if (dn == 0) {
+                sum_of_products = PyLong_FromLong(0);
+                if (sum_of_products == NULL) {
+                    return;
+                }
+            }
+
+            for (n = 0; n < dn; n++) {
+                PyObject *obj1 = *(PyObject**)ip1, *obj2 = *(PyObject**)ip2;
+                if (obj1 == NULL) {
+                    obj1 = Py_None;
+                }
+                if (obj2 == NULL) {
+                    obj2 = Py_None;
+                }
+
+                product = PyNumber_Multiply(obj1, obj2);
+                if (product == NULL) {
+                    Py_XDECREF(sum_of_products);
+                    return;
+                }
+
+                if (n == 0) {
+                    sum_of_products = product;
+                }
+                else {
+                    Py_SETREF(sum_of_products, PyNumber_Add(sum_of_products, product));
+                    Py_DECREF(product);
+                    if (sum_of_products == NULL) {
+                        return;
+                    }
+                }
+
+                ip2 += is2_n;
+                ip1 += is1_n;
+            }
+
+            *((PyObject **)op) = sum_of_products;
+            ip1 -= ib1_n;
+            ip2 -= ib2_n;
+            op  +=  os_p;
+            ip2 += is2_p;
+        }
+        op -= ob_p;
+        ip2 -= ib2_p;
+        ip1 += is1_m;
+        op  +=  os_m;
+    }
+}
+
 
 /**begin repeat
  *  #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF,
  *          CFLOAT, CDOUBLE, CLONGDOUBLE,
  *          UBYTE, USHORT, UINT, ULONG, ULONGLONG,
  *          BYTE, SHORT, INT, LONG, LONGLONG,
- *          BOOL#
+ *          BOOL, OBJECT#
  *  #typ = npy_float,npy_double,npy_longdouble, npy_half,
  *         npy_cfloat, npy_cdouble, npy_clongdouble,
  *         npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
  *         npy_byte, npy_short, npy_int, npy_long, npy_longlong,
- *         npy_bool#
- * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*11#
- * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*12#
+ *         npy_bool,npy_object#
+ * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*12#
+ * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*13#
  */
 
 
@@ -398,5 +501,3 @@ NPY_NO_EXPORT void
 }
 
 /**end repeat**/
-
-
index 16be7675b9451e3ca769dbb5da276a48a5cea6e1..a664b1b4e1fd35085c4a6fc9ac88e467f2d3bd65 100644 (file)
@@ -3,7 +3,7 @@
  *          CFLOAT, CDOUBLE, CLONGDOUBLE,
  *          UBYTE, USHORT, UINT, ULONG, ULONGLONG,
  *          BYTE, SHORT, INT, LONG, LONGLONG,
- *          BOOL#
+ *          BOOL, OBJECT#
  **/
 NPY_NO_EXPORT void
 @TYPE@_matmul(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
index 3be4a8a2663ced7bd22d01cdfe324527b6b7aa47..1b5b4cb262445b1cfbd4e41d51278ad6bfdaa469 100644 (file)
@@ -5,7 +5,7 @@ import itertools
 import numpy as np
 from numpy.testing import (
     assert_, assert_equal, assert_array_equal, assert_almost_equal,
-    assert_raises, suppress_warnings
+    assert_raises, suppress_warnings, assert_raises_regex, assert_allclose
     )
 
 # Setup for optimize einsum
@@ -90,6 +90,11 @@ class TestEinsum(object):
                           optimize=do_opt)
             assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
                           out=np.arange(4).reshape(2, 2), optimize=do_opt)
+            with assert_raises_regex(ValueError, "'b'"):
+                # gh-11221 - 'c' erroneously appeared in the error message
+                a = np.ones((3, 3, 4, 5, 6))
+                b = np.ones((3, 4, 5))
+                np.einsum('aabcb,abc', a, b)
 
     def test_einsum_views(self):
         # pass-through
@@ -695,6 +700,14 @@ class TestEinsum(object):
         y2 = x[idx[:, None], idx[:, None], idx, idx]
         assert_equal(y1, y2)
 
+    def test_einsum_failed_on_p9_and_s390x(self):
+        # Issues gh-14692 and gh-12689
+        # A signed vs. unsigned char bug caused errors on Power9 and s390x Linux
+        tensor = np.random.random_sample((10, 10, 10, 10))
+        x = np.einsum('ijij->', tensor)
+        y = tensor.trace(axis1=0, axis2=2).trace()
+        assert_allclose(x, y)
+
     def test_einsum_all_contig_non_contig_output(self):
         # Issue gh-5907, tests that the all contiguous special case
         # actually checks the contiguity of the output
index 873aa9312b5975d77a19b2d5734bb11f2743042f..c55556535c9aa17e4c452367994494ab0344f285 100644 (file)
@@ -3884,6 +3884,64 @@ class TestPickling(object):
     
         assert_equal(original.dtype, new.dtype)
 
+    def test_py2_can_load_py3_pickle_with_dtype_field_names(self):
+        # gh-2407 and PR #14275
+        # Py2 should be able to load a pickle that was created in PY3
+        # when the pickle contains a structured dtype with field names
+        import numpy as np
+
+        expected_dtype = np.dtype([('SPOT', np.float64)])
+        expected_data = np.array([(6.0)], dtype=expected_dtype)
+        # Pickled under Python 3.6.5 with protocol=2 by the code below:
+        # pickle.dumps(expected_data, protocol=2)
+        saved_pickle_from_py3 = b'''\
+\x80\x02cnumpy.core.multiarray\n_reconstruct\nq\x00cnumpy\nndarray\n\
+q\x01K\x00\x85q\x02c_codecs\nencode\nq\x03X\x01\x00\x00\x00bq\x04X\
+\x06\x00\x00\x00latin1q\x05\x86q\x06Rq\x07\x87q\x08Rq\t(K\x01K\x01\
+\x85q\ncnumpy\ndtype\nq\x0bX\x02\x00\x00\x00V8q\x0cK\x00K\x01\x87q\
+\rRq\x0e(K\x03X\x01\x00\x00\x00|q\x0fNX\x04\x00\x00\x00SPOTq\x10\
+\x85q\x11}q\x12h\x10h\x0bX\x02\x00\x00\x00f8q\x13K\x00K\x01\x87\
+q\x14Rq\x15(K\x03X\x01\x00\x00\x00<q\x16NNNJ\xff\xff\xff\xffJ\xff\
+\xff\xff\xffK\x00tq\x17bK\x00\x86q\x18sK\x08K\x01K\x10tq\x19b\x89h\
+\x03X\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18@q\x1ah\x05\x86q\
+\x1bRq\x1ctq\x1db.\
+'''
+
+        if sys.version_info[0] < 3:  # PY2
+            assert pickle.loads(saved_pickle_from_py3) == expected_data
+        else:
+            # check that the string above is what we claim on PY3
+            py3_pickle_dump = pickle.dumps(expected_data, protocol=2)
+            assert py3_pickle_dump == saved_pickle_from_py3
+
+    def test_py3_can_load_py2_pickle_with_dtype_field_names(self):
+        # gh-2407 and PR #14275
+        # Roundtrip: Py3 should load a pickle that was created in PY2
+        # after loading the saved_pickle (from PY3) in the test named
+        # 'test_py2_can_load_py3_pickle_with_dtype_field_names'
+        import numpy as np
+
+        expected_dtype = np.dtype([('SPOT', np.float64)])
+        expected = np.array([(6.0)], dtype=expected_dtype)
+        # Pickled under Python 2.7.16 with protocol=2 after it was loaded
+        # by test 'test_py2_can_load_py3_pickle_with_dtype_field_names'
+        pickle_from_py2 = b'''\
+\x80\x02cnumpy.core.multiarray\n_reconstruct\nq\x01cnumpy\nndarray\n\
+q\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x01\x85cnumpy\ndtype\nq\x04U\x02\
+V8K\x00K\x01\x87Rq\x05(K\x03U\x01|NU\x04SPOTq\x06\x85q\x07}q\x08h\x06h\
+\x04U\x02f8K\x00K\x01\x87Rq\t(K\x03U\x01<NNNJ\xff\xff\xff\xffJ\xff\xff\
+\xff\xffK\x00tbK\x00\x86sK\x08K\x01K\x10tb\x89U\x08\x00\x00\x00\x00\x00\
+\x00\x18@tb.\
+'''
+
+        if sys.version_info[0] >= 3:  # PY3
+            assert pickle.loads(pickle_from_py2) == expected
+        else:
+            # check that the string above is what we claim on PY2
+            if sys.platform.startswith('linux') and not IS_PYPY:
+                assert pickle.dumps(expected, protocol=2) == pickle_from_py2
+
+
 
 class TestFancyIndexing(object):
     def test_list(self):
@@ -6067,7 +6125,69 @@ class TestMatmul(MatmulCommon):
 
         r3 = np.matmul(args[0].copy(), args[1].copy())
         assert_equal(r1, r3)
-        
+
+    def test_matmul_object(self):
+        import fractions
+
+        f = np.vectorize(fractions.Fraction)
+        def random_ints():
+            return np.random.randint(1, 1000, size=(10, 3, 3))
+        M1 = f(random_ints(), random_ints())
+        M2 = f(random_ints(), random_ints())
+
+        M3 = self.matmul(M1, M2)
+
+        [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]]
+
+        assert_allclose(N3, self.matmul(N1, N2))
+
+    def test_matmul_object_type_scalar(self):
+        from fractions import Fraction as F
+        v = np.array([F(2,3), F(5,7)])
+        res = self.matmul(v, v)
+        assert_(type(res) is F)
+
+    def test_matmul_empty(self):
+        a = np.empty((3, 0), dtype=object)
+        b = np.empty((0, 3), dtype=object)
+        c = np.zeros((3, 3))
+        assert_array_equal(np.matmul(a, b), c)
+
+    def test_matmul_exception_multiply(self):
+        # test that matmul fails if `__mul__` is missing
+        class add_not_multiply():
+            def __add__(self, other):
+                return self
+        a = np.full((3,3), add_not_multiply())
+        with assert_raises(TypeError):
+            b = np.matmul(a, a)
+
+    def test_matmul_exception_add(self):
+        # test that matmul fails if `__add__` is missing
+        class multiply_not_add():
+            def __mul__(self, other):
+                return self
+        a = np.full((3,3), multiply_not_add())
+        with assert_raises(TypeError):
+            b = np.matmul(a, a)
+
+    def test_matmul_bool(self):
+        # gh-14439
+        a = np.array([[1, 0],[1, 1]], dtype=bool)
+        assert np.max(a.view(np.uint8)) == 1
+        b = np.matmul(a, a)
+        # matmul with boolean output should always be 0, 1
+        assert np.max(b.view(np.uint8)) == 1
+
+        np.random.seed(42)
+        d = np.random.randint(2, size=4*5, dtype=np.int8)
+        d = d.reshape(4, 5) > 0
+        out1 = np.matmul(d, d.reshape(5, 4))
+        out2 = np.dot(d, d.reshape(5, 4))
+        assert_equal(out1, out2)
+
+        c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool))
+        assert not np.any(c)
 
 
 if sys.version_info[:2] >= (3, 5):
@@ -7725,6 +7845,8 @@ class TestFormat(object):
                 dst = object.__format__(a, '30')
                 assert_equal(res, dst)
 
+from numpy.testing import IS_PYPY
+
 class TestCTypes(object):
 
     def test_ctypes_is_available(self):
@@ -7791,7 +7913,29 @@ class TestCTypes(object):
 
         # but when the `ctypes_ptr` object dies, so should `arr`
         del ctypes_ptr
+        if IS_PYPY:
+            # PyPy does not recycle arr objects immediately. Trigger gc to
+            # release arr. CPython uses refcounts, so an explicit call to gc
+            # should not be needed here.
+            break_cycles()
+        assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
+
+    def test_ctypes_as_parameter_holds_reference(self):
+        arr = np.array([None]).copy()
+
+        arr_ref = weakref.ref(arr)
+
+        ctypes_ptr = arr.ctypes._as_parameter_
+
+        # `ctypes_ptr` should hold onto `arr`
+        del arr
         break_cycles()
+        assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
+
+        # but when the `ctypes_ptr` object dies, so should `arr`
+        del ctypes_ptr
+        if IS_PYPY:
+            break_cycles()
         assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
 
 
index c059ef510eab9ea06d9f1922ab08eddd994cb4d1..95ed1fa5bb0d8266db98bdf1c87203933cdda720 100644 (file)
@@ -437,6 +437,53 @@ class TestRecord(object):
         arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
         assert_raises(ValueError, lambda: arr[['nofield']])
 
+    @pytest.mark.parametrize('nfields', [0, 1, 2])
+    def test_assign_dtype_attribute(self, nfields):
+        dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
+        data = np.zeros(3, dt).view(np.recarray)
+
+        # the original and resulting dtypes differ on whether they are records
+        assert data.dtype.type == np.record
+        assert dt.type != np.record
+
+        # ensure that the dtype remains a record even when assigned
+        data.dtype = dt
+        assert data.dtype.type == np.record
+
+    @pytest.mark.parametrize('nfields', [0, 1, 2])
+    def test_nested_fields_are_records(self, nfields):
+        """ Test that nested structured types are treated as records too """
+        dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
+        dt_outer = np.dtype([('inner', dt)])
+
+        data = np.zeros(3, dt_outer).view(np.recarray)
+        assert isinstance(data, np.recarray)
+        assert isinstance(data['inner'], np.recarray)
+
+        data0 = data[0]
+        assert isinstance(data0, np.record)
+        assert isinstance(data0['inner'], np.record)
+
+    def test_nested_dtype_padding(self):
+        """ test that trailing padding is preserved """
+        # construct a dtype with padding at the end
+        dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)])
+        dt_padded_end = np.dtype(dict(
+            names=['a', 'b'],
+            formats=[np.uint8, np.uint8],
+            offsets=[0, 1],
+            itemsize=3
+        ))  # dt[['a', 'b']], but that's not supported in 1.16
+        assert dt_padded_end.itemsize == dt.itemsize
+
+        dt_outer = np.dtype([('inner', dt_padded_end)])
+
+        data = np.zeros(3, dt_outer).view(np.recarray)
+        assert_equal(data['inner'].dtype, dt_padded_end)
+
+        data0 = data[0]
+        assert_equal(data0['inner'].dtype, dt_padded_end)
+
 
 def test_find_duplicate():
     l1 = [1, 2, 3, 4, 5, 6]
index 3b9ca72464d5421d5ad94c88fe03aeed4acf6d7a..8d84b2c12dbd6cb4e410911f84a68f2733812338 100644 (file)
@@ -469,7 +469,7 @@ class TestRegression(object):
                 result = pickle.loads(data, encoding='bytes')
                 assert_equal(result, original)
 
-                if isinstance(result, np.ndarray) and result.dtype.names:
+                if isinstance(result, np.ndarray) and result.dtype.names is not None:
                     for name in result.dtype.names:
                         assert_(isinstance(name, str))
 
@@ -2455,3 +2455,33 @@ class TestRegression(object):
             __array_interface__ = {}
 
         np.array([T()])
+
+    def test_2d__array__shape(self):
+        class T(object):
+            def __array__(self):
+                return np.ndarray(shape=(0,0))
+
+            # Make sure __array__ is used instead of Sequence methods.
+            def __iter__(self):
+                return iter([])
+
+            def __getitem__(self, idx):
+                raise AssertionError("__getitem__ was called")
+
+            def __len__(self):
+                return 0
+
+
+        t = T()
+        # gh-13659, would raise in broadcasting [x=t for x in result]
+        np.array([t])
+
+    @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+    @pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8),
+                        reason='overflows on windows, fixed in bpo-16865')
+    def test_to_ctypes(self):
+        # gh-14214
+        arr = np.zeros((2 ** 31 + 1,), 'b')
+        assert arr.size * arr.itemsize > 2 ** 31
+        c_arr = np.ctypeslib.as_ctypes(arr)
+        assert_equal(c_arr._length_, arr.size)
index 535ea768bea453a6472373f8c0d781d09fab1aa4..8f4715ffd873ec6576a1d858a9fffc2cc60f0d0d 100644 (file)
@@ -92,11 +92,11 @@ else:
     # Adapted from Albert Strasheim
     def load_library(libname, loader_path):
         """
-        It is possible to load a library using 
-        >>> lib = ctypes.cdll[<full_path_name>]
+        It is possible to load a library using
+        >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
 
         But there are cross-platform considerations, such as library file extensions,
-        plus the fact Windows will just load the first library it finds with that name.  
+        plus the fact Windows will just load the first library it finds with that name.
         NumPy supplies the load_library function as a convenience.
 
         Parameters
@@ -110,12 +110,12 @@ else:
         Returns
         -------
         ctypes.cdll[libpath] : library object
-           A ctypes library object 
+           A ctypes library object
 
         Raises
         ------
         OSError
-            If there is no library with the expected extension, or the 
+            If there is no library with the expected extension, or the
             library is defective and cannot be loaded.
         """
         if ctypes.__version__ < '1.0.1':
@@ -321,7 +321,7 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
     # produce a name for the new type
     if dtype is None:
         name = 'any'
-    elif dtype.names:
+    elif dtype.names is not None:
         name = str(id(dtype))
     else:
         name = dtype.str
@@ -535,7 +535,10 @@ if ctypes is not None:
         if readonly:
             raise TypeError("readonly arrays unsupported")
 
-        dtype = _dtype((ai["typestr"], ai["shape"]))
-        result = as_ctypes_type(dtype).from_address(addr)
+        # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
+        # dtype.itemsize (gh-14214)
+        ctype_scalar = as_ctypes_type(ai["typestr"])
+        result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
+        result = result_type.from_address(addr)
         result.__keep = obj
         return result
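
The rewritten ``as_ctypes`` path is exercised at scale by the new
test_to_ctypes regression test; a small sketch of the API itself:

    import numpy as np

    arr = np.zeros(4, dtype=np.uint8)
    c_arr = np.ctypeslib.as_ctypes(arr)
    print(type(c_arr).__name__, c_arr._length_)   # c_ubyte_Array_4 4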
index 8a042f190924c1281b2029714bf3fe2f61cd43aa..9713ff8b1671a527e919e27719887dfd196ffe14 100644 (file)
@@ -121,7 +121,7 @@ def has_nested_fields(ndtype):
 
     """
     for name in ndtype.names or ():
-        if ndtype[name].names:
+        if ndtype[name].names is not None:
             return True
     return False
 
@@ -925,28 +925,27 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
         names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
         ndtype = np.dtype(dict(formats=ndtype, names=names))
     else:
-        nbtypes = len(ndtype)
         # Explicit names
         if names is not None:
             validate = NameValidator(**validationargs)
             if isinstance(names, basestring):
                 names = names.split(",")
             # Simple dtype: repeat to match the nb of names
-            if nbtypes == 0:
+            if ndtype.names is None:
                 formats = tuple([ndtype.type] * len(names))
                 names = validate(names, defaultfmt=defaultfmt)
                 ndtype = np.dtype(list(zip(names, formats)))
             # Structured dtype: just validate the names as needed
             else:
-                ndtype.names = validate(names, nbfields=nbtypes,
+                ndtype.names = validate(names, nbfields=len(ndtype.names),
                                         defaultfmt=defaultfmt)
         # No implicit names
-        elif (nbtypes > 0):
+        elif ndtype.names is not None:
             validate = NameValidator(**validationargs)
             # Default initial names : should we change the format ?
-            if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and
+            if ((ndtype.names == tuple("f%i" % i for i in range(len(ndtype.names)))) and
                     (defaultfmt != "f%i")):
-                ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)
+                ndtype.names = validate([''] * len(ndtype.names), defaultfmt=defaultfmt)
             # Explicit initial names : just validate
             else:
                 ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
index 4f6371058eef28a67c980c15be71bb151bc212c2..8650685a71a2c4cbf6b172e3afe56c1b5fd2c4d2 100644 (file)
@@ -244,27 +244,19 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
     if pad_amt == 0:
         return arr
 
-    # Generate shape for final concatenated array
-    padshape = tuple(x if i != axis else pad_amt
-                     for (i, x) in enumerate(arr.shape))
-
-    # Generate an n-dimensional array incrementing along `axis`
-    ramp_arr = _arange_ndarray(arr, padshape, axis,
-                               reverse=True).astype(np.float64)
-
-    # Appropriate slicing to extract n-dimensional edge along `axis`
+    # Slice a chunk from the edge to calculate stats on and extract edge
     edge_slice = _slice_first(arr.shape, 1, axis=axis)
+    edge = arr[edge_slice]
 
-    # Extract edge, and extend along `axis`
-    edge_pad = arr[edge_slice].repeat(pad_amt, axis)
-
-    # Linear ramp
-    slope = (end - edge_pad) / float(pad_amt)
-    ramp_arr = ramp_arr * slope
-    ramp_arr += edge_pad
-    _round_ifneeded(ramp_arr, arr.dtype)
+    ramp_arr = np.linspace(
+        start=end,
+        stop=edge.squeeze(axis),
+        num=pad_amt,
+        endpoint=False,
+        dtype=arr.dtype,
+        axis=axis
+    )
 
-    # Ramp values will most likely be float, cast them to the same type as arr
     return _do_prepend(arr, ramp_arr, axis)
 
 
@@ -294,27 +286,23 @@ def _append_ramp(arr, pad_amt, end, axis=-1):
     if pad_amt == 0:
         return arr
 
-    # Generate shape for final concatenated array
-    padshape = tuple(x if i != axis else pad_amt
-                     for (i, x) in enumerate(arr.shape))
-
-    # Generate an n-dimensional array incrementing along `axis`
-    ramp_arr = _arange_ndarray(arr, padshape, axis,
-                               reverse=False).astype(np.float64)
-
-    # Slice a chunk from the edge to calculate stats on
+    # Slice a chunk from the edge to calculate stats on and extract edge
     edge_slice = _slice_last(arr.shape, 1, axis=axis)
+    edge = arr[edge_slice]
+
+    ramp_arr = np.linspace(
+        start=end,
+        stop=edge.squeeze(axis),
+        num=pad_amt,
+        endpoint=False,
+        dtype=arr.dtype,
+        axis=axis
+    )
+    # Reverse linear space in appropriate dimension
+    ramp_arr = ramp_arr[
+        _slice_at_axis(ramp_arr.shape, slice(None, None, -1), axis)
+    ]
 
-    # Extract edge, and extend along `axis`
-    edge_pad = arr[edge_slice].repeat(pad_amt, axis)
-
-    # Linear ramp
-    slope = (end - edge_pad) / float(pad_amt)
-    ramp_arr = ramp_arr * slope
-    ramp_arr += edge_pad
-    _round_ifneeded(ramp_arr, arr.dtype)
-
-    # Ramp values will most likely be float, cast them to the same type as arr
     return _do_append(arr, ramp_arr, axis)
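
The ``np.linspace``-based ramps avoid the intermediate slope arithmetic that
could wrap around for unsigned dtypes (#14211); a quick check with
illustrative values:

    import numpy as np

    x = np.array([128, 64], dtype=np.uint8)
    print(np.pad(x, 2, mode='linear_ramp', end_values=0))
    # expected: [  0  64 128  64  32   0]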
 
 
index d69e04e80aacfa7ea54b32efa86c93b04faeeff3..bed1f46b0d5345831e81ea5533ed541f8c3c107a 100644 (file)
@@ -21,6 +21,16 @@ array_function_dispatch = functools.partial(
 _range = range
 
 
+def _ptp(x):
+    """Peak-to-peak value of x.
+
+    This implementation avoids the problem of signed integer arrays having a
+    peak-to-peak value that cannot be represented with the array's data type.
+    This function returns an unsigned value for signed integer arrays.
+    """
+    return _unsigned_subtract(x.max(), x.min())
+
+
 def _hist_bin_sqrt(x, range):
     """
     Square root histogram bin estimator.
@@ -39,7 +49,7 @@ def _hist_bin_sqrt(x, range):
     h : An estimate of the optimal bin width for the given data.
     """
     del range  # unused
-    return x.ptp() / np.sqrt(x.size)
+    return _ptp(x) / np.sqrt(x.size)
 
 
 def _hist_bin_sturges(x, range):
@@ -62,7 +72,7 @@ def _hist_bin_sturges(x, range):
     h : An estimate of the optimal bin width for the given data.
     """
     del range  # unused
-    return x.ptp() / (np.log2(x.size) + 1.0)
+    return _ptp(x) / (np.log2(x.size) + 1.0)
 
 
 def _hist_bin_rice(x, range):
@@ -86,7 +96,7 @@ def _hist_bin_rice(x, range):
     h : An estimate of the optimal bin width for the given data.
     """
     del range  # unused
-    return x.ptp() / (2.0 * x.size ** (1.0 / 3))
+    return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
 
 
 def _hist_bin_scott(x, range):
@@ -136,7 +146,7 @@ def _hist_bin_stone(x, range):
     """
 
     n = x.size
-    ptp_x = np.ptp(x)
+    ptp_x = _ptp(x)
     if n <= 1 or ptp_x == 0:
         return 0
 
@@ -182,7 +192,7 @@ def _hist_bin_doane(x, range):
             np.true_divide(temp, sigma, temp)
             np.power(temp, 3, temp)
             g1 = np.mean(temp)
-            return x.ptp() / (1.0 + np.log2(x.size) +
+            return _ptp(x) / (1.0 + np.log2(x.size) +
                                     np.log2(1.0 + np.absolute(g1) / sg1))
     return 0.0
 
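For signed integer input, ``max - min`` can exceed the signed type's range, so
``.ptp()`` silently wraps; ``_ptp`` instead routes the subtraction through the
module's existing ``_unsigned_subtract`` helper. A quick illustration of the
failure mode, not part of the patch::

    import numpy as np

    a = np.array([-2, 0, 127], dtype=np.int8)
    print(a.ptp())   # -127: the true range, 129, wraps around in int8
    # Roughly what _unsigned_subtract does: subtract in the unsigned twin type.
    print(np.subtract(a.max(), a.min(), dtype=np.uint8, casting='unsafe'))  # 129
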
index 038d6a4966069c01223df8917537029e2b554543..fe1e65d5b2c2213839489e5afd7028a1d1e27d01 100644 (file)
@@ -2154,7 +2154,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
             outputmask = np.array(masks, dtype=mdtype)
     else:
         # Overwrite the initial dtype names if needed
-        if names and dtype.names:
+        if names and dtype.names is not None:
             dtype.names = names
         # Case 1. We have a structured type
         if len(dtype_flat) > 1:
@@ -2204,7 +2204,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
             #
             output = np.array(data, dtype)
             if usemask:
-                if dtype.names:
+                if dtype.names is not None:
                     mdtype = [(_, bool) for _ in dtype.names]
                 else:
                     mdtype = bool
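The change from ``if dtype.names:`` to ``if dtype.names is not None:`` matters
because a structured dtype with zero fields has ``names == ()``, which is falsy,
so a truthiness test conflates it with the unstructured case. A quick check, for
illustration only::

    import numpy as np

    print(np.dtype(float).names)            # None: not a structured dtype
    print(np.dtype([]).names)               # ():   structured, zero fields
    print(bool(np.dtype([]).names))         # False, same as for None
    print(np.dtype([]).names is not None)   # True, tells the cases apart
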
index c17c39c8aade2ee6f71b83acf1beded629d7ec5b..40060b41a7dc2f6c213923e721b9144a4c229843 100644 (file)
@@ -72,7 +72,7 @@ def recursive_fill_fields(input, output):
             current = input[field]
         except ValueError:
             continue
-        if current.dtype.names:
+        if current.dtype.names is not None:
             recursive_fill_fields(current, output[field])
         else:
             output[field][:len(current)] = current
@@ -139,11 +139,11 @@ def get_names(adtype):
     names = adtype.names
     for name in names:
         current = adtype[name]
-        if current.names:
+        if current.names is not None:
             listnames.append((name, tuple(get_names(current))))
         else:
             listnames.append(name)
-    return tuple(listnames) or None
+    return tuple(listnames)
 
 
 def get_names_flat(adtype):
@@ -176,9 +176,9 @@ def get_names_flat(adtype):
     for name in names:
         listnames.append(name)
         current = adtype[name]
-        if current.names:
+        if current.names is not None:
             listnames.extend(get_names_flat(current))
-    return tuple(listnames) or None
+    return tuple(listnames)
 
 
 def flatten_descr(ndtype):
@@ -215,8 +215,8 @@ def _zip_dtype(seqarrays, flatten=False):
     else:
         for a in seqarrays:
             current = a.dtype
-            if current.names and len(current.names) <= 1:
-                # special case - dtypes of 0 or 1 field are flattened
+            if current.names is not None and len(current.names) == 1:
+                # special case - dtypes of 1 field are flattened
                 newdtype.extend(_get_fieldspec(current))
             else:
                 newdtype.append(('', current))
@@ -268,7 +268,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,):
     names = adtype.names
     for name in names:
         current = adtype[name]
-        if current.names:
+        if current.names is not None:
             if lastname:
                 parents[name] = [lastname, ]
             else:
@@ -281,7 +281,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,):
             elif lastname:
                 lastparent = [lastname, ]
             parents[name] = lastparent or []
-    return parents or None
+    return parents
 
 
 def _izip_fields_flat(iterable):
@@ -435,7 +435,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
     if isinstance(seqarrays, (ndarray, np.void)):
         seqdtype = seqarrays.dtype
         # Make sure we have named fields
-        if not seqdtype.names:
+        if seqdtype.names is None:
             seqdtype = np.dtype([('', seqdtype)])
         if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
             # Minimal processing needed: just make sure everything's a-ok
@@ -653,7 +653,7 @@ def rename_fields(base, namemapper):
         for name in ndtype.names:
             newname = namemapper.get(name, name)
             current = ndtype[name]
-            if current.names:
+            if current.names is not None:
                 newdtype.append(
                     (newname, _recursive_rename_fields(current, namemapper))
                     )
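With the trailing ``or None`` gone, these helpers return empty containers for
empty structured dtypes instead of ``None``, as the tests added further down
verify. Expected behaviour with the patched module, for illustration::

    import numpy as np
    from numpy.lib.recfunctions import get_names, get_fieldstructure

    print(get_names(np.dtype([('a', int), ('b', [])])))   # ('a', ('b', ()))
    print(get_names(np.dtype([])))                        # () rather than None
    print(get_fieldstructure(np.dtype([])))               # {} rather than None
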
index 20f6e4a1bf2a28d3281959cd7d07b39a17c6c1a1..6620db8df31918e0fd2ded2bf8740cb98d0582d5 100644 (file)
@@ -679,6 +679,30 @@ class TestLinearRamp(object):
         ])
         assert_equal(actual, expected)
 
+    @pytest.mark.parametrize("dtype", (
+        np.sctypes["uint"]
+        + np.sctypes["int"]
+        + np.sctypes["float"]
+        + np.sctypes["complex"]
+    ))
+    def test_negative_difference(self, dtype):
+        """
+        Check correct behavior of unsigned dtypes when there is a negative
+        difference between the edge to pad and `end_values`. Check both
+        directions so the test is independent of the implementation, and
+        test all other dtypes in case dtype casting interferes with
+        complex dtypes. See gh-14191.
+        """
+        x = np.array([3], dtype=dtype)
+        result = np.pad(x, 3, mode="linear_ramp", end_values=0)
+        expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype)
+        assert_equal(result, expected)
+
+        x = np.array([0], dtype=dtype)
+        result = np.pad(x, 3, mode="linear_ramp", end_values=3)
+        expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype)
+        assert_equal(result, expected)
+
 
 class TestReflect(object):
     def test_check_simple(self):
index c96b01d42e781e952884a5a7b797fca8fa5d0ef1..594c8e782c51088700e4acd7f48ab501eb3b518e 100644 (file)
@@ -8,6 +8,7 @@ from numpy.testing import (
     assert_array_almost_equal, assert_raises, assert_allclose,
     assert_array_max_ulp, assert_raises_regex, suppress_warnings,
     )
+import pytest
 
 
 class TestHistogram(object):
@@ -595,6 +596,16 @@ class TestHistogramOptimBinNums(object):
                 msg += " with datasize of {0}".format(testlen)
                 assert_equal(len(a), numbins, err_msg=msg)
 
+    @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
+                                      'stone', 'rice', 'sturges'])
+    def test_signed_integer_data(self, bins):
+        # Regression test for gh-14379.
+        a = np.array([-2, 0, 127], dtype=np.int8)
+        hist, edges = np.histogram(a, bins=bins)
+        hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins)
+        assert_array_equal(hist, hist32)
+        assert_array_equal(edges, edges32)
+
     def test_simple_weighted(self):
         """
         Check that weighted data raises a TypeError
index b8b786816a52e3dccddac8b416b399e9eb3b0b9f..899e490312e22e94fda24496be9d02bf65ed0f92 100644 (file)
@@ -1527,6 +1527,13 @@ M   33  21.99
             test = np.genfromtxt(TextIO(data), delimiter=";",
                                  dtype=ndtype, converters=converters)
 
+        # nested but empty fields also aren't supported
+        ndtype = [('idx', int), ('code', object), ('nest', [])]
+        with assert_raises_regex(NotImplementedError,
+                                 'Nested fields.* not supported.*'):
+            test = np.genfromtxt(TextIO(data), delimiter=";",
+                                 dtype=ndtype, converters=converters)
+
     def test_userconverters_with_explicit_dtype(self):
         # Test user_converters w/ explicit (standard) dtype
         data = TextIO('skip,skip,2001-01-01,1.0,skip')
index dc4afe077c3de3030a4afe2917da1a2ce0ab8929..0c839d486fe7ca4cdc990f44ed37a98f311e4949 100644 (file)
@@ -115,6 +115,14 @@ class TestRecFunctions(object):
         test = get_names(ndtype)
         assert_equal(test, ('a', ('b', ('ba', 'bb'))))
 
+        ndtype = np.dtype([('a', int), ('b', [])])
+        test = get_names(ndtype)
+        assert_equal(test, ('a', ('b', ())))
+
+        ndtype = np.dtype([])
+        test = get_names(ndtype)
+        assert_equal(test, ())
+
     def test_get_names_flat(self):
         # Test get_names_flat
         ndtype = np.dtype([('A', '|S3'), ('B', float)])
@@ -125,6 +133,14 @@ class TestRecFunctions(object):
         test = get_names_flat(ndtype)
         assert_equal(test, ('a', 'b', 'ba', 'bb'))
 
+        ndtype = np.dtype([('a', int), ('b', [])])
+        test = get_names_flat(ndtype)
+        assert_equal(test, ('a', 'b'))
+
+        ndtype = np.dtype([])
+        test = get_names_flat(ndtype)
+        assert_equal(test, ())
+
     def test_get_fieldstructure(self):
         # Test get_fieldstructure
 
@@ -147,6 +163,11 @@ class TestRecFunctions(object):
                    'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
         assert_equal(test, control)
 
+        # 0 fields
+        ndtype = np.dtype([])
+        test = get_fieldstructure(ndtype)
+        assert_equal(test, {})
+
     def test_find_duplicates(self):
         # Test find_duplicates
         a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
index daf2f8770c1ba823f323715c0d0cfbcfcc678f7c..b16e1670af38070a7ac9c959649fd298bf9d1356 100644 (file)
@@ -211,7 +211,7 @@ class MaskedRecords(MaskedArray, object):
         _localdict = ndarray.__getattribute__(self, '__dict__')
         _data = ndarray.view(self, _localdict['_baseclass'])
         obj = _data.getfield(*res)
-        if obj.dtype.fields:
+        if obj.dtype.names is not None:
             raise NotImplementedError("MaskedRecords is currently limited to "
                                       "simple records.")
         # Get some special attributes
index 5eec368fd27a4c811d7f53a11e56ad735c43a6cb..c098e9181f2ad31a096b292425969b98f2b445a7 100644 (file)
@@ -20,7 +20,7 @@ from warnings import WarningMessage
 import pprint
 
 from numpy.core import(
-     float32, empty, arange, array_repr, ndarray, isnat, array)
+     intp, float32, empty, arange, array_repr, ndarray, isnat, array)
 from numpy.lib.utils import deprecate
 
 if sys.version_info[0] >= 3:
@@ -301,6 +301,15 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
     check that all elements of these objects are equal. An exception is raised
     at the first conflicting values.
 
+    When one of `actual` and `desired` is a scalar and the other is array_like,
+    the function checks that each element of the array_like object is equal to
+    the scalar.
+
+    This function handles NaN comparisons as if NaN were a "normal" number.
+    That is, no assertion is raised if both objects have NaNs in the same
+    positions.  This is in contrast to the IEEE standard on NaNs, which says
+    that NaN compared to anything must return False.
+
     Parameters
     ----------
     actual : array_like
@@ -328,6 +337,11 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
      ACTUAL: 5
      DESIRED: 6
 
+    The following comparison does not raise an exception.  There are NaNs
+    in the inputs, but they are in the same positions.
+
+    >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])
+
     """
     __tracebackhide__ = True  # Hide traceback for py.test
     if isinstance(desired, dict):
@@ -381,21 +395,6 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
     if isscalar(desired) != isscalar(actual):
         raise AssertionError(msg)
 
-    # Inf/nan/negative zero handling
-    try:
-        isdesnan = gisnan(desired)
-        isactnan = gisnan(actual)
-        if isdesnan and isactnan:
-            return  # both nan, so equal
-
-        # handle signed zero specially for floats
-        if desired == 0 and actual == 0:
-            if not signbit(desired) == signbit(actual):
-                raise AssertionError(msg)
-
-    except (TypeError, ValueError, NotImplementedError):
-        pass
-
     try:
         isdesnat = isnat(desired)
         isactnat = isnat(actual)
@@ -411,6 +410,33 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
     except (TypeError, ValueError, NotImplementedError):
         pass
 
+    # Inf/nan/negative zero handling
+    try:
+        isdesnan = gisnan(desired)
+        isactnan = gisnan(actual)
+        if isdesnan and isactnan:
+            return  # both nan, so equal
+
+        # handle signed zero specially for floats
+        array_actual = array(actual)
+        array_desired = array(desired)
+        if (array_actual.dtype.char in 'Mm' or
+                array_desired.dtype.char in 'Mm'):
+            # version 1.18
+            # Before this version, gisnan failed for datetime64 and
+            # timedelta64. Now it succeeds, but comparing to a scalar
+            # of a different type emits a DeprecationWarning.
+            # Avoid that by skipping the next check.
+            raise NotImplementedError('cannot compare to a scalar '
+                                      'with a different type')
+
+        if desired == 0 and actual == 0:
+            if not signbit(desired) == signbit(actual):
+                raise AssertionError(msg)
+
+    except (TypeError, ValueError, NotImplementedError):
+        pass
+
     try:
         # Explicitly use __eq__ for comparison, gh-2552
         if not (desired == actual):
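The NaT check now runs before the NaN/signed-zero block, and
datetime64/timedelta64 values skip the signed-zero comparison; the NaN and
signed-zero semantics themselves are unchanged. Behaviour this preserves, for
illustration::

    import numpy as np
    from numpy.testing import assert_equal

    assert_equal(np.nan, np.nan)    # passes: matching NaNs compare equal here
    try:
        assert_equal(0.0, -0.0)     # raises: signed zeros are distinguished
    except AssertionError:
        print("signed zeros differ")
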
@@ -693,12 +719,13 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
                          header='', precision=6, equal_nan=True,
                          equal_inf=True):
     __tracebackhide__ = True  # Hide traceback for py.test
-    from numpy.core import array, array2string, isnan, inf, bool_, errstate
+    from numpy.core import (array, array2string, isnan, inf, bool_,
+                            errstate, all, max, object_)
 
     x = array(x, copy=False, subok=True)
     y = array(y, copy=False, subok=True)
 
-    # original array for output formating
+    # original array for output formatting
     ox, oy = x, y
 
     def isnumber(x):
@@ -723,7 +749,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
         # (2) __eq__ on some ndarray subclasses returns Python booleans
         #     instead of element-wise comparisons, so we cast to bool_() and
         #     use isinstance(..., bool) checks
-        # (3) subclasses with bare-bones __array_function__ implemenations may
+        # (3) subclasses with bare-bones __array_function__ implementations may
         #     not implement np.all(), so favor using the .all() method
         # We are not committed to supporting such subclasses, but it's nice to
         # support them if possible.
@@ -784,26 +810,29 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
 
         if isinstance(val, bool):
             cond = val
-            reduced = [0]
+            reduced = array([val])
         else:
             reduced = val.ravel()
             cond = reduced.all()
-            reduced = reduced.tolist()
 
         # The below comparison is a hack to ensure that fully masked
         # results, for which val.ravel().all() returns np.ma.masked,
         # do not trigger a failure (np.ma.masked != True evaluates as
         # np.ma.masked, which is falsy).
         if cond != True:
-            mismatch = 100.0 * reduced.count(0) / ox.size
-            remarks = ['Mismatch: {:.3g}%'.format(mismatch)]
+            n_mismatch = reduced.size - reduced.sum(dtype=intp)
+            n_elements = flagged.size if flagged.ndim != 0 else reduced.size
+            percent_mismatch = 100 * n_mismatch / n_elements
+            remarks = [
+                'Mismatched elements: {} / {} ({:.3g}%)'.format(
+                    n_mismatch, n_elements, percent_mismatch)]
 
             with errstate(invalid='ignore', divide='ignore'):
                 # ignore errors for non-numeric types
                 try:
                     error = abs(x - y)
-                    max_abs_error = error.max()
-                    if error.dtype == 'object':
+                    max_abs_error = max(error)
+                    if getattr(error, 'dtype', object_) == object_:
                         remarks.append('Max absolute difference: '
                                         + str(max_abs_error))
                     else:
@@ -812,8 +841,13 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
 
                     # note: this definition of relative error matches that one
                     # used by assert_allclose (found in np.isclose)
-                    max_rel_error = (error / abs(y)).max()
-                    if error.dtype == 'object':
+                    # Filter values where the divisor would be zero
+                    nonzero = bool_(y != 0)
+                    if all(~nonzero):
+                        max_rel_error = array(inf)
+                    else:
+                        max_rel_error = max(error[nonzero] / abs(y[nonzero]))
+                    if getattr(error, 'dtype', object_) == object_:
                         remarks.append('Max relative difference: '
                                         + str(max_rel_error))
                     else:
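Entries where ``y`` is zero are now excluded before the maximum relative error
is computed, so a zero divisor no longer floods the report with ``nan``/``inf``
(when every divisor is zero, ``inf`` is reported deliberately). Mirroring the
new test further down, for illustration::

    import numpy as np
    from numpy.testing import assert_allclose

    try:
        assert_allclose(np.array([0, 1]), np.array([0, 2]))
    except AssertionError as e:
        print(e)   # includes "Max relative difference: 0.5" (from 1/2),
                   # computed only over the nonzero divisor y == 2
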
@@ -842,10 +876,11 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
     Raises an AssertionError if two array_like objects are not equal.
 
     Given two array_like objects, check that the shape is equal and all
-    elements of these objects are equal. An exception is raised at
-    shape mismatch or conflicting values. In contrast to the standard usage
-    in numpy, NaNs are compared like numbers, no assertion is raised if
-    both objects have NaNs in the same positions.
+    elements of these objects are equal (but see the Notes for the special
+    handling of a scalar). An exception is raised at shape mismatch or
+    conflicting values. In contrast to the standard usage in numpy, NaNs
+    are compared like numbers, no assertion is raised if both objects have
+    NaNs in the same positions.
 
     The usual caution for verifying equality with floating point numbers is
     advised.
@@ -872,6 +907,12 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
                      relative and/or absolute precision.
     assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
 
+    Notes
+    -----
+    When one of `x` and `y` is a scalar and the other is array_like, the
+    function checks that each element of the array_like object is equal to
+    the scalar.
+
     Examples
     --------
     The first assert does not raise an exception:
@@ -879,7 +920,7 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
     >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
     ...                               [np.exp(0),2.33333, np.nan])
 
-    Assert fails with numerical inprecision with floats:
+    Assert fails with numerical imprecision with floats:
 
     >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
     ...                               [1, np.sqrt(np.pi)**2, np.nan])
@@ -900,6 +941,12 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
     ...                            [1, np.sqrt(np.pi)**2, np.nan],
     ...                            rtol=1e-10, atol=0)
 
+    As mentioned in the Notes section, `assert_array_equal` has special
+    handling for scalars. Here the test checks that each value in `x` is 3:
+
+    >>> x = np.full((2, 5), fill_value=3)
+    >>> np.testing.assert_array_equal(x, 3)
+
     """
     __tracebackhide__ = True  # Hide traceback for py.test
     assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
@@ -1138,7 +1185,7 @@ def assert_string_equal(actual, desired):
     if desired == actual:
         return
 
-    diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
+    diff = list(difflib.Differ().compare(actual.splitlines(True), desired.splitlines(True)))
     diff_list = []
     while diff:
         d1 = diff.pop(0)
@@ -1451,9 +1498,9 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
     Raises an AssertionError if two objects are not equal up to desired
     tolerance.
 
-    The test is equivalent to ``allclose(actual, desired, rtol, atol)``.
-    It compares the difference between `actual` and `desired` to
-    ``atol + rtol * abs(desired)``.
+    The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note
+    that ``allclose`` has different default values). It compares the difference
+    between `actual` and `desired` to ``atol + rtol * abs(desired)``.
 
     .. versionadded:: 1.5.0
 
index 5038530015119baf10dab4608fad91c9a113a1af..7f6cbb8fe767bef1dbcc3b0d26462385b587e2a3 100644 (file)
@@ -17,6 +17,7 @@ from numpy.testing import (
     clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
     tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
     )
+from numpy.core.overrides import ENABLE_ARRAY_FUNCTION
 
 
 class _GenericTest(object):
@@ -89,6 +90,21 @@ class TestArrayEqual(_GenericTest):
         for t in ['S1', 'U1']:
             foo(t)
 
+    def test_0_ndim_array(self):
+        x = np.array(473963742225900817127911193656584771)
+        y = np.array(18535119325151578301457182298393896)
+        assert_raises(AssertionError, self._assert_func, x, y)
+
+        y = x
+        self._assert_func(x, y)
+
+        x = np.array(43)
+        y = np.array(10)
+        assert_raises(AssertionError, self._assert_func, x, y)
+
+        y = x
+        self._assert_func(x, y)
+
     def test_generic_rank3(self):
         """Test rank 3 array for all dtypes."""
         def foo(t):
@@ -179,6 +195,8 @@ class TestArrayEqual(_GenericTest):
         self._test_not_equal(a, b)
         self._test_not_equal(b, a)
 
+    @pytest.mark.skipif(
+        not ENABLE_ARRAY_FUNCTION, reason='requires __array_function__')
     def test_subclass_that_does_not_implement_npall(self):
         class MyArray(np.ndarray):
             def __array_function__(self, *args, **kwargs):
@@ -186,9 +204,8 @@ class TestArrayEqual(_GenericTest):
 
         a = np.array([1., 2.]).view(MyArray)
         b = np.array([2., 3.]).view(MyArray)
-        if np.core.overrides.ENABLE_ARRAY_FUNCTION:
-            with assert_raises(TypeError):
-                np.all(a)
+        with assert_raises(TypeError):
+            np.all(a)
         self._test_equal(a, a)
         self._test_not_equal(a, b)
         self._test_not_equal(b, a)
@@ -518,7 +535,7 @@ class TestAlmostEqual(_GenericTest):
         with pytest.raises(AssertionError) as exc_info:
             self._assert_func(x, y, decimal=12)
         msgs = str(exc_info.value).split('\n')
-        assert_equal(msgs[3], 'Mismatch: 100%')
+        assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')
         assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
         assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
         assert_equal(
@@ -534,7 +551,7 @@ class TestAlmostEqual(_GenericTest):
         with pytest.raises(AssertionError) as exc_info:
             self._assert_func(x, y)
         msgs = str(exc_info.value).split('\n')
-        assert_equal(msgs[3], 'Mismatch: 33.3%')
+        assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')
         assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
         assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
         assert_equal(msgs[6], ' x: array([1.     , 2.     , 3.00003])')
@@ -546,7 +563,7 @@ class TestAlmostEqual(_GenericTest):
         with pytest.raises(AssertionError) as exc_info:
             self._assert_func(x, y)
         msgs = str(exc_info.value).split('\n')
-        assert_equal(msgs[3], 'Mismatch: 50%')
+        assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')
         assert_equal(msgs[4], 'Max absolute difference: 1.')
         assert_equal(msgs[5], 'Max relative difference: 1.')
         assert_equal(msgs[6], ' x: array([inf,  0.])')
@@ -558,10 +575,30 @@ class TestAlmostEqual(_GenericTest):
         with pytest.raises(AssertionError) as exc_info:
             self._assert_func(x, y)
         msgs = str(exc_info.value).split('\n')
-        assert_equal(msgs[3], 'Mismatch: 100%')
+        assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')
         assert_equal(msgs[4], 'Max absolute difference: 2')
         assert_equal(msgs[5], 'Max relative difference: inf')
 
+    def test_error_message_2(self):
+        """Check the message is formatted correctly when either x or y is a scalar."""
+        x = 2
+        y = np.ones(20)
+        with pytest.raises(AssertionError) as exc_info:
+            self._assert_func(x, y)
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
+        assert_equal(msgs[4], 'Max absolute difference: 1.')
+        assert_equal(msgs[5], 'Max relative difference: 1.')
+
+        y = 2
+        x = np.ones(20)
+        with pytest.raises(AssertionError) as exc_info:
+            self._assert_func(x, y)
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
+        assert_equal(msgs[4], 'Max absolute difference: 1.')
+        assert_equal(msgs[5], 'Max relative difference: 0.5')
+
     def test_subclass_that_cannot_be_bool(self):
         # While we cannot guarantee testing functions will always work for
         # subclasses, the tests should ideally rely only on subclasses having
@@ -586,9 +623,9 @@ class TestApproxEqual(object):
     def setup(self):
         self._assert_func = assert_approx_equal
 
-    def test_simple_arrays(self):
-        x = np.array([1234.22])
-        y = np.array([1234.23])
+    def test_simple_0d_arrays(self):
+        x = np.array(1234.22)
+        y = np.array(1234.23)
 
         self._assert_func(x, y, significant=5)
         self._assert_func(x, y, significant=6)
@@ -853,7 +890,8 @@ class TestAssertAllclose(object):
         with pytest.raises(AssertionError) as exc_info:
             assert_allclose(a, b)
         msg = str(exc_info.value)
-        assert_('Mismatch: 25%\nMax absolute difference: 1\n'
+        assert_('Mismatched elements: 1 / 4 (25%)\n'
+                'Max absolute difference: 1\n'
                 'Max relative difference: 0.5' in msg)
 
     def test_equal_nan(self):
@@ -878,6 +916,15 @@ class TestAssertAllclose(object):
         assert_array_less(a, b)
         assert_allclose(a, b)
 
+    def test_report_max_relative_error(self):
+        a = np.array([0, 1])
+        b = np.array([0, 2])
+
+        with pytest.raises(AssertionError) as exc_info:
+            assert_allclose(a, b)
+        msg = str(exc_info.value)
+        assert_('Max relative difference: 0.5' in msg)
+
 
 class TestArrayAlmostEqualNulp(object):
 
@@ -1506,6 +1553,7 @@ class TestAssertNoGcCycles(object):
         with assert_raises(AssertionError):
             assert_no_gc_cycles(make_cycle)
 
+    @pytest.mark.slow
     def test_fails(self):
         """
         Test that in cases where the garbage cannot be collected, we raise an
index 33a3fc751d7c27f46f51bf31ce5b9f392278c228..8538786071a53dec93fe6f32c0ee78e3fe8a14d3 100644 (file)
@@ -42,7 +42,7 @@ from paver.easy import Bunch, options, task, sh
 #-----------------------------------
 
 # Path to the release notes
-RELEASE_NOTES = 'doc/release/1.16.5-notes.rst'
+RELEASE_NOTES = 'doc/release/1.16.6-notes.rst'
 
 
 #-------------------------------------------------------
index 61c5e6e7d3d262f510a77814ad53e0ee720d472d..954668236cdb052df7698a558fdac8c1481ce66e 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@ Operating System :: MacOS
 
 MAJOR               = 1
 MINOR               = 16
-MICRO               = 5
+MICRO               = 6
 ISRELEASED          = True
 VERSION             = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
 
index 49cd91e1e57985f29d781080009e9af14026e59a..98d9a74d11bc3684fc32f1c7e13b509298c55be2 100644 (file)
@@ -23,6 +23,7 @@ runtime:
 build:
     ci:
     # install dependencies
+    - sudo apt-get update
     - sudo apt-get install gcc gfortran libblas-dev liblapack-dev
     # add pathlib for Python 2, otherwise many tests are skipped
     - pip install --upgrade pip