--- /dev/null
+
+Contributors
+============
+
+A total of 5 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Chris Burr +
+* Matti Picus
+* Qiming Sun +
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#14758 <https://github.com/numpy/numpy/pull/14758>`__: BLD: declare support for python 3.8
+* `#14781 <https://github.com/numpy/numpy/pull/14781>`__: BUG: random: biased samples from integers() with 8 or 16 bit...
+* `#14851 <https://github.com/numpy/numpy/pull/14851>`__: BUG: Fix _ctypes class circular reference. (#13808)
+* `#14852 <https://github.com/numpy/numpy/pull/14852>`__: BLD: add 'apt update' to shippable
+* `#14855 <https://github.com/numpy/numpy/pull/14855>`__: BUG: Fix `np.einsum` errors on Power9 Linux and z/Linux
+* `#14857 <https://github.com/numpy/numpy/pull/14857>`__: BUG: lib: Fix histogram problem with signed integer arrays.
+* `#14858 <https://github.com/numpy/numpy/pull/14858>`__: BLD: Prevent -flto from optimising long double representation...
+* `#14866 <https://github.com/numpy/numpy/pull/14866>`__: MAINT: move buffer.h -> npy_buffer.h to avoid conflicts
--- /dev/null
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.17.4 Release Notes
+==========================
+
+This release contains fixes for bugs reported against NumPy 1.17.3 along with
+some build improvements. The Python versions supported in this release
+are 3.5-3.8.
+
+Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
+OpenBLAS >= 0.3.7 to avoid errors on the Skylake architecture.
+
+
+Highlights
+==========
+
+- Fixed biased generation of 8 and 16 bit integers in `random.Generator.integers`.
+- Fixed `np.einsum` regression on Power9 and z/Linux.
+- Fixed histogram problem with signed integer arrays.
+
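+For example, the histogram fix can be verified with a small signed-integer
+array (a minimal sketch, assuming NumPy >= 1.17.4; it mirrors the new
+regression test for gh-14379)::
+
+    import numpy as np
+
+    a = np.array([-2, 0, 127], dtype=np.int8)
+    hist8, edges8 = np.histogram(a, bins='auto')
+    hist32, edges32 = np.histogram(a.astype(np.int32), bins='auto')
+    # the int8 result now matches the int32 result; previously the automatic
+    # bin-width estimators could overflow on signed integer input
+    assert (hist8 == hist32).all() and (edges8 == edges32).all()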
+
+Contributors
+============
+
+A total of 5 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Chris Burr +
+* Matti Picus
+* Qiming Sun +
+* Warren Weckesser
+
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#14758 <https://github.com/numpy/numpy/pull/14758>`__: BLD: declare support for python 3.8
+* `#14781 <https://github.com/numpy/numpy/pull/14781>`__: BUG: random: biased samples from integers() with 8 or 16 bit...
+* `#14851 <https://github.com/numpy/numpy/pull/14851>`__: BUG: Fix _ctypes class circular reference. (#13808)
+* `#14852 <https://github.com/numpy/numpy/pull/14852>`__: BLD: add 'apt update' to shippable
+* `#14855 <https://github.com/numpy/numpy/pull/14855>`__: BUG: Fix `np.einsum` errors on Power9 Linux and z/Linux
+* `#14857 <https://github.com/numpy/numpy/pull/14857>`__: BUG: lib: Fix histogram problem with signed integer arrays.
+* `#14858 <https://github.com/numpy/numpy/pull/14858>`__: BLD: Prevent -flto from optimising long double representation...
+* `#14866 <https://github.com/numpy/numpy/pull/14866>`__: MAINT: move buffer.h -> npy_buffer.h to avoid conflicts
+
Release Notes
*************
+.. include:: ../release/1.17.4-notes.rst
.. include:: ../release/1.17.3-notes.rst
.. include:: ../release/1.17.2-notes.rst
.. include:: ../release/1.17.1-notes.rst
self.value = ptr
-class _unsafe_first_element_pointer(object):
- """
- Helper to allow viewing an array as a ctypes pointer to the first element
-
- This avoids:
- * dealing with strides
- * `.view` rejecting object-containing arrays
- * `memoryview` not supporting overlapping fields
- """
- def __init__(self, arr):
- self.base = arr
-
- @property
- def __array_interface__(self):
- i = dict(
- shape=(),
- typestr='|V0',
- data=(self.base.__array_interface__['data'][0], False),
- strides=(),
- version=3,
- )
- return i
-
-
-def _get_void_ptr(arr):
- """
- Get a `ctypes.c_void_p` to arr.data, that keeps a reference to the array
- """
- import numpy as np
- # convert to a 0d array that has a data pointer referrign to the start
- # of arr. This holds a reference to arr.
- simple_arr = np.asarray(_unsafe_first_element_pointer(arr))
-
- # create a `char[0]` using the same memory.
- c_arr = (ctypes.c_char * 0).from_buffer(simple_arr)
-
- # finally cast to void*
- return ctypes.cast(ctypes.pointer(c_arr), ctypes.c_void_p)
-
-
class _ctypes(object):
def __init__(self, array, ptr=None):
self._arr = array
if ctypes:
self._ctypes = ctypes
- # get a void pointer to the buffer, which keeps the array alive
- self._data = _get_void_ptr(array)
- assert self._data.value == ptr
+ self._data = self._ctypes.c_void_p(ptr)
else:
# fake a pointer-like object that holds onto the reference
self._ctypes = _missing_ctypes()
The returned pointer will keep a reference to the array.
"""
- return self._ctypes.cast(self._data, obj)
+ # _ctypes.cast function causes a circular reference of self._data in
+ # self._data._objects. Attributes of self._data cannot be released
+ # until gc.collect is called. Make a copy of the pointer first then let
+ # it hold the array reference. This is a workaround to circumvent the
+ # CPython bug https://bugs.python.org/issue12836
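+ # For example, after `p = a.ctypes.data_as(ctypes.POINTER(ctypes.c_double))`
+ # the caller may `del a`; the memory stays valid because `p._arr` still
+ # references the array, and is released once `p` itself goes away.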
+ ptr = self._ctypes.cast(self._data, obj)
+ ptr._arr = self._arr
+ return ptr
def shape_as(self, obj):
"""
Enables `c_func(some_array.ctypes)`
"""
- return self._data
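+ # e.g. `some_c_func(arr.ctypes)` passes the array's data pointer; like
+ # data_as(), the returned c_void_p keeps a reference to the array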
+ return self.data_as(ctypes.c_void_p)
# kept for compatibility
get_data = data.fget
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'arrayfunction_override.h'),
- join('src', 'multiarray', 'buffer.h'),
+ join('src', 'multiarray', 'npy_buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
except ValueError:
# try linking to support CC="gcc -flto" or icc -ipo
# struct needs to be volatile so it isn't optimized away
+ # additionally "clang -flto" requires the foo struct to be used
body = body.replace('struct', 'volatile struct')
- body += "int main(void) { return 0; }\n"
+ body += "int main(void) { return foo.before[0]; }\n"
src, obj = cmd._compile(body, None, None, 'c')
cmd.temp_files.append("_configtest")
cmd.compiler.link_executable([obj], "_configtest")
#include "mapping.h"
#include "getset.h"
#include "sequence.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "array_assign.h"
#include "alloc.h"
#include "mem_overlap.h"
#include "cblasfuncs.h"
#include "npy_cblas.h"
-#include "buffer.h"
+#include "npy_buffer.h"
/* check for sequences, but ignore the types numpy considers scalars */
static NPY_INLINE npy_bool
#include "npy_pycompat.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "common.h"
#include "numpyos.h"
#include "arrayobject.h"
+++ /dev/null
-#ifndef _NPY_PRIVATE_BUFFER_H_
-#define _NPY_PRIVATE_BUFFER_H_
-
-extern NPY_NO_EXPORT PyBufferProcs array_as_buffer;
-
-NPY_NO_EXPORT void
-_dealloc_cached_buffer_info(PyObject *self);
-
-NPY_NO_EXPORT PyArray_Descr*
-_descriptor_from_pep3118_format(char *s);
-
-NPY_NO_EXPORT int
-gentype_getbuffer(PyObject *obj, Py_buffer *view, int flags);
-
-#endif
#include "usertypes.h"
#include "common.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "get_attr_string.h"
#include "mem_overlap.h"
#include "conversion_utils.h"
#include "alloc.h"
-#include "buffer.h"
+#include "npy_buffer.h"
static int
PyArray_PyIntAsInt_ErrMsg(PyObject *o, const char * msg) NPY_GCC_NONNULL(2);
#include "ctors.h"
#include "convert_datatype.h"
#include "shape.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "lowlevel_strided_loops.h"
#include "methods.h"
#include "_datetime.h"
#include "descriptor.h"
#include "alloc.h"
#include "assert.h"
-#include "buffer.h"
+#include "npy_buffer.h"
/*
* offset: A starting offset.
* later where it matters the char is cast to a signed char.
*/
for (idim = 0; idim < ndim - 1; ++idim) {
- int label = op_labels[idim];
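+ /* Force a signed read: on platforms where plain `char` is unsigned (e.g.
+  * ppc64le, s390x), the negative entries that mark duplicate labels would
+  * otherwise be read back as large positive values (gh-14692, gh-12689). */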
+ int label = (signed char)op_labels[idim];
/* If it is a proper label, find any duplicates of it. */
if (label > 0) {
/* Search for the next matching label. */
#include "arrayobject.h"
#include "mem_overlap.h"
#include "alloc.h"
-#include "buffer.h"
+#include "npy_buffer.h"
/******************* array attribute get and set routines ******************/
--- /dev/null
+#ifndef _NPY_PRIVATE_BUFFER_H_
+#define _NPY_PRIVATE_BUFFER_H_
+
+extern NPY_NO_EXPORT PyBufferProcs array_as_buffer;
+
+NPY_NO_EXPORT void
+_dealloc_cached_buffer_info(PyObject *self);
+
+NPY_NO_EXPORT PyArray_Descr*
+_descriptor_from_pep3118_format(char *s);
+
+NPY_NO_EXPORT int
+gentype_getbuffer(PyObject *obj, Py_buffer *view, int flags);
+
+#endif
#include "npy_import.h"
#include "dragon4.h"
#include "npy_longdouble.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include <stdlib.h>
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
- assert_raises, suppress_warnings, assert_raises_regex
+ assert_raises, suppress_warnings, assert_raises_regex, assert_allclose
)
# Setup for optimize einsum
y2 = x[idx[:, None], idx[:, None], idx, idx]
assert_equal(y1, y2)
+ def test_einsum_failed_on_p9_and_s390x(self):
+ # Issues gh-14692 and gh-12689
+ # A signed vs. unsigned char bug caused errors on Power9 and s390x Linux
+ tensor = np.random.random_sample((10, 10, 10, 10))
+ x = np.einsum('ijij->', tensor)
+ y = tensor.trace(axis1=0, axis2=2).trace()
+ assert_allclose(x, y)
+
def test_einsum_all_contig_non_contig_output(self):
# Issue gh-5907, tests that the all contiguous special case
# actually checks the contiguity of the output
dst = object.__format__(a, '30')
assert_equal(res, dst)
+from numpy.testing import IS_PYPY
+
class TestCTypes(object):
def test_ctypes_is_available(self):
# but when the `ctypes_ptr` object dies, so should `arr`
del ctypes_ptr
+ if IS_PYPY:
+ # PyPy does not reclaim arr immediately, so trigger a gc pass to
+ # release it. CPython uses refcounts, so no explicit gc call should
+ # be needed there.
+ break_cycles()
+ assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
+
+ def test_ctypes_as_parameter_holds_reference(self):
+ arr = np.array([None]).copy()
+
+ arr_ref = weakref.ref(arr)
+
+ ctypes_ptr = arr.ctypes._as_parameter_
+
+ # `ctypes_ptr` should hold onto `arr`
+ del arr
break_cycles()
+ assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
+
+ # but when the `ctypes_ptr` object dies, so should `arr`
+ del ctypes_ptr
+ if IS_PYPY:
+ break_cycles()
assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
_range = range
+def _ptp(x):
+ """Peak-to-peak value of x.
+
+ This implementation avoids the problem of signed integer arrays having a
+ peak-to-peak value that cannot be represented with the array's data type.
+ This function returns an unsigned value for signed integer arrays.
+ """
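+ # e.g. for x = np.array([-2, 0, 127], np.int8): x.ptp() wraps around to
+ # -127, while _ptp(x) returns 129 as a uint8 value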
+ return _unsigned_subtract(x.max(), x.min())
+
+
def _hist_bin_sqrt(x, range):
"""
Square root histogram bin estimator.
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
- return x.ptp() / np.sqrt(x.size)
+ return _ptp(x) / np.sqrt(x.size)
def _hist_bin_sturges(x, range):
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
- return x.ptp() / (np.log2(x.size) + 1.0)
+ return _ptp(x) / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x, range):
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
- return x.ptp() / (2.0 * x.size ** (1.0 / 3))
+ return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x, range):
"""
n = x.size
- ptp_x = np.ptp(x)
+ ptp_x = _ptp(x)
if n <= 1 or ptp_x == 0:
return 0
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
- return x.ptp() / (1.0 + np.log2(x.size) +
+ return _ptp(x) / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
assert_array_almost_equal, assert_raises, assert_allclose,
assert_array_max_ulp, assert_raises_regex, suppress_warnings,
)
+import pytest
class TestHistogram(object):
msg += " with datasize of {0}".format(testlen)
assert_equal(len(a), numbins, err_msg=msg)
+ @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
+ 'stone', 'rice', 'sturges'])
+ def test_signed_integer_data(self, bins):
+ # Regression test for gh-14379.
+ a = np.array([-2, 0, 127], dtype=np.int8)
+ hist, edges = np.histogram(a, bins=bins)
+ hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins)
+ assert_array_equal(hist, hist32)
+ assert_array_equal(edges, edges32)
+
def test_simple_weighted(self):
"""
Check that weighted data raises a TypeError
if (leftover < rng_excl) {
/* `rng_excl` is a simple upper bound for `threshold`. */
-
- const uint64_t threshold = -rng_excl % rng_excl;
- /* Same as: threshold=((uint64_t)(0x10000000000000000ULLL - rng_excl)) %
- * rng_excl; */
+ const uint64_t threshold = (UINT64_MAX - rng) % rng_excl;
while (leftover < threshold) {
m = ((__uint128_t)next_uint64(bitgen_state)) * rng_excl;
if (leftover < rng_excl) {
/* `rng_excl` is a simple upper bound for `threshold`. */
-
- const uint64_t threshold = -rng_excl % rng_excl;
- /* Same as:threshold=((uint64_t)(0x10000000000000000ULLL - rng_excl)) %
- * rng_excl; */
+ const uint64_t threshold = (UINT64_MAX - rng) % rng_excl;
while (leftover < threshold) {
x = next_uint64(bitgen_state);
if (leftover < rng_excl) {
/* `rng_excl` is a simple upper bound for `threshold`. */
- const uint32_t threshold = -rng_excl % rng_excl;
- /* Same as: threshold=((uint64_t)(0x100000000ULL - rng_excl)) % rng_excl; */
+ const uint32_t threshold = (UINT32_MAX - rng) % rng_excl;
while (leftover < threshold) {
m = ((uint64_t)next_uint32(bitgen_state)) * rng_excl;
if (leftover < rng_excl) {
/* `rng_excl` is a simple upper bound for `threshold`. */
- const uint16_t threshold = -rng_excl % rng_excl;
- /* Same as: threshold=((uint32_t)(0x10000ULL - rng_excl)) % rng_excl; */
+ const uint16_t threshold = (UINT16_MAX - rng) % rng_excl;
while (leftover < threshold) {
m = ((uint32_t)buffered_uint16(bitgen_state, bcnt, buf)) * rng_excl;
if (leftover < rng_excl) {
/* `rng_excl` is a simple upper bound for `threshold`. */
- const uint8_t threshold = -rng_excl % rng_excl;
- /* Same as: threshold=((uint16_t)(0x100ULL - rng_excl)) % rng_excl; */
+ const uint8_t threshold = (UINT8_MAX - rng) % rng_excl;
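+ /* UINT8_MAX - rng == 2^8 - rng_excl, so this is the usual Lemire rejection
+  * threshold, now computed without relying on unsigned wrap-around. In the
+  * old `-rng_excl % rng_excl` form, integer promotion to `int` made the
+  * expression evaluate to 0 for the 8- and 16-bit variants, so no candidates
+  * were ever rejected and the samples were biased (gh-14774). */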
while (leftover < threshold) {
m = ((uint16_t)buffered_uint8(bitgen_state, bcnt, buf)) * rng_excl;
'int16': '39624ead49ad67e37545744024d2648b',
'int32': '5c4810373f979336c6c0c999996e47a1',
'int64': 'ab126c15edff26f55c50d2b7e37391ac',
- 'int8': 'd1746364b48a020dab9ef0568e6c0cd2',
+ 'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c',
'uint16': '39624ead49ad67e37545744024d2648b',
'uint32': '5c4810373f979336c6c0c999996e47a1',
'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
- 'uint8': 'd1746364b48a020dab9ef0568e6c0cd2'}
+ 'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'}
for dt in self.itype[1:]:
random = Generator(MT19937(1234))
with pytest.raises(ValueError):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
+ # chi2max is the maximum acceptable chi-squared value.
+ @pytest.mark.slow
+ @pytest.mark.parametrize('sample_size,high,dtype,chi2max',
+ [(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25
+ (5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30
+ (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25
+ (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25
+ ])
+ def test_integers_small_dtype_chisquared(self, sample_size, high,
+ dtype, chi2max):
+ # Regression test for gh-14774.
+ samples = random.integers(high, size=sample_size, dtype=dtype)
+
+ values, counts = np.unique(samples, return_counts=True)
+ expected = sample_size / high
+ chi2 = ((counts - expected)**2 / expected).sum()
+ assert chi2 < chi2max
+
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
#-----------------------------------
# Path to the release notes
-RELEASE_NOTES = 'doc/release/1.17.3-notes.rst'
+RELEASE_NOTES = 'doc/release/1.17.4-notes.rst'
#-------------------------------------------------------
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
+Programming Language :: Python :: 3.8
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
MAJOR = 1
MINOR = 17
-MICRO = 3
+MICRO = 4
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
build:
ci:
# install dependencies
+ - sudo apt-get update
- sudo apt-get install gcc gfortran
- target=$(python tools/openblas_support.py)
- sudo cp -r "${target}"/64/lib/* /usr/lib