# manually link critical gfortran libraries
ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libgfortran.3.dylib /usr/local/lib/libgfortran.3.dylib
ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libquadmath.0.dylib /usr/local/lib/libquadmath.0.dylib
- # manually symlink gfortran-4.9 to plain gfortran
- # for f2py
- ln -s /usr/local/bin/gfortran-4.9 /usr/local/bin/gfortran
+ # Manually symlink gfortran-4.9 to plain gfortran for f2py.
+ # No longer needed after Feb 13 2020: gfortran is already present on the
+ # image and the attempted link fails with an error. Kept for future reference.
+ # ln -s /usr/local/bin/gfortran-4.9 /usr/local/bin/gfortran
displayName: 'make gfortran available on mac os vm'
# use the pre-built openblas binary that most closely
# matches our MacOS wheel builds -- currently based
--- /dev/null
+
+Contributors
+============
+
+A total of 5 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Ganesh Kathiresan +
+* Matti Picus
+* Sebastian Berg
+* przemb +
+
+Pull requests merged
+====================
+
+A total of 7 pull requests were merged for this release.
+
+* `#15675 <https://github.com/numpy/numpy/pull/15675>`__: TST: move _no_tracing to testing._private
+* `#15676 <https://github.com/numpy/numpy/pull/15676>`__: MAINT: Large overhead in some random functions
+* `#15677 <https://github.com/numpy/numpy/pull/15677>`__: TST: Do not create gfortran link in azure Mac testing.
+* `#15679 <https://github.com/numpy/numpy/pull/15679>`__: BUG: Added missing error check in `ndarray.__contains__`
+* `#15722 <https://github.com/numpy/numpy/pull/15722>`__: MAINT: use list-based APIs to call subprocesses
+* `#15729 <https://github.com/numpy/numpy/pull/15729>`__: REL: Prepare for 1.18.2 release.
+* `#15734 <https://github.com/numpy/numpy/pull/15734>`__: BUG: fix logic error when nm fails on 32-bit
.. toctree::
:maxdepth: 3
+ 1.18.2 <release/1.18.2-notes>
1.18.1 <release/1.18.1-notes>
1.18.0 <release/1.18.0-notes>
1.17.5 <release/1.17.5-notes>
--- /dev/null
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.18.2 Release Notes
+==========================
+
+This small release contains a fix for a performance regression in numpy/random
+and several bug/maintenance updates.
+
+The Python versions supported in this release are 3.5-3.8. Downstream
+developers should use Cython >= 0.29.15 for Python 3.8 support and OpenBLAS >=
+3.7 to avoid errors on the Skylake architecture.
+
+
+Contributors
+============
+
+A total of 5 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Ganesh Kathiresan +
+* Matti Picus
+* Sebastian Berg
+* przemb +
+
+
+Pull requests merged
+====================
+
+A total of 7 pull requests were merged for this release.
+
+* `#15675 <https://github.com/numpy/numpy/pull/15675>`__: TST: move _no_tracing to testing._private
+* `#15676 <https://github.com/numpy/numpy/pull/15676>`__: MAINT: Large overhead in some random functions
+* `#15677 <https://github.com/numpy/numpy/pull/15677>`__: TST: Do not create gfortran link in azure Mac testing.
+* `#15679 <https://github.com/numpy/numpy/pull/15679>`__: BUG: Added missing error check in `ndarray.__contains__`
+* `#15722 <https://github.com/numpy/numpy/pull/15722>`__: MAINT: use list-based APIs to call subprocesses
+* `#15729 <https://github.com/numpy/numpy/pull/15729>`__: REL: Prepare for 1.18.2 release.
+* `#15734 <https://github.com/numpy/numpy/pull/15734>`__: BUG: fix logic error when nm fails on 32-bit
if (res == NULL) {
return -1;
}
+
any = PyArray_Any((PyArrayObject *)res, NPY_MAXDIMS, NULL);
Py_DECREF(res);
+ if (any == NULL) {
+ return -1;
+ }
+
ret = PyObject_IsTrue(any);
Py_DECREF(any);
return ret;
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
temppath, suppress_warnings, break_cycles,
)
+from numpy.testing._private.utils import _no_tracing
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
data.fill(0)
return data
-def _no_tracing(func):
- """
- Decorator to temporarily turn off tracing for the duration of a test.
- Needed in tests that check refcounting, otherwise the tracing itself
- influences the refcounts
- """
- if not hasattr(sys, 'gettrace'):
- return func
- else:
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- original_trace = sys.gettrace()
- try:
- sys.settrace(None)
- return func(*args, **kwargs)
- finally:
- sys.settrace(original_trace)
- return wrapper
-
-
class TestFlags(object):
def setup(self):
assert_raises_regex, assert_warns, suppress_warnings,
_assert_valid_refcount, HAS_REFCOUNT,
)
+from numpy.testing._private.utils import _no_tracing
from numpy.compat import asbytes, asunicode, long, pickle
-from test.support import no_tracing
try:
RecursionError
assert_(pickle.loads(
pickle.dumps(test_record, protocol=proto)) == test_record)
- @no_tracing
+ @_no_tracing
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
py_ver = "%d%d" % tuple(sys.version_info[:2])
-DEFAULT_NM = 'nm -Cs'
+DEFAULT_NM = ['nm', '-Cs']
DEF_HEADER = """LIBRARY python%s.dll
;CODE PRELOAD MOVEABLE DISCARDABLE
deffile = None
return libfile, deffile
-def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]):
+def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True):
"""Returns the output of nm_cmd via a pipe.
-nm_output = getnam(nm_cmd = 'nm -Cs py_lib')"""
- f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
- nm_output = f.stdout.read()
- f.stdout.close()
+nm_output = getnm(nm_cmd = 'nm -Cs py_lib')"""
+ p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, universal_newlines=True)
+ nm_output, nm_err = p.communicate()
+ if p.returncode != 0:
+ raise RuntimeError('failed to run "%s": "%s"' % (
+ ' '.join(nm_cmd), nm_err))
return nm_output
def parse_nm(nm_output):
deffile = sys.stdout
else:
deffile = open(deffile, 'w')
- nm_cmd = [str(DEFAULT_NM), str(libfile)]
- nm_output = getnm(nm_cmd)
+ nm_cmd = DEFAULT_NM + [str(libfile)]
+ nm_output = getnm(nm_cmd, shell=False)
dlist, flist = parse_nm(nm_output)
output_def(dlist, flist, DEF_HEADER, deffile)
# we need to support 3.2 which doesn't match the standard
# get_versions methods regex
if self.gcc_version is None:
- p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,
- stdout=subprocess.PIPE)
- out_string = p.stdout.read()
- p.stdout.close()
+ try:
+ out_string = subprocess.check_output(['gcc', '-dumpversion'])
+ except (OSError, CalledProcessError):
+ out_string = "" # ignore failures to match old behavior
result = re.search(r'(\d+\.\d+)', out_string)
if result:
self.gcc_version = StrictVersion(result.group(1))
raise ValueError("%s not found in %s" % (dllname, lib_dirs))
def dump_table(dll):
- st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE)
- return st.stdout.readlines()
+ st = subprocess.check_output(["objdump.exe", "-p", dll])
+ return st.split(b'\n')
def generate_def(dll, dfile):
"""Given a dll file location, get all its exported symbols and dump them
if len(syms) == 0:
log.warn('No symbols found in %s' % dll)
- d = open(dfile, 'w')
- d.write('LIBRARY %s\n' % os.path.basename(dll))
- d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
- d.write(';DATA PRELOAD SINGLE\n')
- d.write('\nEXPORTS\n')
- for s in syms:
- #d.write('@%d %s\n' % (s[0], s[1]))
- d.write('%s\n' % s[1])
- d.close()
+ with open(dfile, 'w') as d:
+ d.write('LIBRARY %s\n' % os.path.basename(dll))
+ d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
+ d.write(';DATA PRELOAD SINGLE\n')
+ d.write('\nEXPORTS\n')
+ for s in syms:
+ #d.write('@%d %s\n' % (s[0], s[1]))
+ d.write('%s\n' % s[1])
def find_dll(dll_name):
# generate import library from this symbol list
cmd = ['dlltool', '-d', def_file, '-l', out_file]
- subprocess.Popen(cmd)
+ subprocess.check_call(cmd)
def _build_import_library_x86():
""" Build the import libraries for Mingw32-gcc on Windows
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
- nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
- nm_output = lib2def.getnm(nm_cmd)
+ nm_output = lib2def.getnm(
+ lib2def.DEFAULT_NM + [lib_file], shell=False)
dlist, flist = lib2def.parse_nm(nm_output)
- lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))
+ with open(def_file, 'w') as fid:
+ lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid)
dll_name = find_python_dll ()
- args = (dll_name, def_file, out_file)
- cmd = 'dlltool --dllname "%s" --def "%s" --output-lib "%s"' % args
- status = os.system(cmd)
- # for now, fail silently
+
+ cmd = ["dlltool",
+ "--dllname", dll_name,
+ "--def", def_file,
+ "--output-lib", out_file]
+ status = subprocess.check_output(cmd)
if status:
log.warn('Failed to build import library for gcc. Linking will fail.')
return
# Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
# on Windows XP:
_MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
+ # Python 3.7 uses 1415, but get_build_version returns 140 ??
+ _MSVCRVER_TO_FULLVER['140'] = "14.15.26726.0"
if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
_MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
"""Return path's SVN revision number.
"""
try:
- output = subprocess.check_output(
- ['svnversion'], shell=True, cwd=path)
+ output = subprocess.check_output(['svnversion'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
"""
try:
output = subprocess.check_output(
- ['hg identify --num'], shell=True, cwd=path)
+ ['hg', 'identify', '--num'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
--- /dev/null
+import shutil
+import subprocess
+import sys
+import pytest
+
+from numpy.distutils import mingw32ccompiler
+
+
+@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test')
+def test_build_import():
+    '''Test the mingw32ccompiler.build_import_library, which builds a
+    `python.a` from the MSVC `python.lib`
+    '''
+
+    # make sure `nm.exe` exists and supports the current python version. This
+    # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit
+    try:
+        out = subprocess.check_output(['nm.exe', '--help'])
+    except FileNotFoundError:
+        pytest.skip("'nm.exe' not on path, is mingw installed?")
+    # The '--help' output lists the object formats nm can read; check that
+    # the format matching this python's bitness is among them.
+    supported = out[out.find(b'supported targets:'):]
+    # sys.maxsize < 2**32 indicates a 32-bit python interpreter.
+    if sys.maxsize < 2**32:
+        if b'pe-i386' not in supported:
+            raise ValueError("'nm.exe' found but it does not support 32-bit "
+                             "dlls when using 32-bit python. Supported "
+                             "formats: '%s'" % supported)
+    elif b'pe-x86-64' not in supported:
+        raise ValueError("'nm.exe' found but it does not support 64-bit "
+                         "dlls when using 64-bit python. Supported "
+                         "formats: '%s'" % supported)
+    # Hide the import library to force a build
+    has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib()
+    if has_import_lib:
+        shutil.move(fullpath, fullpath + '.bak')
+
+    try:
+        # Whew, now we can actually test the function
+        mingw32ccompiler.build_import_library()
+
+    finally:
+        # Restore the original import library even if the build failed.
+        if has_import_lib:
+            shutil.move(fullpath + '.bak', fullpath)
np.npy_bool *out) nogil
-
-_integers_types = {'bool': (0, 2),
- 'int8': (-2**7, 2**7),
- 'int16': (-2**15, 2**15),
- 'int32': (-2**31, 2**31),
- 'int64': (-2**63, 2**63),
- 'uint8': (0, 2**8),
- 'uint16': (0, 2**16),
- 'uint32': (0, 2**32),
- 'uint64': (0, 2**64)}
{{
py:
type_info = (('uint32', 'uint32', 'uint64', 'NPY_UINT64', 0, 0, 0, '0X100000000ULL'),
from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64,
_rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16,
_rand_uint8, _gen_mask)
-from ._bounded_integers import _integers_types
from ._pcg64 import PCG64
from numpy.random cimport bitgen_t
from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
def random(self, size=None, dtype=np.float64, out=None):
"""
- random(size=None, dtype='d', out=None)
+ random(size=None, dtype=np.float64, out=None)
Return random floats in the half-open interval [0.0, 1.0).
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ dtype : dtype, optional
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
out : ndarray, optional
Alternative output array in which to place the result. If size is not None,
it must have the same shape as the provided size and must match the type of
"""
cdef double temp
- key = np.dtype(dtype).name
- if key == 'float64':
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
+ elif _dtype == np.float32:
return float_fill(&random_standard_uniform_fill_f, &self._bitgen, size, self.lock, out)
else:
- raise TypeError('Unsupported dtype "%s" for random' % key)
+ raise TypeError('Unsupported dtype %r for random' % _dtype)
def beta(self, a, b, size=None):
"""
def standard_exponential(self, size=None, dtype=np.float64, method=u'zig', out=None):
"""
- standard_exponential(size=None, dtype='d', method='zig', out=None)
+ standard_exponential(size=None, dtype=np.float64, method='zig', out=None)
Draw samples from the standard exponential distribution.
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
dtype : dtype, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
method : str, optional
Either 'inv' or 'zig'. 'inv' uses the default inverse CDF method.
'zig' uses the much faster Ziggurat method of Marsaglia and Tsang.
>>> n = np.random.default_rng().standard_exponential((3, 8000))
"""
- key = np.dtype(dtype).name
- if key == 'float64':
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
if method == u'zig':
return double_fill(&random_standard_exponential_fill, &self._bitgen, size, self.lock, out)
else:
return double_fill(&random_standard_exponential_inv_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
+ elif _dtype == np.float32:
if method == u'zig':
return float_fill(&random_standard_exponential_fill_f, &self._bitgen, size, self.lock, out)
else:
return float_fill(&random_standard_exponential_inv_fill_f, &self._bitgen, size, self.lock, out)
else:
- raise TypeError('Unsupported dtype "%s" for standard_exponential'
- % key)
+ raise TypeError('Unsupported dtype %r for standard_exponential'
+ % _dtype)
def integers(self, low, high=None, size=None, dtype=np.int64, endpoint=False):
"""
- integers(low, high=None, size=None, dtype='int64', endpoint=False)
+ integers(low, high=None, size=None, dtype=np.int64, endpoint=False)
Return random integers from `low` (inclusive) to `high` (exclusive), or
if endpoint=True, `low` (inclusive) to `high` (inclusive). Replaces
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result. All dtypes are determined by their
- name, i.e., 'int64', 'int', etc, so byteorder is not available
- and a specific precision may have different C types depending
- on the platform. The default value is `np.int_`.
+ dtype : dtype, optional
+ Desired dtype of the result. Byteorder must be native.
+ The default value is np.int64.
endpoint : bool, optional
If true, sample from the interval [low, high] instead of the
default [low, high)
high = low
low = 0
- dt = np.dtype(dtype)
- key = dt.name
- if key not in _integers_types:
- raise TypeError('Unsupported dtype "%s" for integers' % key)
- if not dt.isnative:
- raise ValueError('Providing a dtype with a non-native byteorder '
- 'is not supported. If you require '
- 'platform-independent byteorder, call byteswap '
- 'when required.')
+ _dtype = np.dtype(dtype)
# Implementation detail: the old API used a masked method to generate
# bounded uniform integers. Lemire's method is preferable since it is
# faster. randomgen allows a choice, we will always use the faster one.
cdef bint _masked = False
- if key == 'int32':
+ if _dtype == np.int32:
ret = _rand_int32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int64':
+ elif _dtype == np.int64:
ret = _rand_int64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int16':
+ elif _dtype == np.int16:
ret = _rand_int16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int8':
+ elif _dtype == np.int8:
ret = _rand_int8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint64':
+ elif _dtype == np.uint64:
ret = _rand_uint64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint32':
+ elif _dtype == np.uint32:
ret = _rand_uint32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint16':
+ elif _dtype == np.uint16:
ret = _rand_uint16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint8':
+ elif _dtype == np.uint8:
ret = _rand_uint8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'bool':
+ elif _dtype == np.bool_:
ret = _rand_bool(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif not _dtype.isnative:
+ raise ValueError('Providing a dtype with a non-native byteorder '
+ 'is not supported. If you require '
+ 'platform-independent byteorder, call byteswap '
+ 'when required.')
+ else:
+ raise TypeError('Unsupported dtype %r for integers' % _dtype)
+
if size is None and dtype in (bool, int, np.compat.long):
if np.array(ret).shape == ():
# Complicated, continuous distributions:
def standard_normal(self, size=None, dtype=np.float64, out=None):
"""
- standard_normal(size=None, dtype='d', out=None)
+ standard_normal(size=None, dtype=np.float64, out=None)
Draw samples from a standard Normal distribution (mean=0, stdev=1).
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ dtype : dtype, optional
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
out : ndarray, optional
Alternative output array in which to place the result. If size is not None,
it must have the same shape as the provided size and must match the type of
[ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
"""
- key = np.dtype(dtype).name
- if key == 'float64':
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
return double_fill(&random_standard_normal_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
+ elif _dtype == np.float32:
return float_fill(&random_standard_normal_fill_f, &self._bitgen, size, self.lock, out)
-
else:
- raise TypeError('Unsupported dtype "%s" for standard_normal' % key)
+ raise TypeError('Unsupported dtype %r for standard_normal' % _dtype)
def normal(self, loc=0.0, scale=1.0, size=None):
"""
def standard_gamma(self, shape, size=None, dtype=np.float64, out=None):
"""
- standard_gamma(shape, size=None, dtype='d', out=None)
+ standard_gamma(shape, size=None, dtype=np.float64, out=None)
Draw samples from a standard Gamma distribution.
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``shape`` is a scalar. Otherwise,
``np.array(shape).size`` samples are drawn.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ dtype : dtype, optional
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
out : ndarray, optional
Alternative output array in which to place the result. If size is
not None, it must have the same shape as the provided size and
"""
cdef void *func
- key = np.dtype(dtype).name
- if key == 'float64':
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
return cont(&random_standard_gamma, &self._bitgen, size, self.lock, 1,
shape, 'shape', CONS_NON_NEGATIVE,
0.0, '', CONS_NONE,
0.0, '', CONS_NONE,
out)
- if key == 'float32':
+ if _dtype == np.float32:
return cont_f(&random_standard_gamma_f, &self._bitgen, size, self.lock,
shape, 'shape', CONS_NON_NEGATIVE,
out)
else:
- raise TypeError('Unsupported dtype "%s" for standard_gamma' % key)
+ raise TypeError('Unsupported dtype %r for standard_gamma' % _dtype)
def gamma(self, shape, scale=1.0, size=None):
"""
from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64,
_rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16,
_rand_uint8,)
-from ._bounded_integers import _integers_types
from ._mt19937 import MT19937 as _MT19937
from numpy.random cimport bitgen_t
from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
def randint(self, low, high=None, size=None, dtype=int):
"""
- randint(low, high=None, size=None, dtype='l')
+ randint(low, high=None, size=None, dtype=int)
Return random integers from `low` (inclusive) to `high` (exclusive).
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
dtype : dtype, optional
- Desired dtype of the result. All dtypes are determined by their
- name, i.e., 'int64', 'int', etc, so byteorder is not available
- and a specific precision may have different C types depending
- on the platform. The default value is `np.int_`.
+ Desired dtype of the result. Byteorder must be native.
+ The default value is int.
.. versionadded:: 1.11.0
high = low
low = 0
- dt = np.dtype(dtype)
- key = dt.name
- if key not in _integers_types:
- raise TypeError('Unsupported dtype "%s" for randint' % key)
- if not dt.isnative:
+ _dtype = np.dtype(dtype)
+
+ if not _dtype.isnative:
# numpy 1.17.0, 2019-05-28
warnings.warn('Providing a dtype with a non-native byteorder is '
'not supported. If you require platform-independent '
'byteorder, call byteswap when required.\nIn future '
'version, providing byteorder will raise a '
'ValueError', DeprecationWarning)
+ _dtype = _dtype.newbyteorder()
# Implementation detail: the use a masked method to generate
# bounded uniform integers. Lemire's method is preferable since it is
cdef bint _masked = True
cdef bint _endpoint = False
- if key == 'int32':
+ if _dtype == np.int32:
ret = _rand_int32(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'int64':
+ elif _dtype == np.int64:
ret = _rand_int64(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'int16':
+ elif _dtype == np.int16:
ret = _rand_int16(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'int8':
+ elif _dtype == np.int8:
ret = _rand_int8(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint64':
+ elif _dtype == np.uint64:
ret = _rand_uint64(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint32':
+ elif _dtype == np.uint32:
ret = _rand_uint32(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint16':
+ elif _dtype == np.uint16:
ret = _rand_uint16(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint8':
+ elif _dtype == np.uint8:
ret = _rand_uint8(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'bool':
+ elif _dtype == np.bool_:
ret = _rand_bool(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+ else:
+ raise TypeError('Unsupported dtype %r for randint' % _dtype)
if size is None and dtype in (bool, int, np.compat.long):
if np.array(ret).shape == ():
return info['memfree'] + info['cached']
return None
+
+
+def _no_tracing(func):
+    """
+    Decorator to temporarily turn off tracing for the duration of a test.
+    Needed in tests that check refcounting, otherwise the tracing itself
+    influences the refcounts
+    """
+    # If the interpreter does not expose sys.gettrace there is no tracing
+    # to disable, so return the function unchanged.
+    if not hasattr(sys, 'gettrace'):
+        return func
+    else:
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            original_trace = sys.gettrace()
+            try:
+                # Disable tracing while the wrapped test runs; the finally
+                # clause restores the trace function even if the test raises.
+                sys.settrace(None)
+                return func(*args, **kwargs)
+            finally:
+                sys.settrace(original_trace)
+        return wrapper
+
+
#-----------------------------------
# Path to the release notes
-RELEASE_NOTES = 'doc/source/release/1.18.1-notes.rst'
+RELEASE_NOTES = 'doc/source/release/1.18.2-notes.rst'
#-------------------------------------------------------
addopts = -l
norecursedirs = doc tools numpy/linalg/lapack_lite numpy/core/code_generators
doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS ALLOW_UNICODE ALLOW_BYTES
+junit_family=xunit2
filterwarnings =
error
MAJOR = 1
MINOR = 18
-MICRO = 1
+MICRO = 2
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
-cython==0.29.14
-pytest==5.3.1
+cython==0.29.15
+pytest==5.3.5
pytz==2019.3
pytest-cov==2.8.1
pickle5; python_version == '3.7'
pickle5; python_version == '3.6' and platform_python_implementation != 'PyPy'
-nose
# for numpy.random.test.test_extending
cffi
echo Make sure the correct openblas has been linked in
-pypy3/bin/pip install .
+pypy3/bin/pypy3 -m pip install .
pypy3/bin/pypy3 tools/openblas_support.py --check_version "$OpenBLAS_version"
#!/bin/bash
+# Exit the script immediately if a command exits with a non-zero status,
+# and print commands and their arguments as they are executed.
+set -ex
+
uname -a
free -m
df -h
ulimit -a
-if [ -n "$DOWNLOAD_OPENBLAS" ]; then
- pwd
- ls -ltrh
- target=$(python tools/openblas_support.py)
- sudo cp -r $target/lib/* /usr/lib
- sudo cp $target/include/* /usr/include
-fi
-
mkdir builds
pushd builds
if [ -n "$USE_DEBUG" ]
then
- virtualenv --python=python3-dbg venv
+ virtualenv --python=$(which python3-dbg) venv
else
virtualenv --python=python venv
fi
source venv/bin/activate
python -V
+gcc --version
popd
-pip install --upgrade pip setuptools
-pip install -r test_requirements.txt
+pip install --upgrade pip
+
+# 'setuptools', 'wheel' and 'cython' are build dependencies. This information
+# is stored in pyproject.toml, but there is not yet a standard way to install
+# those dependencies with, say, a pip command, so we'll just hard-code their
+# installation here. We only need to install them separately for the cases
+# where numpy is installed with setup.py, which is the case for the Travis jobs
+# where the environment variables USE_DEBUG or USE_WHEEL are set. When pip is
+# used to install numpy, pip gets the build dependencies from pyproject.toml.
+# A specific version of cython is required, so we read the cython package
+# requirement using `grep cython test_requirements.txt` instead of simply
+# writing 'pip install setuptools wheel cython'.
+# urllib3 is needed for openblas_support
+pip install setuptools wheel urllib3 `grep cython test_requirements.txt`
+
+if [ -n "$DOWNLOAD_OPENBLAS" ]; then
+ pwd
+ target=$(python tools/openblas_support.py)
+ sudo cp -r $target/lib/* /usr/lib
+ sudo cp $target/include/* /usr/include
+fi
+
+
if [ -n "$USE_ASV" ]; then pip install asv; fi
if [ -z "$USE_DEBUG" ]; then
$PIP install -v . 2>&1 | tee log
else
- # Python3.5-dbg on travis seems to need this
+ # The job run with USE_DEBUG=1 on travis needs this.
export CFLAGS=$CFLAGS" -Wno-maybe-uninitialized"
$PYTHON setup.py build build_src --verbose-cfg build_ext --inplace 2>&1 | tee log
fi
run_test()
{
- $PIP install -r test_requirements.txt
+ # Install the test dependencies.
+ # Clear PYTHONOPTIMIZE when running `pip install -r test_requirements.txt`
+ # because version 2.19 of pycparser (a dependency of one of the packages
+ # in test_requirements.txt) does not provide a wheel, and the source tar
+ # file does not install correctly when Python's optimization level is set
+ # to strip docstrings (see https://github.com/eliben/pycparser/issues/291).
+ PYTHONOPTIMIZE="" $PIP install -r test_requirements.txt
if [ -n "$USE_DEBUG" ]; then
export PYTHONPATH=$PWD
fi
}
+
export PYTHON
export PIP
-$PIP install setuptools
if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
- # Build wheel
- $PIP install wheel
- # ensure that the pip / setuptools versions deployed inside
- # the venv are recent enough
- $PIP install -U virtualenv
# ensure some warnings are not issued
export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result"
# adjust gcc flags if C coverage requested
export F90='gfortran --coverage'
export LDFLAGS='--coverage'
fi
- $PYTHON setup.py build build_src --verbose-cfg bdist_wheel
+ $PYTHON setup.py build --warn-error build_src --verbose-cfg bdist_wheel
# Make another virtualenv to install into
virtualenv --python=`which $PYTHON` venv-for-wheel
. venv-for-wheel/bin/activate
run_test
elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
- # use an up-to-date pip / setuptools inside the venv
- $PIP install -U virtualenv
# temporary workaround for sdist failures.
$PYTHON -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)"
# ensure some warnings are not issued