Imported Upstream version 1.21.1 upstream/1.21.1
authorDongHun Kwak <dh0128.kwak@samsung.com>
Fri, 15 Jul 2022 02:14:48 +0000 (11:14 +0900)
committerDongHun Kwak <dh0128.kwak@samsung.com>
Fri, 15 Jul 2022 02:14:48 +0000 (11:14 +0900)
48 files changed:
PKG-INFO
doc/changelog/1.21.1-changelog.rst [new file with mode: 0644]
doc/source/_static/scipy-mathjax/package.json
doc/source/release.rst
doc/source/release/1.21.1-notes.rst [new file with mode: 0644]
numpy/__init__.pyi
numpy/_version.py
numpy/core/src/common/npy_cpu_features.c.src
numpy/core/src/multiarray/arrayobject.c
numpy/core/src/multiarray/conversion_utils.c
numpy/core/src/multiarray/conversion_utils.h
numpy/core/src/multiarray/convert_datatype.c
numpy/core/src/multiarray/convert_datatype.h
numpy/core/src/multiarray/dtype_transfer.c
numpy/core/src/multiarray/getset.c
numpy/core/src/multiarray/item_selection.c
numpy/core/src/multiarray/iterators.c
numpy/core/src/multiarray/nditer_pywrap.c
numpy/core/src/umath/loops_arithmetic.dispatch.c.src
numpy/core/src/umath/ufunc_object.c
numpy/core/src/umath/ufunc_type_resolution.c
numpy/core/tests/test_casting_unittests.py
numpy/core/tests/test_multiarray.py
numpy/core/tests/test_nditer.py
numpy/core/tests/test_numeric.py
numpy/distutils/conv_template.py
numpy/distutils/from_template.py
numpy/distutils/misc_util.py
numpy/f2py/__init__.py
numpy/f2py/tests/test_regression.py
numpy/lib/arraysetops.py
numpy/lib/tests/test_arraysetops.py
numpy/random/_common.pxd
numpy/random/_generator.pyx
numpy/random/mtrand.pyx
numpy/typing/__init__.py
numpy/typing/_array_like.py
numpy/typing/_dtype_like.py
numpy/typing/_extended_precision.py
numpy/typing/_shape.py
numpy/typing/tests/data/fail/dtype.py
numpy/typing/tests/data/fail/scalars.py
numpy/typing/tests/data/reveal/dtype.py
numpy/typing/tests/data/reveal/scalars.py
numpy/typing/tests/test_generic_alias.py
numpy/typing/tests/test_runtime.py [new file with mode: 0644]
pavement.py
tools/openblas_support.py

index 916e7e68a157fad2fa0b984fa3c9e22260aca1df..8977ea8035d1117d9252a9b7f99f199315e60f3d 100644 (file)
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.2
 Name: numpy
-Version: 1.21.0
+Version: 1.21.1
 Summary:  NumPy is the fundamental package for array computing with Python.
 Home-page: https://www.numpy.org
 Author: Travis E. Oliphant et al.
diff --git a/doc/changelog/1.21.1-changelog.rst b/doc/changelog/1.21.1-changelog.rst
new file mode 100644 (file)
index 0000000..f219c50
--- /dev/null
@@ -0,0 +1,51 @@
+
+Contributors
+============
+
+A total of 11 people contributed to this release.  People with a "+" by their
+names contributed a patch for the first time.
+
+* Bas van Beek
+* Charles Harris
+* Ganesh Kathiresan
+* Gregory R. Lee
+* Hugo Defois +
+* Kevin Sheppard
+* Matti Picus
+* Ralf Gommers
+* Sayed Adel
+* Sebastian Berg
+* Thomas J. Fan
+
+Pull requests merged
+====================
+
+A total of 26 pull requests were merged for this release.
+
+* `#19311 <https://github.com/numpy/numpy/pull/19311>`__: REV,BUG: Replace ``NotImplemented`` with ``typing.Any``
+* `#19324 <https://github.com/numpy/numpy/pull/19324>`__: MAINT: Fixed the return-dtype of ``ndarray.real`` and ``imag``
+* `#19330 <https://github.com/numpy/numpy/pull/19330>`__: MAINT: Replace ``"dtype[Any]"`` with ``dtype`` in the definition of...
+* `#19342 <https://github.com/numpy/numpy/pull/19342>`__: DOC: Fix some docstrings that crash pdf generation.
+* `#19343 <https://github.com/numpy/numpy/pull/19343>`__: MAINT: bump scipy-mathjax
+* `#19347 <https://github.com/numpy/numpy/pull/19347>`__: BUG: Fix arr.flat.index for large arrays and big-endian machines
+* `#19348 <https://github.com/numpy/numpy/pull/19348>`__: ENH: add ``numpy.f2py.get_include`` function
+* `#19349 <https://github.com/numpy/numpy/pull/19349>`__: BUG: Fix reference count leak in ufunc dtype handling
+* `#19350 <https://github.com/numpy/numpy/pull/19350>`__: MAINT: Annotate missing attributes of ``np.number`` subclasses
+* `#19351 <https://github.com/numpy/numpy/pull/19351>`__: BUG: Fix cast safety and comparisons for zero sized voids
+* `#19352 <https://github.com/numpy/numpy/pull/19352>`__: BUG: Correct Cython declaration in random
+* `#19353 <https://github.com/numpy/numpy/pull/19353>`__: BUG: protect against accessing base attribute of a NULL subarray
+* `#19365 <https://github.com/numpy/numpy/pull/19365>`__: BUG, SIMD: Fix detecting AVX512 features on Darwin
+* `#19366 <https://github.com/numpy/numpy/pull/19366>`__: MAINT: remove ``print()``'s in distutils template handling
+* `#19390 <https://github.com/numpy/numpy/pull/19390>`__: ENH: SIMD architectures to show_config
+* `#19391 <https://github.com/numpy/numpy/pull/19391>`__: BUG: Do not raise deprecation warning for all nans in unique...
+* `#19392 <https://github.com/numpy/numpy/pull/19392>`__: BUG: Fix NULL special case in object-to-any cast code
+* `#19430 <https://github.com/numpy/numpy/pull/19430>`__: MAINT: Use arm64-graviton2 for testing on travis
+* `#19495 <https://github.com/numpy/numpy/pull/19495>`__: BUILD: update OpenBLAS to v0.3.17
+* `#19496 <https://github.com/numpy/numpy/pull/19496>`__: MAINT: Avoid unicode characters in division SIMD code comments
+* `#19499 <https://github.com/numpy/numpy/pull/19499>`__: BUG, SIMD: Fix infinite loop during count non-zero on GCC-11
+* `#19500 <https://github.com/numpy/numpy/pull/19500>`__: BUG: fix a numpy.npiter leak in npyiter_multi_index_set
+* `#19501 <https://github.com/numpy/numpy/pull/19501>`__: TST: Fix a ``GenericAlias`` test failure for python 3.9.0
+* `#19502 <https://github.com/numpy/numpy/pull/19502>`__: MAINT: Start testing with Python 3.10.0b3.
+* `#19503 <https://github.com/numpy/numpy/pull/19503>`__: MAINT: Add missing dtype overloads for object- and ctypes-based...
+* `#19510 <https://github.com/numpy/numpy/pull/19510>`__: REL: Prepare for NumPy 1.21.1 release.
+
index 466677bfe269f2382d696de32c0da17b5f0bc8e4..3a2c3a371e2c52719b5754c3ca4f8cefb9db7a60 100644 (file)
@@ -4,10 +4,10 @@
   "author": "MathJax Consortium",
   "private": true,
   "devDependencies": {
-    "grunt": "^0.4.5",
-    "grunt-cli": "^1.2.0",
-    "grunt-contrib-clean": "^0.6.0",
-    "grunt-regex-replace": "^0.2.6",
+    "grunt": ">=1.3.0",
+    "grunt-cli": ">=1.2.0",
+    "grunt-contrib-clean": ">=0.6.0",
+    "grunt-regex-replace": ">=0.2.6",
     "matchdep": "*"
   }
 }
index 6d208d395b90ffacca5b7d2456ca8319a54e4d80..4a9d2b1945faff90f4fec6f70882dc52dce21d2b 100644 (file)
@@ -5,6 +5,7 @@ Release Notes
 .. toctree::
     :maxdepth: 3
 
+    1.21.1 <release/1.21.1-notes>
     1.21.0 <release/1.21.0-notes>
     1.20.3 <release/1.20.3-notes>
     1.20.2 <release/1.20.2-notes>
diff --git a/doc/source/release/1.21.1-notes.rst b/doc/source/release/1.21.1-notes.rst
new file mode 100644 (file)
index 0000000..0194327
--- /dev/null
@@ -0,0 +1,69 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.21.1 Release Notes
+==========================
+NumPy 1.21.1 is a maintenance release that fixes bugs discovered after the
+1.21.0 release and updates OpenBLAS to v0.3.17 to deal with problems on arm64.
+
+The Python versions supported for this release are 3.7-3.9. The 1.21.x series
+is compatible with development Python 3.10. Python 3.10 will be officially
+supported after it is released.
+
+.. warning::
+   There are unresolved problems compiling NumPy 1.20.0 with gcc-11.1.
+
+   * Optimization level `-O3` results in many incorrect warnings when
+     running the tests.
+   * On some hardware NumPy will hang in an infinite loop.
+
+Contributors
+============
+
+A total of 11 people contributed to this release.  People with a "+" by their
+names contributed a patch for the first time.
+
+* Bas van Beek
+* Charles Harris
+* Ganesh Kathiresan
+* Gregory R. Lee
+* Hugo Defois +
+* Kevin Sheppard
+* Matti Picus
+* Ralf Gommers
+* Sayed Adel
+* Sebastian Berg
+* Thomas J. Fan
+
+Pull requests merged
+====================
+
+A total of 26 pull requests were merged for this release.
+
+* `#19311 <https://github.com/numpy/numpy/pull/19311>`__: REV,BUG: Replace ``NotImplemented`` with ``typing.Any``
+* `#19324 <https://github.com/numpy/numpy/pull/19324>`__: MAINT: Fixed the return-dtype of ``ndarray.real`` and ``imag``
+* `#19330 <https://github.com/numpy/numpy/pull/19330>`__: MAINT: Replace ``"dtype[Any]"`` with ``dtype`` in the definition of...
+* `#19342 <https://github.com/numpy/numpy/pull/19342>`__: DOC: Fix some docstrings that crash pdf generation.
+* `#19343 <https://github.com/numpy/numpy/pull/19343>`__: MAINT: bump scipy-mathjax
+* `#19347 <https://github.com/numpy/numpy/pull/19347>`__: BUG: Fix arr.flat.index for large arrays and big-endian machines
+* `#19348 <https://github.com/numpy/numpy/pull/19348>`__: ENH: add ``numpy.f2py.get_include`` function
+* `#19349 <https://github.com/numpy/numpy/pull/19349>`__: BUG: Fix reference count leak in ufunc dtype handling
+* `#19350 <https://github.com/numpy/numpy/pull/19350>`__: MAINT: Annotate missing attributes of ``np.number`` subclasses
+* `#19351 <https://github.com/numpy/numpy/pull/19351>`__: BUG: Fix cast safety and comparisons for zero sized voids
+* `#19352 <https://github.com/numpy/numpy/pull/19352>`__: BUG: Correct Cython declaration in random
+* `#19353 <https://github.com/numpy/numpy/pull/19353>`__: BUG: protect against accessing base attribute of a NULL subarray
+* `#19365 <https://github.com/numpy/numpy/pull/19365>`__: BUG, SIMD: Fix detecting AVX512 features on Darwin
+* `#19366 <https://github.com/numpy/numpy/pull/19366>`__: MAINT: remove ``print()``'s in distutils template handling
+* `#19390 <https://github.com/numpy/numpy/pull/19390>`__: ENH: SIMD architectures to show_config
+* `#19391 <https://github.com/numpy/numpy/pull/19391>`__: BUG: Do not raise deprecation warning for all nans in unique...
+* `#19392 <https://github.com/numpy/numpy/pull/19392>`__: BUG: Fix NULL special case in object-to-any cast code
+* `#19430 <https://github.com/numpy/numpy/pull/19430>`__: MAINT: Use arm64-graviton2 for testing on travis
+* `#19495 <https://github.com/numpy/numpy/pull/19495>`__: BUILD: update OpenBLAS to v0.3.17
+* `#19496 <https://github.com/numpy/numpy/pull/19496>`__: MAINT: Avoid unicode characters in division SIMD code comments
+* `#19499 <https://github.com/numpy/numpy/pull/19499>`__: BUG, SIMD: Fix infinite loop during count non-zero on GCC-11
+* `#19500 <https://github.com/numpy/numpy/pull/19500>`__: BUG: fix a numpy.npiter leak in npyiter_multi_index_set
+* `#19501 <https://github.com/numpy/numpy/pull/19501>`__: TST: Fix a ``GenericAlias`` test failure for python 3.9.0
+* `#19502 <https://github.com/numpy/numpy/pull/19502>`__: MAINT: Start testing with Python 3.10.0b3.
+* `#19503 <https://github.com/numpy/numpy/pull/19503>`__: MAINT: Add missing dtype overloads for object- and ctypes-based...
+* `#19510 <https://github.com/numpy/numpy/pull/19510>`__: REL: Prepare for NumPy 1.21.1 release.
+
index 4ec46aea01a469dc99cf34eb72618ff704b00fbf..74c33b16243e00b9b6dc6d9d14492be867de1d84 100644 (file)
@@ -1,6 +1,9 @@
 import builtins
 import os
 import sys
+import mmap
+import ctypes as ct
+import array as _array
 import datetime as dt
 from abc import abstractmethod
 from types import TracebackType
@@ -920,7 +923,7 @@ class dtype(Generic[_DTypeScalar_co]):
     # other special cases. Order is sometimes important because of the
     # subtype relationships
     #
-    # bool < int < float < complex
+    # bool < int < float < complex < object
     #
     # so we have to make sure the overloads for the narrowest type is
     # first.
@@ -938,51 +941,54 @@ class dtype(Generic[_DTypeScalar_co]):
     @overload
     def __new__(cls, dtype: Type[bytes], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ...
 
-    # `unsignedinteger` string-based representations
+    # `unsignedinteger` string-based representations and ctypes
     @overload
-    def __new__(cls, dtype: _UInt8Codes, align: bool = ..., copy: bool = ...) -> dtype[uint8]: ...
+    def __new__(cls, dtype: _UInt8Codes | Type[ct.c_uint8], align: bool = ..., copy: bool = ...) -> dtype[uint8]: ...
     @overload
-    def __new__(cls, dtype: _UInt16Codes, align: bool = ..., copy: bool = ...) -> dtype[uint16]: ...
+    def __new__(cls, dtype: _UInt16Codes | Type[ct.c_uint16], align: bool = ..., copy: bool = ...) -> dtype[uint16]: ...
     @overload
-    def __new__(cls, dtype: _UInt32Codes, align: bool = ..., copy: bool = ...) -> dtype[uint32]: ...
+    def __new__(cls, dtype: _UInt32Codes | Type[ct.c_uint32], align: bool = ..., copy: bool = ...) -> dtype[uint32]: ...
     @overload
-    def __new__(cls, dtype: _UInt64Codes, align: bool = ..., copy: bool = ...) -> dtype[uint64]: ...
+    def __new__(cls, dtype: _UInt64Codes | Type[ct.c_uint64], align: bool = ..., copy: bool = ...) -> dtype[uint64]: ...
     @overload
-    def __new__(cls, dtype: _UByteCodes, align: bool = ..., copy: bool = ...) -> dtype[ubyte]: ...
+    def __new__(cls, dtype: _UByteCodes | Type[ct.c_ubyte], align: bool = ..., copy: bool = ...) -> dtype[ubyte]: ...
     @overload
-    def __new__(cls, dtype: _UShortCodes, align: bool = ..., copy: bool = ...) -> dtype[ushort]: ...
+    def __new__(cls, dtype: _UShortCodes | Type[ct.c_ushort], align: bool = ..., copy: bool = ...) -> dtype[ushort]: ...
     @overload
-    def __new__(cls, dtype: _UIntCCodes, align: bool = ..., copy: bool = ...) -> dtype[uintc]: ...
+    def __new__(cls, dtype: _UIntCCodes | Type[ct.c_uint], align: bool = ..., copy: bool = ...) -> dtype[uintc]: ...
+
+    # NOTE: We're assuming here that `uint_ptr_t == size_t`,
+    # an assumption that does not hold in rare cases (same for `ssize_t`)
     @overload
-    def __new__(cls, dtype: _UIntPCodes, align: bool = ..., copy: bool = ...) -> dtype[uintp]: ...
+    def __new__(cls, dtype: _UIntPCodes | Type[ct.c_void_p] | Type[ct.c_size_t], align: bool = ..., copy: bool = ...) -> dtype[uintp]: ...
     @overload
-    def __new__(cls, dtype: _UIntCodes, align: bool = ..., copy: bool = ...) -> dtype[uint]: ...
+    def __new__(cls, dtype: _UIntCodes | Type[ct.c_ulong], align: bool = ..., copy: bool = ...) -> dtype[uint]: ...
     @overload
-    def __new__(cls, dtype: _ULongLongCodes, align: bool = ..., copy: bool = ...) -> dtype[ulonglong]: ...
+    def __new__(cls, dtype: _ULongLongCodes | Type[ct.c_ulonglong], align: bool = ..., copy: bool = ...) -> dtype[ulonglong]: ...
 
-    # `signedinteger` string-based representations
+    # `signedinteger` string-based representations and ctypes
     @overload
-    def __new__(cls, dtype: _Int8Codes, align: bool = ..., copy: bool = ...) -> dtype[int8]: ...
+    def __new__(cls, dtype: _Int8Codes | Type[ct.c_int8], align: bool = ..., copy: bool = ...) -> dtype[int8]: ...
     @overload
-    def __new__(cls, dtype: _Int16Codes, align: bool = ..., copy: bool = ...) -> dtype[int16]: ...
+    def __new__(cls, dtype: _Int16Codes | Type[ct.c_int16], align: bool = ..., copy: bool = ...) -> dtype[int16]: ...
     @overload
-    def __new__(cls, dtype: _Int32Codes, align: bool = ..., copy: bool = ...) -> dtype[int32]: ...
+    def __new__(cls, dtype: _Int32Codes | Type[ct.c_int32], align: bool = ..., copy: bool = ...) -> dtype[int32]: ...
     @overload
-    def __new__(cls, dtype: _Int64Codes, align: bool = ..., copy: bool = ...) -> dtype[int64]: ...
+    def __new__(cls, dtype: _Int64Codes | Type[ct.c_int64], align: bool = ..., copy: bool = ...) -> dtype[int64]: ...
     @overload
-    def __new__(cls, dtype: _ByteCodes, align: bool = ..., copy: bool = ...) -> dtype[byte]: ...
+    def __new__(cls, dtype: _ByteCodes | Type[ct.c_byte], align: bool = ..., copy: bool = ...) -> dtype[byte]: ...
     @overload
-    def __new__(cls, dtype: _ShortCodes, align: bool = ..., copy: bool = ...) -> dtype[short]: ...
+    def __new__(cls, dtype: _ShortCodes | Type[ct.c_short], align: bool = ..., copy: bool = ...) -> dtype[short]: ...
     @overload
-    def __new__(cls, dtype: _IntCCodes, align: bool = ..., copy: bool = ...) -> dtype[intc]: ...
+    def __new__(cls, dtype: _IntCCodes | Type[ct.c_int], align: bool = ..., copy: bool = ...) -> dtype[intc]: ...
     @overload
-    def __new__(cls, dtype: _IntPCodes, align: bool = ..., copy: bool = ...) -> dtype[intp]: ...
+    def __new__(cls, dtype: _IntPCodes | Type[ct.c_ssize_t], align: bool = ..., copy: bool = ...) -> dtype[intp]: ...
     @overload
-    def __new__(cls, dtype: _IntCodes, align: bool = ..., copy: bool = ...) -> dtype[int_]: ...
+    def __new__(cls, dtype: _IntCodes | Type[ct.c_long], align: bool = ..., copy: bool = ...) -> dtype[int_]: ...
     @overload
-    def __new__(cls, dtype: _LongLongCodes, align: bool = ..., copy: bool = ...) -> dtype[longlong]: ...
+    def __new__(cls, dtype: _LongLongCodes | Type[ct.c_longlong], align: bool = ..., copy: bool = ...) -> dtype[longlong]: ...
 
-    # `floating` string-based representations
+    # `floating` string-based representations and ctypes
     @overload
     def __new__(cls, dtype: _Float16Codes, align: bool = ..., copy: bool = ...) -> dtype[float16]: ...
     @overload
@@ -992,11 +998,11 @@ class dtype(Generic[_DTypeScalar_co]):
     @overload
     def __new__(cls, dtype: _HalfCodes, align: bool = ..., copy: bool = ...) -> dtype[half]: ...
     @overload
-    def __new__(cls, dtype: _SingleCodes, align: bool = ..., copy: bool = ...) -> dtype[single]: ...
+    def __new__(cls, dtype: _SingleCodes | Type[ct.c_float], align: bool = ..., copy: bool = ...) -> dtype[single]: ...
     @overload
-    def __new__(cls, dtype: _DoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[double]: ...
+    def __new__(cls, dtype: _DoubleCodes | Type[ct.c_double], align: bool = ..., copy: bool = ...) -> dtype[double]: ...
     @overload
-    def __new__(cls, dtype: _LongDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[longdouble]: ...
+    def __new__(cls, dtype: _LongDoubleCodes | Type[ct.c_longdouble], align: bool = ..., copy: bool = ...) -> dtype[longdouble]: ...
 
     # `complexfloating` string-based representations
     @overload
@@ -1010,9 +1016,9 @@ class dtype(Generic[_DTypeScalar_co]):
     @overload
     def __new__(cls, dtype: _CLongDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[clongdouble]: ...
 
-    # Miscellaneous string-based representations
+    # Miscellaneous string-based representations and ctypes
     @overload
-    def __new__(cls, dtype: _BoolCodes, align: bool = ..., copy: bool = ...) -> dtype[bool_]: ...
+    def __new__(cls, dtype: _BoolCodes | Type[ct.c_bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ...
     @overload
     def __new__(cls, dtype: _TD64Codes, align: bool = ..., copy: bool = ...) -> dtype[timedelta64]: ...
     @overload
@@ -1020,11 +1026,11 @@ class dtype(Generic[_DTypeScalar_co]):
     @overload
     def __new__(cls, dtype: _StrCodes, align: bool = ..., copy: bool = ...) -> dtype[str_]: ...
     @overload
-    def __new__(cls, dtype: _BytesCodes, align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ...
+    def __new__(cls, dtype: _BytesCodes | Type[ct.c_char], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ...
     @overload
     def __new__(cls, dtype: _VoidCodes, align: bool = ..., copy: bool = ...) -> dtype[void]: ...
     @overload
-    def __new__(cls, dtype: _ObjectCodes, align: bool = ..., copy: bool = ...) -> dtype[object_]: ...
+    def __new__(cls, dtype: _ObjectCodes | Type[ct.py_object], align: bool = ..., copy: bool = ...) -> dtype[object_]: ...
 
     # dtype of a dtype is the same dtype
     @overload
@@ -1049,7 +1055,7 @@ class dtype(Generic[_DTypeScalar_co]):
         align: bool = ...,
         copy: bool = ...,
     ) -> dtype[Any]: ...
-    # Catchall overload
+    # Catchall overload for void-likes
     @overload
     def __new__(
         cls,
@@ -1057,6 +1063,14 @@ class dtype(Generic[_DTypeScalar_co]):
         align: bool = ...,
         copy: bool = ...,
     ) -> dtype[void]: ...
+    # Catchall overload for object-likes
+    @overload
+    def __new__(
+        cls,
+        dtype: Type[object],
+        align: bool = ...,
+        copy: bool = ...,
+    ) -> dtype[object_]: ...
 
     @overload
     def __getitem__(self: dtype[void], key: List[str]) -> dtype[void]: ...
@@ -1645,6 +1659,14 @@ _ArrayTD64_co = NDArray[Union[bool_, integer[Any], timedelta64]]
 class _SupportsItem(Protocol[_T_co]):
     def item(self, __args: Any) -> _T_co: ...
 
+class _SupportsReal(Protocol[_T_co]):
+    @property
+    def real(self) -> _T_co: ...
+
+class _SupportsImag(Protocol[_T_co]):
+    @property
+    def imag(self) -> _T_co: ...
+
 class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
     @property
     def base(self) -> Optional[ndarray]: ...
@@ -1653,11 +1675,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
     @property
     def size(self) -> int: ...
     @property
-    def real(self: _ArraySelf) -> _ArraySelf: ...
+    def real(
+        self: NDArray[_SupportsReal[_ScalarType]],  # type: ignore[type-var]
+    ) -> ndarray[_ShapeType, dtype[_ScalarType]]: ...
     @real.setter
     def real(self, value: ArrayLike) -> None: ...
     @property
-    def imag(self: _ArraySelf) -> _ArraySelf: ...
+    def imag(
+        self: NDArray[_SupportsImag[_ScalarType]],  # type: ignore[type-var]
+    ) -> ndarray[_ShapeType, dtype[_ScalarType]]: ...
     @imag.setter
     def imag(self, value: ArrayLike) -> None: ...
     def __new__(
@@ -3060,6 +3086,15 @@ else:
     ]
 
 class integer(number[_NBit1]):  # type: ignore
+    @property
+    def numerator(self: _ScalarType) -> _ScalarType: ...
+    @property
+    def denominator(self) -> L[1]: ...
+    @overload
+    def __round__(self, ndigits: None = ...) -> int: ...
+    @overload
+    def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ...
+
     # NOTE: `__index__` is technically defined in the bottom-most
     # sub-classes (`int64`, `uint32`, etc)
     def item(
@@ -3133,6 +3168,10 @@ class timedelta64(generic):
         __value: Union[None, int, _CharLike_co, dt.timedelta, timedelta64] = ...,
         __format: Union[_CharLike_co, Tuple[_CharLike_co, _IntLike_co]] = ...,
     ) -> None: ...
+    @property
+    def numerator(self: _ScalarType) -> _ScalarType: ...
+    @property
+    def denominator(self) -> L[1]: ...
 
     # NOTE: Only a limited number of units support conversion
     # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as`
@@ -3202,7 +3241,8 @@ uint0 = unsignedinteger[_NBitIntP]
 uint = unsignedinteger[_NBitInt]
 ulonglong = unsignedinteger[_NBitLongLong]
 
-class inexact(number[_NBit1]): ...  # type: ignore
+class inexact(number[_NBit1]):  # type: ignore
+    def __getnewargs__(self: inexact[_64Bit]) -> Tuple[float, ...]: ...
 
 _IntType = TypeVar("_IntType", bound=integer)
 _FloatType = TypeVar('_FloatType', bound=floating)
@@ -3214,6 +3254,21 @@ class floating(inexact[_NBit1]):
         __args: Union[L[0], Tuple[()], Tuple[L[0]]] = ...,
     ) -> float: ...
     def tolist(self) -> float: ...
+    def is_integer(self: float64) -> bool: ...
+    def hex(self: float64) -> str: ...
+    @classmethod
+    def fromhex(cls: Type[float64], __string: str) -> float64: ...
+    def as_integer_ratio(self) -> Tuple[int, int]: ...
+    if sys.version_info >= (3, 9):
+        def __ceil__(self: float64) -> int: ...
+        def __floor__(self: float64) -> int: ...
+    def __trunc__(self: float64) -> int: ...
+    def __getnewargs__(self: float64) -> Tuple[float]: ...
+    def __getformat__(self: float64, __typestr: L["double", "float"]) -> str: ...
+    @overload
+    def __round__(self, ndigits: None = ...) -> int: ...
+    @overload
+    def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ...
     __add__: _FloatOp[_NBit1]
     __radd__: _FloatOp[_NBit1]
     __sub__: _FloatOp[_NBit1]
@@ -3258,6 +3313,9 @@ class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]):
     @property
     def imag(self) -> floating[_NBit2]: ...  # type: ignore[override]
     def __abs__(self) -> floating[_NBit1]: ...  # type: ignore[override]
+    def __getnewargs__(self: complex128) -> Tuple[float, float]: ...
+    # NOTE: Deprecated
+    # def __round__(self, ndigits=...): ...
     __add__: _ComplexOp[_NBit1]
     __radd__: _ComplexOp[_NBit1]
     __sub__: _ComplexOp[_NBit1]
index 04126821f321f1f9343bd625d70abdb1866d3566..0a5a25f2c15857af60e25b6dd5b4a03be6ccb438 100644 (file)
@@ -8,11 +8,11 @@ import json
 
 version_json = '''
 {
- "date": "2021-06-19T12:50:55-0600",
+ "date": "2021-07-18T11:34:41-0600",
  "dirty": false,
  "error": null,
- "full-revisionid": "b235f9e701e14ed6f6f6dcba885f7986a833743f",
- "version": "1.21.0"
+ "full-revisionid": "df6d2600c51502e1877aac563658d0616a75c5e5",
+ "version": "1.21.1"
 }
 '''  # END VERSION_JSON
 
index 4f3a95c717a15b8ff6b46ac3034b70654bb00618..1e0f4a57179d130ead3ecce5612d7a6c338ed894 100644 (file)
@@ -394,8 +394,30 @@ npy__cpu_init_features(void)
     npy__cpu_have[NPY_CPU_FEATURE_FMA]    = npy__cpu_have[NPY_CPU_FEATURE_FMA3];
 
     // check AVX512 OS support
-    if ((xcr & 0xe6) != 0xe6)
+    int avx512_os = (xcr & 0xe6) == 0xe6;
+#if defined(__APPLE__) && defined(__x86_64__)
+    /**
+     * On darwin, machines with AVX512 support, by default, threads are created with
+     * AVX512 masked off in XCR0 and an AVX-sized savearea is used.
+     * However, AVX512 capabilities are advertised in the commpage and via sysctl.
+     * for more information, check:
+     *  - https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/osfmk/i386/fpu.c#L175-L201
+     *  - https://github.com/golang/go/issues/43089
+     *  - https://github.com/numpy/numpy/issues/19319
+     */
+    if (!avx512_os) {
+        npy_uintp commpage64_addr = 0x00007fffffe00000ULL;
+        npy_uint16 commpage64_ver = *((npy_uint16*)(commpage64_addr + 0x01E));
+        // cpu_capabilities64 undefined in versions < 13
+        if (commpage64_ver > 12) {
+            npy_uint64 commpage64_cap = *((npy_uint64*)(commpage64_addr + 0x010));
+            avx512_os = (commpage64_cap & 0x0000004000000000ULL) != 0;
+        }
+    }
+#endif
+    if (!avx512_os) {
         return;
+    }
     npy__cpu_have[NPY_CPU_FEATURE_AVX512F]  = (reg[1] & (1 << 16)) != 0;
     npy__cpu_have[NPY_CPU_FEATURE_AVX512CD] = (reg[1] & (1 << 28)) != 0;
     if (npy__cpu_have[NPY_CPU_FEATURE_AVX512F] && npy__cpu_have[NPY_CPU_FEATURE_AVX512CD]) {
index e7fbb88cd28201211d8bf28631ba098639263c38..0f772c689954318bd5c597481bda43794aa582c2 100644 (file)
@@ -41,6 +41,7 @@ maintainer email:  oliphant.travis@ieee.org
 #include "arraytypes.h"
 #include "scalartypes.h"
 #include "arrayobject.h"
+#include "convert_datatype.h"
 #include "conversion_utils.h"
 #include "ctors.h"
 #include "dtypemeta.h"
@@ -1390,9 +1391,13 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
                 return Py_NotImplemented;
             }
 
-            _res = PyArray_CanCastTypeTo(PyArray_DESCR(self),
-                                         PyArray_DESCR(array_other),
-                                         NPY_EQUIV_CASTING);
+            _res = PyArray_CheckCastSafety(
+                    NPY_EQUIV_CASTING,
+                    PyArray_DESCR(self), PyArray_DESCR(array_other), NULL);
+            if (_res < 0) {
+                PyErr_Clear();
+                _res = 0;
+            }
             if (_res == 0) {
                 /* 2015-05-07, 1.10 */
                 Py_DECREF(array_other);
@@ -1441,9 +1446,13 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
                 return Py_NotImplemented;
             }
 
-            _res = PyArray_CanCastTypeTo(PyArray_DESCR(self),
-                                         PyArray_DESCR(array_other),
-                                         NPY_EQUIV_CASTING);
+            _res = PyArray_CheckCastSafety(
+                    NPY_EQUIV_CASTING,
+                    PyArray_DESCR(self), PyArray_DESCR(array_other), NULL);
+            if (_res < 0) {
+                PyErr_Clear();
+                _res = 0;
+            }
             if (_res == 0) {
                 /* 2015-05-07, 1.10 */
                 Py_DECREF(array_other);
index 3c4c21dedd234cf2192a4f42989dfc4ae54684dc..adfff11292a3909b2b45327fa4c574d13881bcf6 100644 (file)
@@ -1222,11 +1222,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp const *vals)
         goto fail;
     }
     for (i = 0; i < len; i++) {
-#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
-        PyObject *o = PyLong_FromLong((long) vals[i]);
-#else
-        PyObject *o = PyLong_FromLongLong((npy_longlong) vals[i]);
-#endif
+        PyObject *o = PyArray_PyIntFromIntp(vals[i]);
         if (!o) {
             Py_DECREF(intTuple);
             intTuple = NULL;
index 7d1871c43ddb593909a4ad38eb291a36528a5ee9..55c0cdd3578f471ad9fb7e78d35c5c8fcc774908 100644 (file)
@@ -39,6 +39,17 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals);
 NPY_NO_EXPORT int
 PyArray_TypestrConvert(int itemsize, int gentype);
 
+
+static NPY_INLINE PyObject *
+PyArray_PyIntFromIntp(npy_intp const value)
+{
+#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
+    return PyLong_FromLong((long)value);
+#else
+    return PyLong_FromLongLong((npy_longlong)value);
+#endif
+}
+
 NPY_NO_EXPORT PyObject *
 PyArray_IntTupleFromIntp(int len, npy_intp const *vals);
 
index d197a4bea31e4fef44391645c76c2a8208cc98a4..cd0e21098bcdba2eb6fe428a219bf608f61365df 100644 (file)
@@ -457,7 +457,7 @@ PyArray_GetCastSafety(
  *        is ignored).
  * @return 0 for an invalid cast, 1 for a valid and -1 for an error.
  */
-static int
+NPY_NO_EXPORT int
 PyArray_CheckCastSafety(NPY_CASTING casting,
         PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype)
 {
@@ -2841,6 +2841,10 @@ cast_to_void_dtype_class(
     loop_descrs[1]->elsize = given_descrs[0]->elsize;
     Py_INCREF(given_descrs[0]);
     loop_descrs[0] = given_descrs[0];
+    if (loop_descrs[0]->type_num == NPY_VOID &&
+            loop_descrs[0]->subarray == NULL && loop_descrs[1]->names == NULL) {
+        return NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
+    }
     return NPY_SAFE_CASTING | _NPY_CAST_IS_VIEW;
 }
 
@@ -3293,8 +3297,10 @@ void_to_void_resolve_descriptors(
                 casting = NPY_NO_CASTING | _NPY_CAST_IS_VIEW;
             }
         }
-        NPY_CASTING field_casting = PyArray_GetCastSafety(
-                given_descrs[0]->subarray->base, given_descrs[1]->subarray->base, NULL);
+
+        PyArray_Descr *from_base = (from_sub == NULL) ? given_descrs[0] : from_sub->base;
+        PyArray_Descr *to_base = (to_sub == NULL) ? given_descrs[1] : to_sub->base;
+        NPY_CASTING field_casting = PyArray_GetCastSafety(from_base, to_base, NULL);
         if (field_casting < 0) {
             return -1;
         }
index ba16d4d1bd5a12f30d1d181678dffb4fa7134259..22b3859d2ab3ec9a8b7c0ce9aed87f8dd2535a6d 100644 (file)
@@ -71,6 +71,10 @@ NPY_NO_EXPORT NPY_CASTING
 PyArray_GetCastSafety(
         PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype);
 
+NPY_NO_EXPORT int
+PyArray_CheckCastSafety(NPY_CASTING casting,
+        PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype);
+
 NPY_NO_EXPORT NPY_CASTING
 legacy_same_dtype_resolve_descriptors(
         PyArrayMethodObject *self,
index aa8cc84ffa6fdf38458104f54b4145e0c4c4e18b..50db627eafd307da2b91cec60b0baa17ec49aaa7 100644 (file)
@@ -322,11 +322,11 @@ strided_to_strided_object_to_any(
 
     while (N > 0) {
         memcpy(&src_ref, src, sizeof(src_ref));
-        if (PyArray_Pack(data->descr, dst, src_ref) < 0) {
+        if (PyArray_Pack(data->descr, dst, src_ref ? src_ref : Py_None) < 0) {
             return -1;
         }
 
-        if (data->move_references) {
+        if (data->move_references && src_ref != NULL) {
             Py_DECREF(src_ref);
             memset(src, 0, sizeof(src_ref));
         }
index 3575d6fad54e33554cc595b14e292064a8d3fc2e..bccbb7b0c54ab39290765a612ff128f909b48aa0 100644 (file)
@@ -419,33 +419,13 @@ array_itemsize_get(PyArrayObject *self)
 static PyObject *
 array_size_get(PyArrayObject *self)
 {
-    npy_intp size=PyArray_SIZE(self);
-#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
-    return PyLong_FromLong((long) size);
-#else
-    if (size > NPY_MAX_LONG || size < NPY_MIN_LONG) {
-        return PyLong_FromLongLong(size);
-    }
-    else {
-        return PyLong_FromLong((long) size);
-    }
-#endif
+    return PyArray_PyIntFromIntp(PyArray_SIZE(self));
 }
 
 static PyObject *
 array_nbytes_get(PyArrayObject *self)
 {
-    npy_intp nbytes = PyArray_NBYTES(self);
-#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
-    return PyLong_FromLong((long) nbytes);
-#else
-    if (nbytes > NPY_MAX_LONG || nbytes < NPY_MIN_LONG) {
-        return PyLong_FromLongLong(nbytes);
-    }
-    else {
-        return PyLong_FromLong((long) nbytes);
-    }
-#endif
+    return PyArray_PyIntFromIntp(PyArray_NBYTES(self));
 }
 
 
index fb354ce5473a56ee9c9fd1cd13897e3e7aebf746..2b8ea9e79acec32b58af1ecc829dc2f871801eab 100644 (file)
@@ -2131,7 +2131,7 @@ count_nonzero_bytes_384(const npy_uint64 * w)
 
 #if NPY_SIMD
 /* Count the zero bytes between `*d` and `end`, updating `*d` to point to where to keep counting from. */
-static NPY_INLINE NPY_GCC_OPT_3 npyv_u8
+NPY_FINLINE NPY_GCC_OPT_3 npyv_u8
 count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end, npy_uint8 max_count)
 {
     const npyv_u8 vone = npyv_setall_u8(1);
@@ -2150,7 +2150,7 @@ count_zero_bytes_u8(const npy_uint8 **d, const npy_uint8 *end, npy_uint8 max_cou
     return vsum8;
 }
 
-static NPY_INLINE NPY_GCC_OPT_3 npyv_u16x2
+NPY_FINLINE NPY_GCC_OPT_3 npyv_u16x2
 count_zero_bytes_u16(const npy_uint8 **d, const npy_uint8 *end, npy_uint16 max_count)
 {
     npyv_u16x2 vsum16;
index 3ebd4c8589741b3536bc553fb9990628a1df9c3e..576ea89b32fc8b94eb352474d1b9580758767316 100644 (file)
@@ -15,6 +15,7 @@
 #include "iterators.h"
 #include "ctors.h"
 #include "common.h"
+#include "conversion_utils.h"
 #include "array_coercion.h"
 
 #define NEWAXIS_INDEX -1
@@ -1062,13 +1063,15 @@ static PyMemberDef iter_members[] = {
         T_OBJECT,
         offsetof(PyArrayIterObject, ao),
         READONLY, NULL},
-    {"index",
-        T_INT,
-        offsetof(PyArrayIterObject, index),
-        READONLY, NULL},
     {NULL, 0, 0, 0, NULL},
 };
 
+static PyObject *
+iter_index_get(PyArrayIterObject *self)
+{
+    return PyArray_PyIntFromIntp(self->index);
+}
+
 static PyObject *
 iter_coords_get(PyArrayIterObject *self)
 {
@@ -1095,10 +1098,12 @@ iter_coords_get(PyArrayIterObject *self)
 }
 
 static PyGetSetDef iter_getsets[] = {
+    {"index",
+        (getter)iter_index_get,
+        NULL, NULL, NULL},
     {"coords",
         (getter)iter_coords_get,
-        NULL,
-        NULL, NULL},
+        NULL, NULL, NULL},
     {NULL, NULL, NULL, NULL, NULL},
 };
 
@@ -1410,31 +1415,13 @@ arraymultiter_dealloc(PyArrayMultiIterObject *multi)
 static PyObject *
 arraymultiter_size_get(PyArrayMultiIterObject *self)
 {
-#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
-    return PyLong_FromLong((long) self->size);
-#else
-    if (self->size < NPY_MAX_LONG) {
-        return PyLong_FromLong((long) self->size);
-    }
-    else {
-        return PyLong_FromLongLong((npy_longlong) self->size);
-    }
-#endif
+    return PyArray_PyIntFromIntp(self->size);
 }
 
 static PyObject *
 arraymultiter_index_get(PyArrayMultiIterObject *self)
 {
-#if NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG
-    return PyLong_FromLong((long) self->index);
-#else
-    if (self->size < NPY_MAX_LONG) {
-        return PyLong_FromLong((long) self->index);
-    }
-    else {
-        return PyLong_FromLongLong((npy_longlong) self->index);
-    }
-#endif
+    return PyArray_PyIntFromIntp(self->index);
 }
 
 static PyObject *
index 7698ae43d07e31efad13c16ca0fb2841780a6203..73df962e4520bdab43e072e002a4ce57b35f9d06 100644 (file)
@@ -1595,8 +1595,8 @@ npyiter_multi_index_set(NewNpyArrayIterObject *self, PyObject *value)
         for (idim = 0; idim < ndim; ++idim) {
             PyObject *v = PySequence_GetItem(value, idim);
             multi_index[idim] = PyLong_AsLong(v);
+            Py_DECREF(v);
             if (error_converting(multi_index[idim])) {
-                Py_XDECREF(v);
                 return -1;
             }
         }
index 19e05f2b57b0c74cb3908a8d7fa6fbc47053c8ee..1ddf7c3b1a6fe1525663bc1a61ec978ddc4a620e 100644 (file)
  ** Defining the SIMD kernels
  *
  * Floor division of signed is based on T. Granlund and P. L. Montgomery
- * “Division by invariant integers using multiplication(see [Figure 6.1]
+ * "Division by invariant integers using multiplication(see [Figure 6.1]
  * http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.1.2556)"
  * For details on TRUNC division see simd/intdiv.h for more clarification
  ***********************************************************************************
- ** Figure 6.1: Signed division by run–time invariant divisor, rounded towards -INF
+ ** Figure 6.1: Signed division by run-time invariant divisor, rounded towards -INF
  ***********************************************************************************
  * For q = FLOOR(a/d), all sword:
- *     sword −dsign = SRL(d, N − 1);
- *     uword −nsign = (n < −dsign);
- *     uword −qsign = EOR(−nsign, −dsign);
- *     q = TRUNC((n − (−dsign ) + (−nsign))/d) − (−qsign);
+ *     sword -dsign = SRL(d, N - 1);
+ *     uword -nsign = (n < -dsign);
+ *     uword -qsign = EOR(-nsign, -dsign);
+ *     q = TRUNC((n - (-dsign ) + (-nsign))/d) - (-qsign);
  ********************************************************************************/
 
 #if NPY_SIMD
index 0644a28c011b9a282f512d5f7c068d5713bd3786..9e73dfd94a9c1f2179d4e8ec7c90d63c91bf910f 100644 (file)
@@ -4157,8 +4157,9 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc,
         if (dtype == NULL) {
             goto fail;
         }
-        Py_INCREF(dtype->singleton);
         otype = dtype->singleton;
+        Py_INCREF(otype);
+        Py_DECREF(dtype);
     }
     if (out_obj && !PyArray_OutputConverter(out_obj, &out)) {
         goto fail;
index 2834235e409ff84ee36f6540cf6f9d0c5188c986..88aa9ed6c112b6ff3f0177f4ba8371f946711cca 100644 (file)
@@ -390,7 +390,6 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc,
                     operands, type_tup, out_dtypes);
         }
 
-        Py_INCREF(descr);
         out_dtypes[0] = ensure_dtype_nbo(descr);
         if (out_dtypes[0] == NULL) {
             return -1;
index 2cec1acd34909177df2ff1b94f671da4e92f0e65..8398b3cad1f05608349dbf2eb7f43b053a28ea43 100644 (file)
@@ -147,6 +147,9 @@ class TestChanges:
         assert not np.can_cast("U1", "V1")
         # Structured to unstructured is just like any other:
         assert np.can_cast("d,i", "V", casting="same_kind")
+        # Unstructured void to unstructured is actually no cast at all:
+        assert np.can_cast("V3", "V", casting="no")
+        assert np.can_cast("V0", "V", casting="no")
 
 
 class TestCasting:
@@ -646,3 +649,28 @@ class TestCasting:
         with pytest.raises(TypeError,
                     match="casting from object to the parametric DType"):
             cast._resolve_descriptors((np.dtype("O"), None))
+
+    @pytest.mark.parametrize("casting", ["no", "unsafe"])
+    def test_void_and_structured_with_subarray(self, casting):
+        # test case corresponding to gh-19325
+        dtype = np.dtype([("foo", "<f4", (3, 2))])
+        expected = casting == "unsafe"
+        assert np.can_cast("V4", dtype, casting=casting) == expected
+        assert np.can_cast(dtype, "V4", casting=casting) == expected
+
+    @pytest.mark.parametrize("dtype", np.typecodes["All"])
+    def test_object_casts_NULL_None_equivalence(self, dtype):
+        # None to <other> casts may succeed or fail, but a NULL'ed array must
+        # behave the same as one filled with None's.
+        arr_normal = np.array([None] * 5)
+        arr_NULLs = np.empty_like([None] * 5)
+        # If the check fails (maybe it should) the test would lose its purpose:
+        assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes
+
+        try:
+            expected = arr_normal.astype(dtype)
+        except TypeError:
+            with pytest.raises(TypeError):
+                arr_NULLs.astype(dtype)
+        else:
+            assert_array_equal(expected, arr_NULLs.astype(dtype))
index d567653f5a4a94982d9a6edc08ca9d48db6a4409..f807b90a3654b52665586807717c4d94b80acc65 100644 (file)
@@ -5364,6 +5364,17 @@ class TestFlat:
             assert_(abs(sys.getrefcount(ind) - rc_ind) < 50)
             assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)
 
+    def test_index_getset(self):
+        it = np.arange(10).reshape(2, 1, 5).flat
+        with pytest.raises(AttributeError):
+            it.index = 10
+
+        for _ in it:
+            pass
+        # Check the value of `.index` is updated correctly (see also gh-19153)
+        # If the type was incorrect, this would show up on big-endian machines
+        assert it.index == it.base.size
+
 
 class TestResize:
 
index b44343c5755cdb2f0e833debb9deda74121fc84d..adcf921f60163236c966f9df3495da860703743f 100644 (file)
@@ -185,6 +185,29 @@ def test_iter_c_or_f_order():
                 assert_equal([x for x in i],
                                     aview.swapaxes(0, 1).ravel(order='A'))
 
+def test_nditer_multi_index_set():
+    # Test the multi_index set
+    a = np.arange(6).reshape(2, 3)
+    it = np.nditer(a, flags=['multi_index'])
+
+    # Removes the iteration on two first elements of a[0]
+    it.multi_index = (0, 2,)
+
+    assert_equal([i for i in it], [2, 3, 4, 5])
+    
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_nditer_multi_index_set_refcount():
+    # Test if the reference count on index variable is decreased
+    
+    index = 0
+    i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index'])
+
+    start_count = sys.getrefcount(index)
+    i.multi_index = (index,)
+    end_count = sys.getrefcount(index)
+    
+    assert_equal(start_count, end_count)
+
 def test_iter_best_order_multi_index_1d():
     # The multi-indices should be correct with any reordering
 
index f5113150e8f76c779c333cdb74bd795d7397201d..fe310058a72abb421d32144cc6bb0a1745537f54 100644 (file)
@@ -1724,6 +1724,22 @@ class TestArrayComparisons:
         assert_(not res)
         assert_(type(res) is bool)
 
+    @pytest.mark.parametrize("dtype", ["V0", "V3", "V10"])
+    def test_compare_unstructured_voids(self, dtype):
+        zeros = np.zeros(3, dtype=dtype)
+
+        assert_array_equal(zeros, zeros)
+        assert not (zeros != zeros).any()
+
+        if dtype == "V0":
+            # Can't test != of actually different data
+            return
+
+        nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype)
+
+        assert not (zeros == nonzeros).any()
+        assert (zeros != nonzeros).all()
+
 
 def assert_array_strict_equal(x, y):
     assert_array_equal(x, y)
index 90e07f8b10368a22c11c8ea1dab11aa668eef611..c8933d1d42865f745bb985f7f9068a96985997f7 100644 (file)
@@ -271,7 +271,6 @@ def resolve_includes(source):
                 if not os.path.isabs(fn):
                     fn = os.path.join(d, fn)
                 if os.path.isfile(fn):
-                    print('Including file', fn)
                     lines.extend(resolve_includes(fn))
                 else:
                     lines.append(line)
index 7add44c7679d24332fd451b0395a895be6afb3bf..90d1f4c384c7807c621eada8ed7685e5845c5c56 100644 (file)
@@ -219,7 +219,6 @@ def resolve_includes(source):
                 if not os.path.isabs(fn):
                     fn = os.path.join(d, fn)
                 if os.path.isfile(fn):
-                    print('Including file', fn)
                     lines.extend(resolve_includes(fn))
                 else:
                     lines.append(line)
index e797745e12dbf97d4b0f79923f883db6b149ce8f..60696438f346700167294a39571bc7b8f268f93e 100644 (file)
@@ -2357,6 +2357,10 @@ def generate_config_py(target):
                 * ``src_dirs``: directories containing library source files
                 * ``define_macros``: preprocessor macros used by
                   ``distutils.setup``
+                * ``baseline``: minimum CPU features required
+                * ``found``: dispatched features supported in the system
+                * ``not found``: dispatched features that are not supported
+                  in the system
 
                 Examples
                 --------
@@ -2368,6 +2372,9 @@ def generate_config_py(target):
                     libraries = ['openblas', 'openblas']
                     library_dirs = ['/usr/local/lib']
                 """
+                from numpy.core._multiarray_umath import (
+                    __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+                )
                 for name,info_dict in globals().items():
                     if name[0] == "_" or type(info_dict) is not type({}): continue
                     print(name + ":")
@@ -2378,6 +2385,19 @@ def generate_config_py(target):
                         if k == "sources" and len(v) > 200:
                             v = v[:60] + " ...\n... " + v[-60:]
                         print("    %s = %s" % (k,v))
+
+                features_found, features_not_found = [], []
+                for feature in __cpu_dispatch__:
+                    if __cpu_features__[feature]:
+                        features_found.append(feature)
+                    else:
+                        features_not_found.append(feature)
+
+                print("Supported SIMD extensions in this NumPy install:")
+                print("    baseline = %s" % (','.join(__cpu_baseline__)))
+                print("    found = %s" % (','.join(features_found)))
+                print("    not found = %s" % (','.join(features_not_found)))
+
                     '''))
 
     return target
index 07ab6cd7da9650f748a89400a838d4b1722151ce..4f6938ed2b4e6bb2c85f6ff24c246ab8bcae9b1f 100644 (file)
@@ -2,7 +2,7 @@
 """Fortran to Python Interface Generator.
 
 """
-__all__ = ['run_main', 'compile', 'f2py_testing']
+__all__ = ['run_main', 'compile', 'get_include', 'f2py_testing']
 
 import sys
 import subprocess
@@ -122,6 +122,53 @@ def compile(source,
         return cp.returncode
 
 
+def get_include():
+    """
+    Return the directory that contains the fortranobject.c and .h files.
+
+    .. note::
+
+        This function is not needed when building an extension with
+        `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files
+        in one go.
+
+    Python extension modules built with f2py-generated code need to use
+    ``fortranobject.c`` as a source file, and include the ``fortranobject.h``
+    header. This function can be used to obtain the directory containing
+    both of these files.
+
+    Returns
+    -------
+    include_path : str
+        Absolute path to the directory containing ``fortranobject.c`` and
+        ``fortranobject.h``.
+
+    Notes
+    -----
+    .. versionadded:: 1.22.0
+
+    Unless the build system you are using has specific support for f2py,
+    building a Python extension using a ``.pyf`` signature file is a two-step
+    process. For a module ``mymod``:
+
+        - Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This
+          generates ``_mymodmodule.c`` and (if needed)
+          ``_fblas-f2pywrappers.f`` files next to ``mymod.pyf``.
+        - Step 2: build your Python extension module. This requires the
+          following source files:
+
+              - ``_mymodmodule.c``
+              - ``_mymod-f2pywrappers.f`` (if it was generated in step 1)
+              - ``fortranobject.c``
+
+    See Also
+    --------
+    numpy.get_include : function that returns the numpy include directory
+
+    """
+    return os.path.join(os.path.dirname(__file__), 'src')
+
+
 if sys.version_info[:2] >= (3, 7):
     # module level getattr is only supported in 3.7 onwards
     # https://www.python.org/dev/peps/pep-0562/
index a1b772069a0b6caf4281bb2f064259afdbcda6f2..b91499e4adb3562ffabdda19a1fb74855194a36c 100644 (file)
@@ -25,23 +25,31 @@ class TestIntentInOut(util.F2PyTest):
         x = np.arange(3, dtype=np.float32)
         self.module.foo(x)
         assert_equal(x, [3, 1, 2])
+
 
 class TestNumpyVersionAttribute(util.F2PyTest):
     # Check that the attribute __f2py_numpy_version__ is present
     # in the compiled module and that it has the value np.__version__.
     sources = [_path('src', 'regression', 'inout.f90')]
-    
+
     @pytest.mark.slow
     def test_numpy_version_attribute(self):
-        
+
         # Check that self.module has an attribute named "__f2py_numpy_version__"
-        assert_(hasattr(self.module, "__f2py_numpy_version__"), 
+        assert_(hasattr(self.module, "__f2py_numpy_version__"),
                 msg="Fortran module does not have __f2py_numpy_version__")
-        
+
         # Check that the attribute __f2py_numpy_version__ is a string
         assert_(isinstance(self.module.__f2py_numpy_version__, str),
                 msg="__f2py_numpy_version__ is not a string")
-        
+
         # Check that __f2py_numpy_version__ has the value numpy.__version__
         assert_string_equal(np.__version__, self.module.__f2py_numpy_version__)
+
+
+def test_include_path():
+    incdir = np.f2py.get_include()
+    fnames_in_dir = os.listdir(incdir)
+    for fname in ('fortranobject.c', 'fortranobject.h'):
+        assert fname in fnames_in_dir
+
index 7600e17be88b615c611c9e481902ee1f2c5c90f0..bd56b697566976cf4a137660759cd1784ba34f83 100644 (file)
@@ -339,7 +339,9 @@ def _unique1d(ar, return_index=False, return_inverse=False,
             aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left')
         else:
             aux_firstnan = np.searchsorted(aux, aux[-1], side='left')
-        mask[1:aux_firstnan] = (aux[1:aux_firstnan] != aux[:aux_firstnan - 1])
+        if aux_firstnan > 0:
+            mask[1:aux_firstnan] = (
+                aux[1:aux_firstnan] != aux[:aux_firstnan - 1])
         mask[aux_firstnan] = True
         mask[aux_firstnan + 1:] = False
     else:
index d62da9efba0bd7cc50fe7d0c8b7ba366c9125edf..13385cd2409d7d1615c74d8114211a4907b50eac 100644 (file)
@@ -610,6 +610,17 @@ class TestUnique:
         assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
         assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
 
+        # test for gh-19300
+        all_nans = [np.nan] * 4
+        ua = [np.nan]
+        ua_idx = [0]
+        ua_inv = [0, 0, 0, 0]
+        ua_cnt = [4]
+        assert_equal(np.unique(all_nans), ua)
+        assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx))
+        assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv))
+        assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt))
+
     def test_unique_axis_errors(self):
         assert_raises(TypeError, self._run_axis_tests, object)
         assert_raises(TypeError, self._run_axis_tests,
index 4f404b7a11e304441d9b8717ff79f8290bc67dc1..9f2e8c3ca117e9179a2b371fbbe995f746eefecb 100644 (file)
@@ -39,7 +39,7 @@ cdef extern from "include/aligned_malloc.h":
     cdef void *PyArray_calloc_aligned(size_t n, size_t s)
     cdef void PyArray_free_aligned(void *p)
 
-ctypedef double (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil
+ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil
 ctypedef double (*random_double_0)(void *state) nogil
 ctypedef double (*random_double_1)(void *state, double a) nogil
 ctypedef double (*random_double_2)(void *state, double a, double b) nogil
index cd0b248723d23ba68323d9ca80c6125386595686..e2430d139a42b1c3d3cc956193cd68828b81c594 100644 (file)
@@ -585,7 +585,7 @@ cdef class Generator:
         Examples
         --------
         >>> np.random.default_rng().bytes(10)
-        b'\xfeC\x9b\x86\x17\xf2\xa1\xafcp' # random
+        b'\\xfeC\\x9b\\x86\\x17\\xf2\\xa1\\xafcp' # random
 
         """
         cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
index 863879a0465f00dce4411c11e20aa0660d8d2bca..4f5862faa1d993c4a006c442f4baf0b2601f56bd 100644 (file)
@@ -795,7 +795,7 @@ cdef class RandomState:
         Examples
         --------
         >>> np.random.bytes(10)
-        ' eh\\x85\\x022SZ\\xbf\\xa4' #random
+        b' eh\\x85\\x022SZ\\xbf\\xa4' #random
         """
         cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
         # Interpret the uint32s as little-endian to convert them to bytes
index 1bfdf07ae74e4b9cf498f8c2157f19812e393831..8e758b26cb6fbd04ea8c89798297bac837fccbd2 100644 (file)
@@ -161,7 +161,7 @@ API
 # NOTE: The API section will be appended with additional entries
 # further down in this file
 
-from typing import TYPE_CHECKING, List
+from typing import TYPE_CHECKING, List, Any
 
 if TYPE_CHECKING:
     import sys
@@ -364,14 +364,14 @@ if TYPE_CHECKING:
         _GUFunc_Nin2_Nout1,
     )
 else:
-    _UFunc_Nin1_Nout1 = NotImplemented
-    _UFunc_Nin2_Nout1 = NotImplemented
-    _UFunc_Nin1_Nout2 = NotImplemented
-    _UFunc_Nin2_Nout2 = NotImplemented
-    _GUFunc_Nin2_Nout1 = NotImplemented
+    _UFunc_Nin1_Nout1 = Any
+    _UFunc_Nin2_Nout1 = Any
+    _UFunc_Nin1_Nout2 = Any
+    _UFunc_Nin2_Nout2 = Any
+    _GUFunc_Nin2_Nout1 = Any
 
 # Clean up the namespace
-del TYPE_CHECKING, final, List
+del TYPE_CHECKING, final, List, Any
 
 if __doc__ is not None:
     from ._add_docstring import _docstrings
index 9f57b22956cc85e98e41028fc3aee90a4bc51d32..cfd9aacb4927a0749bba949a99b21767b72d58e8 100644 (file)
@@ -77,7 +77,7 @@ _ArrayLike = Union[
 ArrayLike = Union[
     _RecursiveSequence,
     _ArrayLike[
-        "dtype[Any]",
+        dtype,
         Union[bool, int, float, complex, str, bytes]
     ],
 ]
index a41e2f358d97f105a78fe615a98ae07ac5f9a043..636e2209b45f62cb811abd03629dbb34725ade43 100644 (file)
@@ -1,5 +1,15 @@
 import sys
-from typing import Any, List, Sequence, Tuple, Union, Type, TypeVar, TYPE_CHECKING
+from typing import (
+    Any,
+    List,
+    Sequence,
+    Tuple,
+    Union,
+    Type,
+    TypeVar,
+    Generic,
+    TYPE_CHECKING,
+)
 
 import numpy as np
 from ._shape import _ShapeLike
@@ -81,7 +91,9 @@ if TYPE_CHECKING or HAVE_PROTOCOL:
 
 else:
     _DTypeDict = Any
-    _SupportsDType = Any
+
+    class _SupportsDType(Generic[_DType_co]):
+        pass
 
 
 # Would create a dtype[np.void]
@@ -112,7 +124,7 @@ DTypeLike = Union[
     # array-scalar types and generic types
     type,  # TODO: enumerate these when we add type hints for numpy scalars
     # anything with a dtype attribute
-    "_SupportsDType[np.dtype[Any]]",
+    _SupportsDType[np.dtype],
     # character codes, type strings or comma-separated fields, e.g., 'float64'
     str,
     _VoidDTypeLike,
index 3f1ce2038282f119ec2d9fd4779c5842bb32efe3..bad20b048e9f07afd7f3b694311dcaa983c0e869 100644 (file)
@@ -4,7 +4,7 @@ The subclasses are defined here (instead of ``__init__.pyi``) such
 that they can be imported conditionally via the numpy's mypy plugin.
 """
 
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any
 
 import numpy as np
 from . import (
@@ -28,15 +28,15 @@ if TYPE_CHECKING:
     complex256 = np.complexfloating[_128Bit, _128Bit]
     complex512 = np.complexfloating[_256Bit, _256Bit]
 else:
-    uint128 = NotImplemented
-    uint256 = NotImplemented
-    int128 = NotImplemented
-    int256 = NotImplemented
-    float80 = NotImplemented
-    float96 = NotImplemented
-    float128 = NotImplemented
-    float256 = NotImplemented
-    complex160 = NotImplemented
-    complex192 = NotImplemented
-    complex256 = NotImplemented
-    complex512 = NotImplemented
+    uint128 = Any
+    uint256 = Any
+    int128 = Any
+    int256 = Any
+    float80 = Any
+    float96 = Any
+    float128 = Any
+    float256 = Any
+    complex160 = Any
+    complex192 = Any
+    complex256 = Any
+    complex512 = Any
index b720c3ffc19246df3876428cbc2ef22905c98172..cac121026407b5307fe960488d2317ee790e13e0 100644 (file)
@@ -1,5 +1,5 @@
 import sys
-from typing import Sequence, Tuple, Union
+from typing import Sequence, Tuple, Union, Any
 
 if sys.version_info >= (3, 8):
     from typing import SupportsIndex
@@ -7,7 +7,7 @@ else:
     try:
         from typing_extensions import SupportsIndex
     except ImportError:
-        SupportsIndex = NotImplemented
+        SupportsIndex = Any
 
 _Shape = Tuple[int, ...]
 
index 7d419a1d1e5f10dec90c791c34d87d29caabb308..0f3810f3c014aafac0e149cfc6da0ec38c61f165 100644 (file)
@@ -18,5 +18,3 @@ np.dtype(  # E: No overload variant of "dtype" matches
         "field2": (int, 3),
     }
 )
-
-np.dtype[np.float64](np.int64)  # E: Argument 1 to "dtype" has incompatible type
index 0aeff398fc879370d9f6ca77063501e5777371e1..099418e67a81ab0c7a723d18322d2160a092920a 100644 (file)
@@ -1,7 +1,9 @@
+import sys
 import numpy as np
 
 f2: np.float16
 f8: np.float64
+c8: np.complex64
 
 # Construction
 
@@ -80,3 +82,13 @@ def func(a: np.float32) -> None: ...
 
 func(f2)  # E: incompatible type
 func(f8)  # E: incompatible type
+
+round(c8)  # E: No overload variant
+
+c8.__getnewargs__()  # E: Invalid self argument
+f2.__getnewargs__()  # E: Invalid self argument
+f2.is_integer()  # E: Invalid self argument
+f2.hex()  # E: Invalid self argument
+np.float16.fromhex("0x0.0p+0")  # E: Invalid self argument
+f2.__trunc__()  # E: Invalid self argument
+f2.__getformat__("float")  # E: Invalid self argument
index 215d89ead66c97fdcbbdf3afeeb40bf44c8141b3..299fed30ab486fa7d3a647c078588edfe5a07a63 100644 (file)
@@ -1,3 +1,4 @@
+import ctypes as ct
 import numpy as np
 
 dtype_obj: np.dtype[np.str_]
@@ -22,6 +23,15 @@ reveal_type(np.dtype(int))  # E: numpy.dtype[{int_}]
 reveal_type(np.dtype(bool))  # E: numpy.dtype[numpy.bool_]
 reveal_type(np.dtype(str))  # E: numpy.dtype[numpy.str_]
 reveal_type(np.dtype(bytes))  # E: numpy.dtype[numpy.bytes_]
+reveal_type(np.dtype(object))  # E: numpy.dtype[numpy.object_]
+
+# ctypes
+reveal_type(np.dtype(ct.c_double))  # E: numpy.dtype[{double}]
+reveal_type(np.dtype(ct.c_longlong))  # E: numpy.dtype[{longlong}]
+reveal_type(np.dtype(ct.c_uint32))  # E: numpy.dtype[{uint32}]
+reveal_type(np.dtype(ct.c_bool))  # E: numpy.dtype[numpy.bool_]
+reveal_type(np.dtype(ct.c_char))  # E: numpy.dtype[numpy.bytes_]
+reveal_type(np.dtype(ct.py_object))  # E: numpy.dtype[numpy.object_]
 
 # Special case for None
 reveal_type(np.dtype(None))  # E: numpy.dtype[{double}]
index d98388422e07f5b3ee9fe9591b586f5041a4ecfd..8d1181f84607845c3c0ed0b3742315ea6d020b70 100644 (file)
@@ -1,3 +1,4 @@
+import sys
 import numpy as np
 
 b: np.bool_
@@ -6,6 +7,7 @@ i8: np.int64
 f8: np.float64
 c8: np.complex64
 c16: np.complex128
+m: np.timedelta64
 U: np.str_
 S: np.bytes_
 
@@ -114,3 +116,31 @@ reveal_type(f8.reshape(1))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
 reveal_type(c16.reshape(1))  # E: numpy.ndarray[Any, numpy.dtype[{complex128}]]
 reveal_type(U.reshape(1))  # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
 reveal_type(S.reshape(1))  # E: numpy.ndarray[Any, numpy.dtype[numpy.bytes_]]
+
+reveal_type(f8.as_integer_ratio())  # E: Tuple[builtins.int, builtins.int]
+reveal_type(f8.is_integer())  # E: bool
+reveal_type(f8.__trunc__())  # E: int
+reveal_type(f8.__getformat__("float"))  # E: str
+reveal_type(f8.hex())  # E: str
+reveal_type(np.float64.fromhex("0x0.0p+0"))  # E: {float64}
+
+reveal_type(f8.__getnewargs__())  # E: Tuple[builtins.float]
+reveal_type(c16.__getnewargs__())  # E: Tuple[builtins.float, builtins.float]
+
+reveal_type(i8.numerator)  # E: {int64}
+reveal_type(i8.denominator)  # E: Literal[1]
+reveal_type(u8.numerator)  # E: {uint64}
+reveal_type(u8.denominator)  # E: Literal[1]
+reveal_type(m.numerator)  # E: numpy.timedelta64
+reveal_type(m.denominator)  # E: Literal[1]
+
+reveal_type(round(i8))  # E: int
+reveal_type(round(i8, 3))  # E: {int64}
+reveal_type(round(u8))  # E: int
+reveal_type(round(u8, 3))  # E: {uint64}
+reveal_type(round(f8))  # E: int
+reveal_type(round(f8, 3))  # E: {float64}
+
+if sys.version_info >= (3, 9):
+    reveal_type(f8.__ceil__())  # E: int
+    reveal_type(f8.__floor__())  # E: int
index 0b99174392f039b0dc77c016c19d408343a123f9..5f0ac915352cf21d5999c8ccb952daa5126b57fd 100644 (file)
@@ -21,8 +21,8 @@ if sys.version_info >= (3, 9):
     NDArray_ref = types.GenericAlias(np.ndarray, (Any, DType_ref))
     FuncType = Callable[[Union[_GenericAlias, types.GenericAlias]], Any]
 else:
-    DType_ref = NotImplemented
-    NDArray_ref = NotImplemented
+    DType_ref = Any
+    NDArray_ref = Any
     FuncType = Callable[[_GenericAlias], Any]
 
 GETATTR_NAMES = sorted(set(dir(np.ndarray)) - _GenericAlias._ATTR_EXCEPTIONS)
@@ -60,7 +60,6 @@ class TestGenericAlias:
         ("__call__", lambda n: n(shape=(1,), dtype=np.int64, buffer=BUFFER)),
         ("subclassing", lambda n: _get_subclass_mro(n)),
         ("pickle", lambda n: n == pickle.loads(pickle.dumps(n))),
-        ("__weakref__", lambda n: n == weakref.ref(n)()),
     ])
     def test_pass(self, name: str, func: FuncType) -> None:
         """Compare `types.GenericAlias` with its numpy-based backport.
@@ -75,6 +74,14 @@ class TestGenericAlias:
             value_ref = func(NDArray_ref)
             assert value == value_ref
 
+    def test_weakref(self) -> None:
+        """Test ``__weakref__``."""
+        value = weakref.ref(NDArray)()
+
+        if sys.version_info >= (3, 9, 1):  # xref bpo-42332
+            value_ref = weakref.ref(NDArray_ref)()
+            assert value == value_ref
+
     @pytest.mark.parametrize("name", GETATTR_NAMES)
     def test_getattr(self, name: str) -> None:
         """Test that `getattr` wraps around the underlying type,
diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py
new file mode 100644 (file)
index 0000000..e82b08a
--- /dev/null
@@ -0,0 +1,90 @@
+"""Test the runtime usage of `numpy.typing`."""
+
+from __future__ import annotations
+
+import sys
+from typing import get_type_hints, Union, Tuple, NamedTuple
+
+import pytest
+import numpy as np
+import numpy.typing as npt
+
+try:
+    from typing_extensions import get_args, get_origin
+    SKIP = False
+except ImportError:
+    SKIP = True
+
+
+class TypeTup(NamedTuple):
+    typ: type
+    args: Tuple[type, ...]
+    origin: None | type
+
+
+if sys.version_info >= (3, 9):
+    NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray)
+else:
+    NDArrayTup = TypeTup(npt.NDArray, (), None)
+
+TYPES = {
+    "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union),
+    "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union),
+    "NBitBase": TypeTup(npt.NBitBase, (), None),
+    "NDArray": NDArrayTup,
+}
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+@pytest.mark.skipif(SKIP, reason="requires typing-extensions")
+def test_get_args(name: type, tup: TypeTup) -> None:
+    """Test `typing.get_args`."""
+    typ, ref = tup.typ, tup.args
+    out = get_args(typ)
+    assert out == ref
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+@pytest.mark.skipif(SKIP, reason="requires typing-extensions")
+def test_get_origin(name: type, tup: TypeTup) -> None:
+    """Test `typing.get_origin`."""
+    typ, ref = tup.typ, tup.origin
+    out = get_origin(typ)
+    assert out == ref
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+def test_get_type_hints(name: type, tup: TypeTup) -> None:
+    """Test `typing.get_type_hints`."""
+    typ = tup.typ
+
+    # Explicitly set `__annotations__` in order to circumvent the
+    # stringification performed by `from __future__ import annotations`
+    def func(a): pass
+    func.__annotations__ = {"a": typ, "return": None}
+
+    out = get_type_hints(func)
+    ref = {"a": typ, "return": type(None)}
+    assert out == ref
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+def test_get_type_hints_str(name: type, tup: TypeTup) -> None:
+    """Test `typing.get_type_hints` with string-representation of types."""
+    typ_str, typ = f"npt.{name}", tup.typ
+
+    # Explicitly set `__annotations__` in order to circumvent the
+    # stringification performed by `from __future__ import annotations`
+    def func(a): pass
+    func.__annotations__ = {"a": typ_str, "return": None}
+
+    out = get_type_hints(func)
+    ref = {"a": typ, "return": type(None)}
+    assert out == ref
+
+
+def test_keys() -> None:
+    """Test that ``TYPES.keys()`` and ``numpy.typing.__all__`` are synced."""
+    keys = TYPES.keys()
+    ref = set(npt.__all__)
+    assert keys == ref
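
Taken together, the new module pins down that the public ``numpy.typing`` aliases behave like ordinary ``typing`` constructs at runtime. The behaviour under test, condensed (assumes Python >= 3.8, where ``typing.get_args``/``get_origin`` exist; the tests themselves fall back to ``typing_extensions`` for 3.7):

    from typing import Union, get_args, get_origin
    import numpy.typing as npt

    assert get_origin(npt.ArrayLike) is Union
    print(get_args(npt.DTypeLike)[:3])   # a peek at the Union members
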
index 66c2cf953eafcbc7e1188fdc244b97ac347083de..c7ee86839e4fe69c7f711c1e446423b902a0e926 100644 (file)
@@ -38,7 +38,7 @@ from paver.easy import Bunch, options, task, sh
 #-----------------------------------
 
 # Path to the release notes
-RELEASE_NOTES = 'doc/source/release/1.21.0-notes.rst'
+RELEASE_NOTES = 'doc/source/release/1.21.1-notes.rst'
 
 
 #-------------------------------------------------------
index d11ad173befac31aba4cb63285c5cb58ebda4839..8509326f0d74a63a9735893f17c24b014967f3e5 100644 (file)
@@ -13,8 +13,8 @@ from tempfile import mkstemp, gettempdir
 from urllib.request import urlopen, Request
 from urllib.error import HTTPError
 
-OPENBLAS_V = '0.3.13'
-OPENBLAS_LONG = 'v0.3.13-62-gaf2b0d02'
+OPENBLAS_V = '0.3.17'
+OPENBLAS_LONG = 'v0.3.17'
 BASE_LOC = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs'
 BASEURL = f'{BASE_LOC}/{OPENBLAS_LONG}/download'
 SUPPORTED_PLATFORMS = [