From: DongHun Kwak Date: Fri, 15 Jul 2022 02:14:54 +0000 (+0900) Subject: Imported Upstream version 1.22.1 X-Git-Tag: upstream/1.22.1^0 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d486c6312696e772b44168bdd9a23a87b1ebf7a3;p=platform%2Fupstream%2Fpython3-numpy.git Imported Upstream version 1.22.1 --- diff --git a/LICENSE.txt b/LICENSE.txt index 3dd3d252..23932bef 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2005-2021, NumPy Developers. +Copyright (c) 2005-2022, NumPy Developers. All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/PKG-INFO b/PKG-INFO index 255e370f..972747ac 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: numpy -Version: 1.22.0 +Version: 1.22.1 Summary: NumPy is the fundamental package for array computing with Python. Home-page: https://www.numpy.org Author: Travis E. Oliphant et al. diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt index 42d84e04..9324ce97 100644 --- a/doc/RELEASE_WALKTHROUGH.rst.txt +++ b/doc/RELEASE_WALKTHROUGH.rst.txt @@ -1,11 +1,25 @@ This file contains a walkthrough of the NumPy 1.21.0 release on Linux, modified -for building on azure and uploading to anaconda.org -The commands can be copied into the command line, but be sure to -replace 1.21.0 by the correct version. - +for building on azure and uploading to anaconda.org The commands can be copied +into the command line, but be sure to replace 1.21.0 by the correct version. This should be read together with the general directions in `releasing`. +Facility Preparation +==================== + +Before beginning to make a release, use the ``*_requirements.txt`` files to +ensure that you have the needed software. Most software can be installed with +pip, but some will require apt-get, dnf, or whatever your system uses for +software. Note that at this time the documentation cannot be built with Python +3.10, for that use 3.8-3.9 instead. You will also need a GitHub personal access +token (PAT) to push the documention. There are a few ways to streamline things. + +- Git can be set up to use a keyring to store your GitHub personal access token. + Search online for the details. +- You can use the ``keyring`` app to store the PyPI password for twine. See the + online twine documentation for details. + + Release Preparation =================== @@ -168,8 +182,8 @@ file is updated for continued development:: $ paver write_release -Reset the maintenance branch into a development state ------------------------------------------------------ +Reset the maintenance branch into a development state (skip for prereleases) +---------------------------------------------------------------------------- Create release notes for next release and edit them to set the version. These notes will be a skeleton and have little content:: @@ -228,60 +242,74 @@ may take several tries to get it look right. Then - Hit the ``{Publish,Update} release`` button at the bottom. -Upload documents to numpy.org ------------------------------ +Upload documents to numpy.org (skip for prereleases) +---------------------------------------------------- + +.. note:: You will need a GitHub personal access token to push the update. This step is only needed for final releases and can be skipped for pre-releases and most patch releases. 
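If you keep the token in a keyring, as suggested under Facility Preparation above, a minimal Python sketch using the third-party ``keyring`` package is enough to stash and recall it; the service and user names below are illustrative placeholders, not part of the official walkthrough::

    # Editorial sketch: store and read a GitHub PAT with the ``keyring`` package.
    import keyring

    # "github.com" / "numpy-release" are placeholder service/user names.
    keyring.set_password("github.com", "numpy-release", "<paste PAT here>")
    token = keyring.get_password("github.com", "numpy-release")
    print("token available:", token is not None)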
``make merge-doc`` clones the ``numpy/doc`` repo into -``doc/build/merge`` and updates it with the new documentation:: -Note that if you have a `.local` numpy install, you should either remove it or -install the current version for the docs to pick up the correct NumPy version. +``doc/build/merge`` and updates it with the new documentation. If you already +have a numpy installed, you need to locally install the new NumPy version so +that document generation will use the correct NumPy. This is because ``make +dist`` does not correctly set up the path. Note that Python 3.10 cannot be used +for generating the docs as it has no ``easy_install``, use 3.9 or 3.8 instead:: $ pushd doc $ make dist $ make merge-doc - $ popd + $ pushd build/merge If the release series is a new one, you will need to add a new section to the ``doc/build/merge/index.html`` front page just after the "insert here" comment:: - $ gvim doc/build/merge/index.html +/'insert here' + $ gvim index.html +/'insert here' Otherwise, only the ``zip`` and ``pdf`` links should be updated with the new tag name:: - $ gvim doc/build/merge/index.html +/'tag v1.21' + $ gvim index.html +/'tag v1.21' You can "test run" the new documentation in a browser to make sure the links work:: - $ firefox doc/build/merge/index.html + $ firefox index.html # or google-chrome, etc. -Update the stable link:: +Update the stable link and update:: $ ln -sfn 1.21 stable + $ ls -l # check the link -Once everything seems satisfactory, commit and upload the changes:: +Once everything seems satisfactory, update, commit and upload the changes:: - $ pushd doc/build/merge + $ python3 update.py $ git commit -a -m"Add documentation for v1.21.0" $ git push $ popd + $ popd -Announce the release on scipy.org ---------------------------------- +Announce the release on numpy.org (skip for prereleases) +-------------------------------------------------------- -This assumes that you have forked ``_:: +This assumes that you have forked ``_:: - $ cd ../scipy.org + $ cd ../numpy.org $ git checkout master $ git pull upstream master - $ git checkout -b numpy-1.21.0 - $ gvim www/index.rst # edit the News section - $ git commit -a + $ git checkout -b announce-numpy-1.21.0 + $ gvim content/en/news.md + +- For all releases, go to the bottom of the page and add a one line link. Look + to the previous links for example. +- For the ``*.0`` release in a cycle, add a new section at the top with a short + description of the new features and point the news link to it. + +commit and push:: + + $ git commit -a -m"announce the NumPy 1.21.0 release" $ git push origin HEAD -Now go to your fork and make a pull request for the branch. +Go to your Github fork and make a pull request. Announce to mailing lists ------------------------- @@ -293,8 +321,8 @@ as generated for the release notes above. If you crosspost, make sure that python-announce-list is BCC so that replies will not be sent to that list. -Post-Release Tasks ------------------- +Post-Release Tasks (skip for prereleases) +----------------------------------------- Checkout main and forward port the documentation changes:: diff --git a/doc/changelog/1.22.1-changelog.rst b/doc/changelog/1.22.1-changelog.rst new file mode 100644 index 00000000..3b401c1d --- /dev/null +++ b/doc/changelog/1.22.1-changelog.rst @@ -0,0 +1,47 @@ + +Contributors +============ + +A total of 14 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* Arryan Singh +* Bas van Beek +* Charles Harris +* Denis Laxalde +* Isuru Fernando +* Kevin Sheppard +* Matthew Barber +* Matti Picus +* Melissa Weber Mendonça +* Mukulika Pahari +* Omid Rajaei + +* Pearu Peterson +* Ralf Gommers +* Sebastian Berg + +Pull requests merged +==================== + +A total of 20 pull requests were merged for this release. + +* `#20702 `__: MAINT, DOC: Post 1.22.0 release fixes. +* `#20703 `__: DOC, BUG: Use pngs instead of svgs. +* `#20704 `__: DOC:Fixed the link on user-guide landing page +* `#20714 `__: BUG: Restore vc141 support +* `#20724 `__: BUG: Fix array dimensions solver for multidimensional arguments... +* `#20725 `__: TYP: change type annotation for `__array_namespace__` to ModuleType +* `#20726 `__: TYP, MAINT: Allow `ndindex` to accept integer tuples +* `#20757 `__: BUG: Relax dtype identity check in reductions +* `#20763 `__: TYP: Allow time manipulation functions to accept `date` and `timedelta`... +* `#20768 `__: TYP: Relax the type of `ndarray.__array_finalize__` +* `#20795 `__: MAINT: Raise RuntimeError if setuptools version is too recent. +* `#20796 `__: BUG, DOC: Fixes SciPy docs build warnings +* `#20797 `__: DOC: fix OpenBLAS version in release note +* `#20798 `__: PERF: Optimize array check for bounded 0,1 values +* `#20805 `__: BUG: Fix that reduce-likes honor out always (and live in the... +* `#20806 `__: BUG: ``array_api.argsort(descending=True)`` respects relative... +* `#20807 `__: BUG: Allow integer inputs for pow-related functions in `array_api` +* `#20814 `__: DOC: Refer to NumPy, not pandas, in main page +* `#20815 `__: DOC: Update Copyright to 2022 [License] +* `#20819 `__: BUG: Return correctly shaped inverse indices in array_api set... diff --git a/doc/source/conf.py b/doc/source/conf.py index cd5aadd8..d1abc6fa 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -108,7 +108,7 @@ master_doc = 'index' # General substitutions. project = 'NumPy' -copyright = '2008-2021, The NumPy community' +copyright = '2008-2022, NumPy Developers' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. diff --git a/doc/source/index.rst b/doc/source/index.rst index 7fa7d8aa..a753a21f 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -58,7 +58,7 @@ basic statistical operations, random simulation and much more. ^^^^^^^^^^ The user guide provides in-depth information on the - key concepts of pandas with useful background information and explanation. + key concepts of NumPy with useful background information and explanation. .. link-button:: user :type: ref diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index a18211cc..0ee51f61 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -1,7 +1,7 @@ -.. _reference: - .. module:: numpy +.. _reference: + ############### NumPy Reference ############### diff --git a/doc/source/release.rst b/doc/source/release.rst index a4a5bde6..e90521be 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 3 + 1.22.1 1.22.0 1.21.4 1.21.3 diff --git a/doc/source/release/1.22.0-notes.rst b/doc/source/release/1.22.0-notes.rst index 08c74d99..991b4119 100644 --- a/doc/source/release/1.22.0-notes.rst +++ b/doc/source/release/1.22.0-notes.rst @@ -450,9 +450,9 @@ double precision functions respectively. 
(`gh-19478 `__) -OpenBLAS v0.3.17 +OpenBLAS v0.3.18 ---------------- -Update the OpenBLAS used in testing and in wheels to v0.3.17 +Update the OpenBLAS used in testing and in wheels to v0.3.18 -(`gh-19462 `__) +(`gh-20058 `__) diff --git a/doc/source/release/1.22.1-notes.rst b/doc/source/release/1.22.1-notes.rst new file mode 100644 index 00000000..e494bdef --- /dev/null +++ b/doc/source/release/1.22.1-notes.rst @@ -0,0 +1,63 @@ +.. currentmodule:: numpy + +========================== +NumPy 1.22.1 Release Notes +========================== + +The NumPy 1.22.1 is maintenance release that fixes bugs discovered after the +1.22.0 release. Notable fixes are: + +- Fix f2PY docstring problems (SciPy) +- Fix reduction type problems (AstroPy) +- Fix various typing bugs. + +The Python versions supported for this release are 3.8-3.10. + + +Contributors +============ + +A total of 14 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Arryan Singh +* Bas van Beek +* Charles Harris +* Denis Laxalde +* Isuru Fernando +* Kevin Sheppard +* Matthew Barber +* Matti Picus +* Melissa Weber Mendonça +* Mukulika Pahari +* Omid Rajaei + +* Pearu Peterson +* Ralf Gommers +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 20 pull requests were merged for this release. + +* `#20702 `__: MAINT, DOC: Post 1.22.0 release fixes. +* `#20703 `__: DOC, BUG: Use pngs instead of svgs. +* `#20704 `__: DOC: Fixed the link on user-guide landing page +* `#20714 `__: BUG: Restore vc141 support +* `#20724 `__: BUG: Fix array dimensions solver for multidimensional arguments... +* `#20725 `__: TYP: change type annotation for ``__array_namespace__`` to ModuleType +* `#20726 `__: TYP, MAINT: Allow ``ndindex`` to accept integer tuples +* `#20757 `__: BUG: Relax dtype identity check in reductions +* `#20763 `__: TYP: Allow time manipulation functions to accept ``date`` and ``timedelta``... +* `#20768 `__: TYP: Relax the type of ``ndarray.__array_finalize__`` +* `#20795 `__: MAINT: Raise RuntimeError if setuptools version is too recent. +* `#20796 `__: BUG, DOC: Fixes SciPy docs build warnings +* `#20797 `__: DOC: fix OpenBLAS version in release note +* `#20798 `__: PERF: Optimize array check for bounded 0,1 values +* `#20805 `__: BUG: Fix that reduce-likes honor out always (and live in the... +* `#20806 `__: BUG: ``array_api.argsort(descending=True)`` respects relative... +* `#20807 `__: BUG: Allow integer inputs for pow-related functions in ``array_api`` +* `#20814 `__: DOC: Refer to NumPy, not pandas, in main page +* `#20815 `__: DOC: Update Copyright to 2022 [License] +* `#20819 `__: BUG: Return correctly shaped inverse indices in array_api set... diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst index ca299085..5bea8e76 100644 --- a/doc/source/user/basics.broadcasting.rst +++ b/doc/source/user/basics.broadcasting.rst @@ -46,7 +46,7 @@ only conceptual. NumPy is smart enough to use the original scalar value without actually making copies so that broadcasting operations are as memory and computationally efficient as possible. -.. figure:: broadcasting_1.svg +.. figure:: broadcasting_1.png :alt: A scalar is broadcast to match the shape of the 1-d array it is being multiplied to. :name: broadcasting.figure-1 @@ -177,7 +177,7 @@ As shown in :ref:`broadcasting.figure-2`, ``b`` is added to each row of ``a``. In :ref:`broadcasting.figure-3`, an exception is raised because of the incompatible shapes. -.. 
figure:: broadcasting_2.svg +.. figure:: broadcasting_2.png :alt: A 1-d array with shape (3) is strectched to match the 2-d array of shape (4, 3) it is being added to, and the result is a 2-d array of shape (4, 3). @@ -189,7 +189,7 @@ incompatible shapes. broadcasting if number of 1-d array elements matches the number of 2-d array columns.* -.. figure:: broadcasting_3.svg +.. figure:: broadcasting_3.png :alt: A huge cross over the 2-d array of shape (4, 3) and the 1-d array of shape (4) shows that they can not be broadcast due to mismatch of shapes and thus produce no result. @@ -213,7 +213,7 @@ outer addition operation of two 1-d arrays:: [ 21., 22., 23.], [ 31., 32., 33.]]) -.. figure:: broadcasting_4.svg +.. figure:: broadcasting_4.png :alt: A 2-d array of shape (4, 1) and a 1-d array of shape (3) are stretched to match their shapes and produce a resultant array of shape (4, 3). @@ -261,7 +261,7 @@ the shape of the ``codes`` array:: Codes (2d array): 4 x 2 Diff (2d array): 4 x 2 -.. figure:: broadcasting_5.svg +.. figure:: broadcasting_5.png :alt: A height versus weight graph that shows data of a female gymnast, marathon runner, basketball player, football lineman and the athlete to be classified. Shortest distance diff --git a/doc/source/user/broadcasting_1.png b/doc/source/user/broadcasting_1.png new file mode 100644 index 00000000..40698f95 Binary files /dev/null and b/doc/source/user/broadcasting_1.png differ diff --git a/doc/source/user/broadcasting_2.png b/doc/source/user/broadcasting_2.png new file mode 100644 index 00000000..be71afb0 Binary files /dev/null and b/doc/source/user/broadcasting_2.png differ diff --git a/doc/source/user/broadcasting_3.png b/doc/source/user/broadcasting_3.png new file mode 100644 index 00000000..806e6f96 Binary files /dev/null and b/doc/source/user/broadcasting_3.png differ diff --git a/doc/source/user/broadcasting_4.png b/doc/source/user/broadcasting_4.png new file mode 100644 index 00000000..23c4c161 Binary files /dev/null and b/doc/source/user/broadcasting_4.png differ diff --git a/doc/source/user/broadcasting_5.png b/doc/source/user/broadcasting_5.png new file mode 100644 index 00000000..d4218ba2 Binary files /dev/null and b/doc/source/user/broadcasting_5.png differ diff --git a/doc_requirements.txt b/doc_requirements.txt index c849efb2..7f284d44 100644 --- a/doc_requirements.txt +++ b/doc_requirements.txt @@ -1,9 +1,10 @@ +# doxygen required, use apt-get or dnf sphinx==4.2.0 numpydoc==1.1.0 +pydata-sphinx-theme==0.7.2 +sphinx-panels ipython scipy matplotlib pandas -pydata-sphinx-theme breathe -sphinx-panels diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index eb1e81c6..01cd0c87 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1502,8 +1502,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): kwargs: Mapping[str, Any], ) -> Any: ... - @property - def __array_finalize__(self) -> None: ... + __array_finalize__: Any def __array_wrap__( self, @@ -3321,6 +3320,9 @@ class ndenumerate(Generic[_ScalarType]): def __iter__(self: _T) -> _T: ... class ndindex: + @overload + def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... + @overload def __init__(self, *shape: SupportsIndex) -> None: ... def __iter__(self: _T) -> _T: ... def __next__(self) -> _Shape: ... @@ -3371,7 +3373,7 @@ class busdaycalendar: def __new__( cls, weekmask: ArrayLike = ..., - holidays: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ..., ) -> busdaycalendar: ... 
@property def weekmask(self) -> NDArray[bool_]: ... diff --git a/numpy/_version.py b/numpy/_version.py index 1f442cce..43d7041f 100644 --- a/numpy/_version.py +++ b/numpy/_version.py @@ -8,11 +8,11 @@ import json version_json = ''' { - "date": "2021-12-30T14:37:35-0700", + "date": "2022-01-13T16:11:04-0700", "dirty": false, "error": null, - "full-revisionid": "4adc87dff15a247e417d50f10cc4def8e1c17a03", - "version": "1.22.0" + "full-revisionid": "7ce4118531b585b5d8f0380c6b896ae22d93bd96", + "version": "1.22.1" } ''' # END VERSION_JSON diff --git a/numpy/array_api/_array_object.py b/numpy/array_api/_array_object.py index 75baf34b..00ffe716 100644 --- a/numpy/array_api/_array_object.py +++ b/numpy/array_api/_array_object.py @@ -30,6 +30,7 @@ from ._dtypes import ( ) from typing import TYPE_CHECKING, Optional, Tuple, Union, Any +import types if TYPE_CHECKING: from ._typing import Any, PyCapsule, Device, Dtype @@ -415,7 +416,7 @@ class Array: def __array_namespace__( self: Array, /, *, api_version: Optional[str] = None - ) -> Any: + ) -> types.ModuleType: if api_version is not None and not api_version.startswith("2021."): raise ValueError(f"Unrecognized array API version: {api_version!r}") return array_api @@ -654,15 +655,13 @@ class Array: res = self._array.__pos__() return self.__class__._new(res) - # PEP 484 requires int to be a subtype of float, but __pow__ should not - # accept int. - def __pow__(self: Array, other: Union[float, Array], /) -> Array: + def __pow__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __pow__. """ from ._elementwise_functions import pow - other = self._check_allowed_dtypes(other, "floating-point", "__pow__") + other = self._check_allowed_dtypes(other, "numeric", "__pow__") if other is NotImplemented: return other # Note: NumPy's __pow__ does not follow type promotion rules for 0-d @@ -912,23 +911,23 @@ class Array: res = self._array.__ror__(other._array) return self.__class__._new(res) - def __ipow__(self: Array, other: Union[float, Array], /) -> Array: + def __ipow__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __ipow__. """ - other = self._check_allowed_dtypes(other, "floating-point", "__ipow__") + other = self._check_allowed_dtypes(other, "numeric", "__ipow__") if other is NotImplemented: return other self._array.__ipow__(other._array) return self - def __rpow__(self: Array, other: Union[float, Array], /) -> Array: + def __rpow__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __rpow__. """ from ._elementwise_functions import pow - other = self._check_allowed_dtypes(other, "floating-point", "__rpow__") + other = self._check_allowed_dtypes(other, "numeric", "__rpow__") if other is NotImplemented: return other # Note: NumPy's __pow__ does not follow the spec type promotion rules diff --git a/numpy/array_api/_elementwise_functions.py b/numpy/array_api/_elementwise_functions.py index 4408fe83..c758a094 100644 --- a/numpy/array_api/_elementwise_functions.py +++ b/numpy/array_api/_elementwise_functions.py @@ -591,8 +591,8 @@ def pow(x1: Array, x2: Array, /) -> Array: See its docstring for more information. 
""" - if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes: - raise TypeError("Only floating-point dtypes are allowed in pow") + if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: + raise TypeError("Only numeric dtypes are allowed in pow") # Call result type here just to raise on disallowed type combinations _result_type(x1.dtype, x2.dtype) x1, x2 = Array._normalize_two_args(x1, x2) diff --git a/numpy/array_api/_set_functions.py b/numpy/array_api/_set_functions.py index 05ee7e55..db9370f8 100644 --- a/numpy/array_api/_set_functions.py +++ b/numpy/array_api/_set_functions.py @@ -41,14 +41,21 @@ def unique_all(x: Array, /) -> UniqueAllResult: See its docstring for more information. """ - res = np.unique( + values, indices, inverse_indices, counts = np.unique( x._array, return_counts=True, return_index=True, return_inverse=True, ) - - return UniqueAllResult(*[Array._new(i) for i in res]) + # np.unique() flattens inverse indices, but they need to share x's shape + # See https://github.com/numpy/numpy/issues/20638 + inverse_indices = inverse_indices.reshape(x.shape) + return UniqueAllResult( + Array._new(values), + Array._new(indices), + Array._new(inverse_indices), + Array._new(counts), + ) def unique_counts(x: Array, /) -> UniqueCountsResult: @@ -68,13 +75,16 @@ def unique_inverse(x: Array, /) -> UniqueInverseResult: See its docstring for more information. """ - res = np.unique( + values, inverse_indices = np.unique( x._array, return_counts=False, return_index=False, return_inverse=True, ) - return UniqueInverseResult(*[Array._new(i) for i in res]) + # np.unique() flattens inverse indices, but they need to share x's shape + # See https://github.com/numpy/numpy/issues/20638 + inverse_indices = inverse_indices.reshape(x.shape) + return UniqueInverseResult(Array._new(values), Array._new(inverse_indices)) def unique_values(x: Array, /) -> Array: diff --git a/numpy/array_api/_sorting_functions.py b/numpy/array_api/_sorting_functions.py index 9cd49786..b2a11872 100644 --- a/numpy/array_api/_sorting_functions.py +++ b/numpy/array_api/_sorting_functions.py @@ -15,9 +15,20 @@ def argsort( """ # Note: this keyword argument is different, and the default is different. kind = "stable" if stable else "quicksort" - res = np.argsort(x._array, axis=axis, kind=kind) - if descending: - res = np.flip(res, axis=axis) + if not descending: + res = np.argsort(x._array, axis=axis, kind=kind) + else: + # As NumPy has no native descending sort, we imitate it here. Note that + # simply flipping the results of np.argsort(x._array, ...) would not + # respect the relative order like it would in native descending sorts. 
+ res = np.flip( + np.argsort(np.flip(x._array, axis=axis), axis=axis, kind=kind), + axis=axis, + ) + # Rely on flip()/argsort() to validate axis + normalised_axis = axis if axis >= 0 else x.ndim + axis + max_i = x.shape[normalised_axis] - 1 + res = max_i - res return Array._new(res) diff --git a/numpy/array_api/tests/test_array_object.py b/numpy/array_api/tests/test_array_object.py index b980bacc..1fe1dfdd 100644 --- a/numpy/array_api/tests/test_array_object.py +++ b/numpy/array_api/tests/test_array_object.py @@ -98,7 +98,7 @@ def test_operators(): "__mul__": "numeric", "__ne__": "all", "__or__": "integer_or_boolean", - "__pow__": "floating", + "__pow__": "numeric", "__rshift__": "integer", "__sub__": "numeric", "__truediv__": "floating", diff --git a/numpy/array_api/tests/test_elementwise_functions.py b/numpy/array_api/tests/test_elementwise_functions.py index a9274aec..b2fb44e7 100644 --- a/numpy/array_api/tests/test_elementwise_functions.py +++ b/numpy/array_api/tests/test_elementwise_functions.py @@ -66,7 +66,7 @@ def test_function_types(): "negative": "numeric", "not_equal": "all", "positive": "numeric", - "pow": "floating-point", + "pow": "numeric", "remainder": "numeric", "round": "numeric", "sign": "numeric", diff --git a/numpy/array_api/tests/test_set_functions.py b/numpy/array_api/tests/test_set_functions.py new file mode 100644 index 00000000..b8eb65d4 --- /dev/null +++ b/numpy/array_api/tests/test_set_functions.py @@ -0,0 +1,19 @@ +import pytest +from hypothesis import given +from hypothesis.extra.array_api import make_strategies_namespace + +from numpy import array_api as xp + +xps = make_strategies_namespace(xp) + + +@pytest.mark.parametrize("func", [xp.unique_all, xp.unique_inverse]) +@given(xps.arrays(dtype=xps.scalar_dtypes(), shape=xps.array_shapes())) +def test_inverse_indices_shape(func, x): + """ + Inverse indices share shape of input array + + See https://github.com/numpy/numpy/issues/20638 + """ + out = func(x) + assert out.inverse_indices.shape == x.shape diff --git a/numpy/array_api/tests/test_sorting_functions.py b/numpy/array_api/tests/test_sorting_functions.py new file mode 100644 index 00000000..9848bbfe --- /dev/null +++ b/numpy/array_api/tests/test_sorting_functions.py @@ -0,0 +1,23 @@ +import pytest + +from numpy import array_api as xp + + +@pytest.mark.parametrize( + "obj, axis, expected", + [ + ([0, 0], -1, [0, 1]), + ([0, 1, 0], -1, [1, 0, 2]), + ([[0, 1], [1, 1]], 0, [[1, 0], [0, 1]]), + ([[0, 1], [1, 1]], 1, [[1, 0], [0, 1]]), + ], +) +def test_stable_desc_argsort(obj, axis, expected): + """ + Indices respect relative order of a descending stable-sort + + See https://github.com/numpy/numpy/issues/20778 + """ + x = xp.asarray(obj) + out = xp.argsort(x, axis=axis, stable=True, descending=True) + assert xp.all(out == xp.asarray(expected)) diff --git a/numpy/core/multiarray.pyi b/numpy/core/multiarray.pyi index a9f68e18..3c1c3710 100644 --- a/numpy/core/multiarray.pyi +++ b/numpy/core/multiarray.pyi @@ -67,6 +67,7 @@ from numpy.typing import ( NDArray, ArrayLike, _SupportsArray, + _NestedSequence, _FiniteNestedSequence, _ArrayLikeBool_co, _ArrayLikeUInt_co, @@ -818,28 +819,28 @@ def datetime_data( @overload def busday_count( # type: ignore[misc] - begindates: _ScalarLike_co, - enddates: _ScalarLike_co, + begindates: _ScalarLike_co | dt.date, + enddates: _ScalarLike_co | dt.date, weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: 
None = ..., ) -> int_: ... @overload def busday_count( # type: ignore[misc] - begindates: ArrayLike, - enddates: ArrayLike, + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> NDArray[int_]: ... @overload def busday_count( - begindates: ArrayLike, - enddates: ArrayLike, + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ..., ) -> _ArrayType: ... @@ -847,100 +848,100 @@ def busday_count( # `roll="raise"` is (more or less?) equivalent to `casting="safe"` @overload def busday_offset( # type: ignore[misc] - dates: datetime64, - offsets: _TD64Like_co, + dates: datetime64 | dt.date, + offsets: _TD64Like_co | dt.timedelta, roll: L["raise"] = ..., weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> datetime64: ... @overload def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64], - offsets: _ArrayLikeTD64_co, + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], roll: L["raise"] = ..., weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> NDArray[datetime64]: ... @overload def busday_offset( # type: ignore[misc] - dates: _ArrayLike[datetime64], - offsets: _ArrayLike[timedelta64], + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], roll: L["raise"] = ..., weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ..., ) -> _ArrayType: ... @overload def busday_offset( # type: ignore[misc] - dates: _ScalarLike_co, - offsets: _ScalarLike_co, + dates: _ScalarLike_co | dt.date, + offsets: _ScalarLike_co | dt.timedelta, roll: _RollKind, weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> datetime64: ... @overload def busday_offset( # type: ignore[misc] - dates: ArrayLike, - offsets: ArrayLike, + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], roll: _RollKind, weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> NDArray[datetime64]: ... 
@overload def busday_offset( - dates: ArrayLike, - offsets: ArrayLike, + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], roll: _RollKind, weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ..., ) -> _ArrayType: ... @overload def is_busday( # type: ignore[misc] - dates: _ScalarLike_co, + dates: _ScalarLike_co | dt.date, weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> bool_: ... @overload def is_busday( # type: ignore[misc] - dates: ArrayLike, + dates: ArrayLike | _NestedSequence[dt.date], weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: None = ..., ) -> NDArray[bool_]: ... @overload def is_busday( - dates: ArrayLike, + dates: ArrayLike | _NestedSequence[dt.date], weekmask: ArrayLike = ..., - holidays: None | ArrayLike = ..., + holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ..., busdaycal: None | busdaycalendar = ..., out: _ArrayType = ..., ) -> _ArrayType: ... @overload def datetime_as_string( # type: ignore[misc] - arr: datetime64, + arr: datetime64 | dt.date, unit: None | L["auto"] | _UnitKind = ..., timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., casting: _CastingKind = ..., ) -> str_: ... @overload def datetime_as_string( - arr: _ArrayLikeDT64_co, + arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], unit: None | L["auto"] | _UnitKind = ..., timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., casting: _CastingKind = ..., diff --git a/numpy/core/src/npymath/npy_math_internal.h.src b/numpy/core/src/npymath/npy_math_internal.h.src index 5b418342..15d35637 100644 --- a/numpy/core/src/npymath/npy_math_internal.h.src +++ b/numpy/core/src/npymath/npy_math_internal.h.src @@ -54,6 +54,9 @@ * ==================================================== */ #include "npy_math_private.h" +#ifdef _MSC_VER +# include // for __popcnt +#endif /* Magic binary numbers used by bit_count * For type T, the magic numbers are computed as follows: diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index 78f6f4b5..1772eb25 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -2718,6 +2718,21 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, * is NULL) so we pass `arr` instead in that case. */ PyArrayObject *ops[3] = {out ? out : arr, arr, out}; + + /* + * TODO: This is a dangerous hack, that works by relying on the GIL, it is + * terrible, terrifying, and trusts that nobody does crazy stuff + * in their type-resolvers. + * By mutating the `out` dimension, we ensure that reduce-likes + * live in a future without value-based promotion even when legacy + * promotion has to be used. 
+ */ + npy_bool evil_ndim_mutating_hack = NPY_FALSE; + if (out != NULL && PyArray_NDIM(out) == 0 && PyArray_NDIM(arr) != 0) { + evil_ndim_mutating_hack = NPY_TRUE; + ((PyArrayObject_fields *)out)->nd = 1; + } + /* * TODO: If `out` is not provided, arguably `initial` could define * the first DType (and maybe also the out one), that way @@ -2738,6 +2753,9 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE, NPY_TRUE); + if (evil_ndim_mutating_hack) { + ((PyArrayObject_fields *)out)->nd = 0; + } /* DTypes may currently get filled in fallbacks and XDECREF for error: */ Py_XDECREF(operation_DTypes[0]); Py_XDECREF(operation_DTypes[1]); @@ -2762,9 +2780,13 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, * The first operand and output should be the same array, so they should * be identical. The second argument can be different for reductions, * but is checked to be identical for accumulate and reduceat. + * Ideally, the type-resolver ensures that all are identical, but we do + * not enforce this here strictly. Otherwise correct handling of + * byte-order changes (or metadata) requires a lot of care; see gh-20699. */ - if (out_descrs[0] != out_descrs[2] || ( - enforce_uniform_args && out_descrs[0] != out_descrs[1])) { + if (!PyArray_EquivTypes(out_descrs[0], out_descrs[2]) || ( + enforce_uniform_args && !PyArray_EquivTypes( + out_descrs[0], out_descrs[1]))) { PyErr_Format(PyExc_TypeError, "the resolved dtypes are not compatible with %s.%s. " "Resolved (%R, %R, %R)", @@ -3026,8 +3048,12 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, return NULL; } - /* The below code assumes that all descriptors are identical: */ - assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]); + /* + * The below code assumes that all descriptors are interchangeable, we + * allow them to not be strictly identical (but they typically should be) + */ + assert(PyArray_EquivTypes(descrs[0], descrs[1]) + && PyArray_EquivTypes(descrs[0], descrs[2])); if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) { /* This can be removed, but the initial element copy needs fixing */ @@ -3439,8 +3465,12 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, return NULL; } - /* The below code assumes that all descriptors are identical: */ - assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]); + /* + * The below code assumes that all descriptors are interchangeable, we + * allow them to not be strictly identical (but they typically should be) + */ + assert(PyArray_EquivTypes(descrs[0], descrs[1]) + && PyArray_EquivTypes(descrs[0], descrs[2])); if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) { /* This can be removed, but the initial element copy needs fixing */ diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 76e4cdcf..1e0829f0 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -2148,6 +2148,18 @@ class TestUfunc: # It would be safe, but not equiv casting: ufunc(a, c, out=out, casting="equiv") + def test_reducelike_byteorder_resolution(self): + # See gh-20699, byte-order changes need some extra care in the type + # resolution to make the following succeed: + arr_be = np.arange(10, dtype=">i8") + arr_le = np.arange(10, dtype="random_func(state) return out_array +cdef int _check_array_cons_bounded_0_1(np.ndarray val, object name) except -1: 
+ cdef double *val_data + cdef np.npy_intp i + cdef bint err = 0 + + if not np.PyArray_ISONESEGMENT(val) or np.PyArray_TYPE(val) != np.NPY_DOUBLE: + # slow path for non-contiguous arrays or any non-double dtypes + err = not np.all(np.greater_equal(val, 0)) or not np.all(np.less_equal(val, 1)) + else: + val_data = np.PyArray_DATA(val) + for i in range(np.PyArray_SIZE(val)): + err = (not (val_data[i] >= 0)) or (not val_data[i] <= 1) + if err: + break + if err: + raise ValueError(f"{name} < 0, {name} > 1 or {name} contains NaNs") + + return 0 cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1: if cons == CONS_NON_NEGATIVE: @@ -354,9 +373,7 @@ cdef int check_array_constraint(np.ndarray val, object name, constraint_type con elif np.any(np.less_equal(val, 0)): raise ValueError(name + " <= 0") elif cons == CONS_BOUNDED_0_1: - if not np.all(np.greater_equal(val, 0)) or \ - not np.all(np.less_equal(val, 1)): - raise ValueError("{0} < 0, {0} > 1 or {0} contains NaNs".format(name)) + return _check_array_cons_bounded_0_1(val, name) elif cons == CONS_BOUNDED_GT_0_1: if not np.all(np.greater(val, 0)) or not np.all(np.less_equal(val, 1)): raise ValueError("{0} <= 0, {0} > 1 or {0} contains NaNs".format(name)) diff --git a/numpy/typing/tests/data/fail/index_tricks.pyi b/numpy/typing/tests/data/fail/index_tricks.pyi index c508bf3a..565e81a9 100644 --- a/numpy/typing/tests/data/fail/index_tricks.pyi +++ b/numpy/typing/tests/data/fail/index_tricks.pyi @@ -4,6 +4,7 @@ import numpy as np AR_LIKE_i: List[int] AR_LIKE_f: List[float] +np.ndindex([1, 2, 3]) # E: No overload variant np.unravel_index(AR_LIKE_f, (1, 2, 3)) # E: incompatible type np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # E: No overload variant np.mgrid[1] # E: Invalid index type diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index cee4d8c3..55c033fe 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -24,6 +24,8 @@ reveal_type(iter(np.ndenumerate(AR_i8))) # E: Iterator[Tuple[builtins.tuple[bui reveal_type(iter(np.ndenumerate(AR_LIKE_f))) # E: Iterator[Tuple[builtins.tuple[builtins.int], {double}]] reveal_type(iter(np.ndenumerate(AR_LIKE_U))) # E: Iterator[Tuple[builtins.tuple[builtins.int], str_]] +reveal_type(np.ndindex(1, 2, 3)) # E: numpy.ndindex +reveal_type(np.ndindex((1, 2, 3))) # E: numpy.ndindex reveal_type(iter(np.ndindex(1, 2, 3))) # E: Iterator[builtins.tuple[builtins.int]] reveal_type(next(np.ndindex(1, 2, 3))) # E: builtins.tuple[builtins.int] diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 0e91a7af..56706d8d 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -1,3 +1,4 @@ +import datetime as dt from typing import Any, List, TypeVar from pathlib import Path @@ -27,6 +28,10 @@ b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8) nditer_obj: np.nditer +date_scalar: dt.date +date_seq: list[dt.date] +timedelta_seq: list[dt.timedelta] + def func(a: int) -> bool: ... 
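The reveal tests below exercise the relaxed date-aware signatures added to ``multiarray.pyi``; at runtime these calls already work, as in this minimal sketch (the dates are chosen arbitrarily, not taken from the test suite)::

    # Editorial sketch of the busday functions with stdlib datetime inputs.
    import datetime as dt
    import numpy as np

    start = dt.date(2022, 1, 3)                       # a Monday
    print(np.is_busday(start))                        # True
    print(np.busday_offset(start, 5))                 # 2022-01-10
    cal = np.busdaycalendar(holidays=[dt.date(2022, 1, 17)])
    print(np.busday_count(start, dt.date(2022, 2, 1), busdaycal=cal))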
reveal_type(next(b_f8)) # E: tuple[Any] @@ -108,19 +113,26 @@ reveal_type(np.datetime_data(np.dtype(np.timedelta64))) # E: Tuple[builtins.str reveal_type(np.busday_count("2011-01", "2011-02")) # E: {int_} reveal_type(np.busday_count(["2011-01"], "2011-02")) # E: ndarray[Any, dtype[{int_}]] +reveal_type(np.busday_count(["2011-01"], date_scalar)) # E: ndarray[Any, dtype[{int_}]] reveal_type(np.busday_offset(M, m)) # E: datetime64 +reveal_type(np.busday_offset(date_scalar, m)) # E: datetime64 reveal_type(np.busday_offset(M, 5)) # E: datetime64 reveal_type(np.busday_offset(AR_M, m)) # E: ndarray[Any, dtype[datetime64]] +reveal_type(np.busday_offset(M, timedelta_seq)) # E: ndarray[Any, dtype[datetime64]] reveal_type(np.busday_offset("2011-01", "2011-02", roll="forward")) # E: datetime64 reveal_type(np.busday_offset(["2011-01"], "2011-02", roll="forward")) # E: ndarray[Any, dtype[datetime64]] reveal_type(np.is_busday("2012")) # E: bool_ +reveal_type(np.is_busday(date_scalar)) # E: bool_ reveal_type(np.is_busday(["2012"])) # E: ndarray[Any, dtype[bool_]] reveal_type(np.datetime_as_string(M)) # E: str_ reveal_type(np.datetime_as_string(AR_M)) # E: ndarray[Any, dtype[str_]] +reveal_type(np.busdaycalendar(holidays=date_seq)) # E: busdaycalendar +reveal_type(np.busdaycalendar(holidays=[M])) # E: busdaycalendar + reveal_type(np.compare_chararrays("a", "b", "!=", rstrip=False)) # E: ndarray[Any, dtype[bool_]] reveal_type(np.compare_chararrays(b"a", b"a", "==", True)) # E: ndarray[Any, dtype[bool_]] diff --git a/pavement.py b/pavement.py index 6fdaae97..3533e420 100644 --- a/pavement.py +++ b/pavement.py @@ -38,7 +38,7 @@ from paver.easy import Bunch, options, task, sh #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/1.22.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/1.22.1-notes.rst' #------------------------------------------------------- diff --git a/setup.py b/setup.py index 703fe79e..06170df5 100755 --- a/setup.py +++ b/setup.py @@ -80,6 +80,10 @@ if os.path.exists('MANIFEST'): # so that it is in sys.modules import numpy.distutils.command.sdist import setuptools +if int(setuptools.__version__.split('.')[0]) >= 60: + raise RuntimeError( + "Setuptools version is '{}', version < '60.0.0' is required. " + "See pyproject.toml".format(setuptools.__version__)) # Initialize cmdclass from versioneer from numpy.distutils.core import numpy_cmdclass