diff --git a/.github/workflows/circleci.yml b/.github/workflows/circleci.yml index c0c8876b6bbe..3c84ce3c6890 100644 --- a/.github/workflows/circleci.yml +++ b/.github/workflows/circleci.yml @@ -17,7 +17,7 @@ jobs: statuses: write steps: - name: GitHub Action step - uses: larsoner/circleci-artifacts-redirector-action@4e13a10d89177f4bfc8007a7064bdbeda848d8d1 # master + uses: larsoner/circleci-artifacts-redirector-action@7eafdb60666f57706a5525a2f5eb76224dc8779b # master with: repo-token: ${{ secrets.GITHUB_TOKEN }} api-token: ${{ secrets.CIRCLE_TOKEN }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 68ef1e811e88..0342fd92c924 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3.28.17 + uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3.28.17 + uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3.28.17 + uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index af26de3aee9d..5036a94ce399 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -19,6 +19,6 @@ jobs: with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@38ecb5b593bf0eb19e335c03f97670f792489a8b # v4.7.0 + uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9 # v4.7.1 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index fea77068e128..453a67088adf 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # 2.23.3 + - uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index dc9ef34db71d..3452724841c3 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -82,7 +82,7 @@ jobs: persist-credentials: false - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: 'pypy3.11-v7.3.19' + python-version: 'pypy3.11-nightly' - name: Setup using scipy-openblas run: | python -m pip install -r requirements/ci_requirements.txt @@ -156,7 +156,7 @@ jobs: # TODO: gcov env: PYTHONOPTIMIZE: 2 - + aarch64_test: 
needs: [smoke_test] @@ -204,7 +204,7 @@ jobs: submodules: recursive fetch-tags: true persist-credentials: false - + - name: Creates new container run: | docker run --name the_container --interactive \ @@ -221,7 +221,7 @@ jobs: docker run --rm -e "TERM=xterm-256color" \ -v $(pwd):/numpy the_container \ /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build + cd /numpy && spin build '" - name: Meson Log @@ -271,7 +271,7 @@ jobs: # - name: Check docstests # shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' # run: | - # pip install scipy-doctest==1.6.0 hypothesis==6.104.1 matplotlib scipy pytz pandas + # pip install scipy-doctest>=1.8.0 hypothesis==6.104.1 matplotlib scipy pandas # spin check-docs -v # spin check-tutorials -v @@ -324,7 +324,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: repository: data-apis/array-api-tests - ref: 'c48410f96fc58e02eea844e6b7f6cc01680f77ce' # Latest commit as of 2025-04-01 + ref: '3c273cd34d51c64ed893737306d36adab23a94a1' # v2025.05.23 submodules: 'true' path: 'array-api-tests' persist-credentials: false @@ -346,7 +346,7 @@ jobs: PYTHONWARNINGS: 'ignore::UserWarning::,ignore::DeprecationWarning::,ignore::RuntimeWarning::' run: | cd ${GITHUB_WORKSPACE}/array-api-tests - pytest array_api_tests -v -c pytest.ini --ci --max-examples=100 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt + pytest array_api_tests -v -c pytest.ini -n 4 --max-examples=1000 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt custom_checks: needs: [smoke_test] diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 4cd87ab37dd4..a9f065e25cc0 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -212,7 +212,7 @@ jobs: python -m pip install pytest pytest-xdist hypothesis typing_extensions - name: 
Build - run: CC=gcc-13 CXX=g++-13 spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' + run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' - name: Meson Log if: always() @@ -263,7 +263,7 @@ jobs: python -m pip install pytest pytest-xdist hypothesis typing_extensions - name: Build - run: CC=gcc-13 CXX=g++-13 spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_spr + run: CC=gcc-13 CXX=g++-13 spin build -- -Denable-openmp=true -Dallow-noblas=true -Dcpu-baseline=avx512_spr - name: Meson Log if: always() diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 16862d715876..418dc7d52fc1 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -55,7 +55,7 @@ jobs: ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - name: Setup Miniforge - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1 + uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 with: python-version: ${{ matrix.python-version }} channels: conda-forge @@ -115,7 +115,7 @@ jobs: build_runner: - [ macos-13, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] - version: ["3.11", "3.14t"] + version: ["3.11", "3.14t-dev"] steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -124,14 +124,9 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - activate-environment: true python-version: ${{ matrix.version }} - enable-cache: false - - - run: - uv pip install --python=${{ matrix.version }} pip - uses: 
maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 if: ${{ matrix.build_runner[0] == 'macos-13' }} diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 36e89504def7..81fa57239b9b 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -46,9 +46,9 @@ jobs: fail-fast: false matrix: os_python: + - [macos-latest, '3.13'] - [ubuntu-latest, '3.12'] - [windows-latest, '3.11'] - - [macos-latest, '3.11'] steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 360261b6a186..e64789006e2c 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -30,7 +30,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 + uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 with: results_file: results.sarif results_format: sarif @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v2.1.27 + uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v2.1.27 with: sarif_file: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index e96021775f3c..68352eb1fc7c 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -89,13 +89,13 @@ jobs: # targeting macos >= 14. 
Could probably build on macos-14, but it would be a cross-compile - [macos-13, macosx_x86_64, accelerate] - [macos-14, macosx_arm64, accelerate] # always use accelerate - - [windows-2019, win_amd64, ""] - - [windows-2019, win32, ""] + - [windows-2022, win_amd64, ""] + - [windows-2022, win32, ""] - [windows-11-arm, win_arm64, ""] - python: ["cp311", "cp312", "cp313", "cp313t", "pp311"] + python: ["cp311", "cp312", "cp313", "cp313t", "cp314", "cp314t", "pp311"] exclude: # Don't build PyPy 32-bit windows - - buildplat: [windows-2019, win32, ""] + - buildplat: [windows-2022, win32, ""] python: "pp311" # Don't build PyPy arm64 windows - buildplat: [windows-11-arm, win_arm64, ""] @@ -107,6 +107,8 @@ jobs: python: "pp311" - buildplat: [ macos13, macosx_x86_64, openblas ] python: "cp313t" + - buildplat: [ macos13, macosx_x86_64, openblas ] + python: "cp314t" env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} @@ -175,7 +177,7 @@ jobs: fi - name: Build wheels - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # v2.23.3 + uses: pypa/cibuildwheel@5f22145df44122af0f5a201f93cf0207171beca7 # v3.0.0 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} @@ -184,7 +186,8 @@ jobs: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + - name: install micromamba + uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment with: # for installation of anaconda-client, required for upload to @@ -198,6 +201,16 @@ jobs: create-args: >- anaconda-client + - name: win-arm64 install anaconda client + if: ${{ matrix.buildplat[1] == 'win_arm64' }} + run: | + # Rust installation needed for rpds-py. 
+ Invoke-WebRequest https://static.rust-lang.org/rustup/dist/aarch64-pc-windows-msvc/rustup-init.exe -UseBasicParsing -Outfile rustup-init.exe + .\rustup-init.exe -y + $env:PATH="$env:PATH;$env:USERPROFILE\.cargo\bin" + pip install anaconda-client + + - name: Upload wheels if: success() && github.repository == 'numpy/numpy' shell: bash -el {0} @@ -267,7 +280,7 @@ jobs: name: sdist path: ./dist/* - - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 # v3.1.1 + - uses: conda-incubator/setup-miniconda@835234971496cad1653abb28a638a281cf32541f # v3.2.0 with: # for installation of anaconda-client, required for upload to # anaconda.org diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 72d23e27b897..e760e37780a7 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -16,7 +16,7 @@ permissions: jobs: python64bit_openblas: name: x86-64, LP64 OpenBLAS - runs-on: windows-2019 + runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' strategy: @@ -24,7 +24,7 @@ jobs: matrix: compiler-pyversion: - ["MSVC", "3.11"] - - ["Clang-cl", "3.14t"] + - ["Clang-cl", "3.14t-dev"] steps: - name: Checkout @@ -35,14 +35,9 @@ jobs: persist-credentials: false - name: Setup Python - uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - activate-environment: true python-version: ${{ matrix.compiler-pyversion[1] }} - enable-cache: false - - - run: - uv pip install --python=${{ matrix.version }} pip - name: Install build dependencies from PyPI run: | @@ -97,7 +92,7 @@ jobs: fail-fast: false matrix: include: - - os: windows-2019 + - os: windows-2022 architecture: x86 - os: windows-11-arm architecture: arm64 diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml index 0a691bff9b21..3eaf02eb062c 100644 --- 
a/.github/workflows/windows_arm64.yml +++ b/.github/workflows/windows_arm64.yml @@ -15,7 +15,7 @@ permissions: jobs: windows_arm: - runs-on: windows-2019 + runs-on: windows-2022 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' @@ -174,7 +174,7 @@ jobs: path: ./*.whl - name: Setup Mamba - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + uses: mamba-org/setup-micromamba@b09ef9b599704322748535812ca03efb2625677b with: # for installation of anaconda-client, required for upload to # anaconda.org diff --git a/.gitignore b/.gitignore index df7f084e3645..c4de68c1a9a7 100644 --- a/.gitignore +++ b/.gitignore @@ -64,7 +64,7 @@ GTAGS ################ # meson build/installation directories build -build-install +build-* # meson python output .mesonpy-native-file.ini # sphinx build directory diff --git a/.mailmap b/.mailmap index f33dfddb6492..e3e3bb56ecdf 100644 --- a/.mailmap +++ b/.mailmap @@ -11,6 +11,7 @@ !Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> !DWesl <22566757+DWesl@users.noreply.github.com> !Dreamge +!EarlMilktea <66886825+EarlMilktea@users.noreply.github.com> !Endolith !GalaxySnail !Illviljan <14371165+Illviljan@users.noreply.github.com> @@ -21,6 +22,7 @@ !Scian <65375075+hoony6134@users.noreply.github.com> !Searchingdays !amagicmuffin <2014wcheng@gmail.com> +!amotzop !bersbersbers <12128514+bersbersbers@users.noreply.github.com> !code-review-doctor !cook-1229 <70235336+cook-1229@users.noreply.github.com> @@ -34,6 +36,7 @@ !hutauf !jbCodeHub !juztamau5 +!karl3wm !legoffant <58195095+legoffant@users.noreply.github.com> !liang3zy22 <35164941+liang3zy22@users.noreply.github.com> !luzpaz @@ -51,6 +54,7 @@ !pmvz !pojaghi <36278217+pojaghi@users.noreply.github.com> !pratiklp00 +!samir539 !sfolje0 !spacescientist !stefan6419846 @@ -59,12 +63,15 @@ !tautaus !undermyumbrella1 !vahidmech +!wenlong2 !xoviat <49173759+xoviat@users.noreply.github.com> !xoviat 
<49173759+xoviat@users.noreply.github.com> !yan-wyb !yetanothercheer Aaron Baecker Adrin Jalali +Abhishek Kumar +Abhishek Kumar <142383124+abhishek-iitmadras@users.noreply.github.com> Abraham Medina Arun Kota Arun Kota Arun Kota @@ -140,6 +147,8 @@ Anton Prosekin Anže Starič Arfy Slowy Arnaud Ma +Arnaud Tremblay +Arnaud Tremblay <59627629+Msa360@users.noreply.github.com> Aron Ahmadia Arun Kota Arun Kota @@ -190,6 +199,8 @@ Carl Kleffner Carl Leake Carlos Henrique Hermanny Moreira da Silva Carlos Henrique Hermanny Moreira da Silva <121122527+carlosilva10260@users.noreply.github.com> +Carlos Martin +Carlos Martin Cédric Hannotier Charles Stern <62192187+cisaacstern@users.noreply.github.com> Chiara Marmo @@ -200,6 +211,8 @@ Chris Burns Chris Fu (傅立业) <17433201@qq.com> Chris Holland <41524756+ChrisAHolland@users.noreply.github.com> Chris Kerr +Chris Navarro +Chris Navarro <24905907+lvllvl@users.noreply.github.com> Chris Vavaliaris Christian Clauss Christopher Dahlin @@ -270,6 +283,7 @@ Eric Fode Eric Fode Eric Quintero Eric Xie <161030123+EngineerEricXie@users.noreply.github.com> Ernest N. Mamikonyan +Ernst Peng Eskild Eriksen Eskild Eriksen <42120229+iameskild@users.noreply.github.com> Eskild Eriksen @@ -300,8 +314,11 @@ Gregory R. Lee Gregory R. Lee Guo Ci guoci Guo Shuai +Gyeongjae Choi Habiba Hye Habiba Hye <145866308+HabibiHye@users.noreply.github.com> +Halle Loveday +Halle Loveday Hameer Abbasi Hannah Aizenman Han Genuit @@ -311,6 +328,10 @@ Helder Oliveira Hemil Desai Himanshu Hiroyuki V. 
Yamazaki +Daniel Hrisca +Daniel Hrisca +François de Coatpont +François de Coatpont <93073405+Chevali2004@users.noreply.github.com> Hugo van Kemenade Iantra Solari I-Shen Leong @@ -363,6 +384,7 @@ Jessé Pires Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com> Jhong-Ken Chen (陳仲肯) Jhong-Ken Chen (陳仲肯) <37182101+kennychenfs@users.noreply.github.com> +Jiuding Tan (谭九鼎) <109224573@qq.com> Johann Faouzi Johann Rohwer Johann Rohwer jmrohwer @@ -447,10 +469,13 @@ Luke Zoltan Kelley Madhulika Jain Chambers <53166646+madhulikajc@users.noreply.github.com> Magdalena Proszewska Magdalena Proszewska <38814059+mproszewska@users.noreply.github.com> +Makima C. Yang Malik Idrees Hasan Khan <77000356+MalikIdreesHasanKhan@users.noreply.github.com>C Manoj Kumar Marcel Loose Marcin Podhajski <36967358+m-podhajski@users.noreply.github.com> +Marco Edward Gorelli +Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Margret Pax Margret Pax <13646646+paxcodes@users.noreply.github.com> Mark DePristo @@ -508,6 +533,7 @@ Michael Schnaitter Michael Seifert Michel Fruchart +Mike O'Brien Mike Toews Miki Watanabe (渡邉 美希) Miles Cranmer @@ -516,9 +542,12 @@ Milica Dančuk love-bees <33499899+love-bees@users.noreply.g Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> +Mohammed Abdul Rahman +Mohammed Abdul Rahman <130785777+that-ar-guy@users.noreply.github.com> Mohaned Qunaibit Muhammad Kasim Muhammed Muhsin +Mugundan Selvanayagam Mukulika Pahari Mukulika Pahari <60316606+Mukulikaa@users.noreply.github.com> Munira Alduraibi @@ -571,6 +600,7 @@ Peter J Cock Peter Kämpf Peyton Murray Phil Elson +Filipe Laíns Pierre GM Pierre GM pierregm Piotr Gaiński @@ -591,6 +621,8 @@ Rehas Sachdeva Richard Howe <45905457+rmhowe425@users.noreply.github.com> Ritta Narita Riya Sharma +Rob Timpe +Rob Timpe Robert Kern Robert LU Robert T. 
McGibbon @@ -660,6 +692,7 @@ Steve Stagg Steven J Kern Stuart Archibald Stuart Archibald +SUMIT SRIMANI <2301109104@ptuniv.edu.in> SuryaChand P Sylvain Ferriol Takanori Hirano @@ -696,6 +729,8 @@ Vinith Kishore <85550536+vinith2@users.noreply.github Vrinda Narayan Vrinda Narayan Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> +Wang Yang (杨旺) +Wang Yang (杨旺) <1113177880@qq.com> Wansoo Kim Warrick Ball Warrick Ball @@ -711,11 +746,11 @@ Xiangyi Wang Yamada Fuyuka Yang Hau Yang Hau -Yang Wang Yash Pethe Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra Yashasvi Misra <54177363+yashasvimisra2798@users.noreply.github.com> +Yichi Zhang Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> Younes Sandi Younes Sandi <65843206+Unessam@users.noreply.github.com> @@ -723,6 +758,8 @@ Yu Feng Yuji Kanagawa Yuki K Yury Kirienko +Yuvraj Pradhan +Yuvraj Pradhan Zac Hatfield-Dodds Zach Brugh <111941670+zachbrugh@users.noreply.github.com> Zé Vinícius diff --git a/.spin/cmds.py b/.spin/cmds.py index f9c7658fbf17..66885de630e0 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -1,6 +1,6 @@ +import importlib import os import pathlib -import importlib import shutil import subprocess import sys @@ -48,8 +48,8 @@ def changelog(token, revision_range): $ spin authors -t $GH_TOKEN --revision-range v1.25.0..v1.26.0 """ try: - from github.GithubException import GithubException from git.exc import GitError + from github.GithubException import GithubException changelog = _get_numpy_tools(pathlib.Path('changelog.py')) except ModuleNotFoundError as e: raise click.ClickException( @@ -193,9 +193,11 @@ def check_docs(*, parent_callback, pytest_args, **kwargs): """ # noqa: E501 try: # prevent obscure error later - import scipy_doctest + import scipy_doctest # noqa: F401 except ModuleNotFoundError as e: raise ModuleNotFoundError("scipy-doctest not installed") from e + if scipy_doctest.__version__ < '1.8.0': + raise ModuleNotFoundError("please update 
scipy_doctests to >= 1.8.0") if (not pytest_args): pytest_args = ('--pyargs', 'numpy') @@ -203,6 +205,7 @@ def check_docs(*, parent_callback, pytest_args, **kwargs): # turn doctesting on: doctest_args = ( '--doctest-modules', + '--doctest-only-doctests=true', '--doctest-collect=api' ) @@ -263,6 +266,7 @@ def _set_mem_rlimit(max_mem=None): Set address space rlimit """ import resource + import psutil mem = psutil.virtual_memory() @@ -615,7 +619,8 @@ def notes(version_override): ) try: - test_notes = _get_numpy_tools(pathlib.Path('ci', 'test_all_newsfragments_used.py')) + cmd = pathlib.Path('ci', 'test_all_newsfragments_used.py') + test_notes = _get_numpy_tools(cmd) except ModuleNotFoundError as e: raise click.ClickException( f"{e.msg}. Install the missing packages to use this command." diff --git a/INSTALL.rst b/INSTALL.rst index 017e4de8c9d4..6e9d2cd242f5 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -135,12 +135,8 @@ For best performance, a development package providing BLAS and CBLAS should be installed. Some of the options available are: - ``libblas-dev``: reference BLAS (not very optimized) -- ``libatlas-base-dev``: generic tuned ATLAS, it is recommended to tune it to - the available hardware, see /usr/share/doc/libatlas3-base/README.Debian for - instructions -- ``libopenblas-base``: fast and runtime detected so no tuning required but a - very recent version is needed (>=0.2.15 is recommended). Older versions of - OpenBLAS suffered from correctness issues on some CPUs. 
+- ``libopenblas-base``: (recommended) OpenBLAS is performant, and used + in the NumPy wheels on PyPI except where Apple's Accelerate is tuned better for Apple hardware The package linked to when numpy is loaded can be chosen after installation via the alternatives mechanism:: @@ -148,10 +144,6 @@ the alternatives mechanism:: update-alternatives --config libblas.so.3 update-alternatives --config liblapack.so.3 -Or by preloading a specific BLAS library with:: - - LD_PRELOAD=/usr/lib/atlas-base/atlas/libblas.so.3 python ... - Build issues ============ diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 36362f6cacc7..af6e5cf52ac4 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -75,7 +75,7 @@ stages: - job: Windows timeoutInMinutes: 120 pool: - vmImage: 'windows-2019' + vmImage: 'windows-2022' strategy: maxParallel: 3 matrix: diff --git a/benchmarks/asv_pip_nopep517.py b/benchmarks/asv_pip_nopep517.py index cffc42a55c7d..fc231d1db5d0 100644 --- a/benchmarks/asv_pip_nopep517.py +++ b/benchmarks/asv_pip_nopep517.py @@ -3,6 +3,7 @@ """ import subprocess import sys + # pip ignores '--global-option' when pep517 is enabled therefore we disable it. cmd = [sys.executable, '-mpip', 'wheel', '--no-use-pep517'] try: diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py index 8372be467005..9be15825edda 100644 --- a/benchmarks/benchmarks/__init__.py +++ b/benchmarks/benchmarks/__init__.py @@ -1,6 +1,8 @@ -from . import common -import sys import os +import sys + +from . 
import common + def show_cpu_features(): from numpy.lib._utils_impl import _opt_info diff --git a/benchmarks/benchmarks/bench_app.py b/benchmarks/benchmarks/bench_app.py index d22aa2e09604..06a9401b02f5 100644 --- a/benchmarks/benchmarks/bench_app.py +++ b/benchmarks/benchmarks/bench_app.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class LaplaceInplace(Benchmark): params = ['inplace', 'normal'] diff --git a/benchmarks/benchmarks/bench_array_coercion.py b/benchmarks/benchmarks/bench_array_coercion.py index ca1f3cc83a3f..ae9c040970d8 100644 --- a/benchmarks/benchmarks/bench_array_coercion.py +++ b/benchmarks/benchmarks/bench_array_coercion.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class ArrayCoercionSmall(Benchmark): # More detailed benchmarks for array coercion, @@ -38,7 +38,7 @@ def time_asarray(self, array_like): def time_asarray_dtype(self, array_like): np.asarray(array_like, dtype=self.int64) - def time_asarray_dtype(self, array_like): + def time_asarray_dtype_order(self, array_like): np.asarray(array_like, dtype=self.int64, order="F") def time_asanyarray(self, array_like): @@ -47,7 +47,7 @@ def time_asanyarray(self, array_like): def time_asanyarray_dtype(self, array_like): np.asanyarray(array_like, dtype=self.int64) - def time_asanyarray_dtype(self, array_like): + def time_asanyarray_dtype_order(self, array_like): np.asanyarray(array_like, dtype=self.int64, order="F") def time_ascontiguousarray(self, array_like): diff --git a/benchmarks/benchmarks/bench_clip.py b/benchmarks/benchmarks/bench_clip.py index ce0511da82a4..953fc383e20b 100644 --- a/benchmarks/benchmarks/bench_clip.py +++ b/benchmarks/benchmarks/bench_clip.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class ClipFloat(Benchmark): param_names = ["dtype", "size"] diff --git a/benchmarks/benchmarks/bench_core.py 
b/benchmarks/benchmarks/bench_core.py index 434407d62b8b..a9a6c88b87a0 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class Core(Benchmark): def setup(self): @@ -151,7 +151,8 @@ class CountNonzero(Benchmark): params = [ [1, 2, 3], [100, 10000, 1000000], - [bool, np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, str, object] + [bool, np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, str, object] ] def setup(self, numaxes, size, dtype): diff --git a/benchmarks/benchmarks/bench_creation.py b/benchmarks/benchmarks/bench_creation.py index 8c06c2125940..f76a9c78f867 100644 --- a/benchmarks/benchmarks/bench_creation.py +++ b/benchmarks/benchmarks/bench_creation.py @@ -1,7 +1,7 @@ -from .common import Benchmark, TYPES1, get_squares_ - import numpy as np +from .common import TYPES1, Benchmark, get_squares_ + class MeshGrid(Benchmark): """ Benchmark meshgrid generation diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 9745545fba17..f72d50eb74ce 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + try: # SkipNotImplemented is available since 6.0 from asv_runner.benchmarks.mark import SkipNotImplemented @@ -236,12 +236,13 @@ class Sort(Benchmark): param_names = ['kind', 'dtype', 'array_type'] # The size of the benchmarked arrays. 
- ARRAY_SIZE = 10000 + ARRAY_SIZE = 1000000 def setup(self, kind, dtype, array_type): rnd = np.random.RandomState(507582308) array_class = array_type[0] - self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) + generate_array_method = getattr(SortGenerator, array_class) + self.arr = generate_array_method(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) def time_sort(self, kind, dtype, array_type): # Using np.sort(...) instead of arr.sort(...) because it makes a copy. diff --git a/benchmarks/benchmarks/bench_indexing.py b/benchmarks/benchmarks/bench_indexing.py index 5d270f788164..6ac124cac88d 100644 --- a/benchmarks/benchmarks/bench_indexing.py +++ b/benchmarks/benchmarks/bench_indexing.py @@ -1,12 +1,12 @@ -from .common import ( - Benchmark, get_square_, get_indexes_, get_indexes_rand_, TYPES1) - -from os.path import join as pjoin import shutil -from numpy import memmap, float32, array -import numpy as np +from os.path import join as pjoin from tempfile import mkdtemp +import numpy as np +from numpy import array, float32, memmap + +from .common import TYPES1, Benchmark, get_indexes_, get_indexes_rand_, get_square_ + class Indexing(Benchmark): params = [TYPES1 + ["object", "O,i"], diff --git a/benchmarks/benchmarks/bench_io.py b/benchmarks/benchmarks/bench_io.py index 80b3739e0be9..eea4a4ed4309 100644 --- a/benchmarks/benchmarks/bench_io.py +++ b/benchmarks/benchmarks/bench_io.py @@ -1,7 +1,8 @@ -from .common import Benchmark, get_squares, get_squares_ +from io import SEEK_SET, BytesIO, StringIO import numpy as np -from io import SEEK_SET, StringIO, BytesIO + +from .common import Benchmark, get_squares, get_squares_ class Copy(Benchmark): diff --git a/benchmarks/benchmarks/bench_itemselection.py b/benchmarks/benchmarks/bench_itemselection.py index c6c74da569c7..90f9efc77d90 100644 --- a/benchmarks/benchmarks/bench_itemselection.py +++ b/benchmarks/benchmarks/bench_itemselection.py @@ -1,7 +1,7 @@ -from .common import Benchmark, 
TYPES1 - import numpy as np +from .common import TYPES1, Benchmark + class Take(Benchmark): params = [ diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index dc8815ffe95b..0e60468308bb 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -1,10 +1,10 @@ """Benchmarks for `numpy.lib`.""" -from .common import Benchmark - import numpy as np +from .common import Benchmark + class Pad(Benchmark): """Benchmarks for `numpy.pad`. diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index b4fe6e58b3c7..03e2fd77f4f2 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -1,7 +1,7 @@ -from .common import Benchmark, get_squares_, get_indexes_rand, TYPES1 - import numpy as np +from .common import TYPES1, Benchmark, get_indexes_rand, get_squares_ + class Eindot(Benchmark): def setup(self): diff --git a/benchmarks/benchmarks/bench_ma.py b/benchmarks/benchmarks/bench_ma.py index 896f4923c5c8..e815f5fc0cdb 100644 --- a/benchmarks/benchmarks/bench_ma.py +++ b/benchmarks/benchmarks/bench_ma.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class MA(Benchmark): def setup(self): diff --git a/benchmarks/benchmarks/bench_manipulate.py b/benchmarks/benchmarks/bench_manipulate.py index 1c8d2b96388e..5bb867c10e89 100644 --- a/benchmarks/benchmarks/bench_manipulate.py +++ b/benchmarks/benchmarks/bench_manipulate.py @@ -1,7 +1,9 @@ -from .common import Benchmark, get_squares_, TYPES1, DLPACK_TYPES +from collections import deque import numpy as np -from collections import deque + +from .common import TYPES1, Benchmark + class BroadcastArrays(Benchmark): params = [[(16, 32), (128, 256), (512, 1024)], diff --git a/benchmarks/benchmarks/bench_polynomial.py b/benchmarks/benchmarks/bench_polynomial.py index ee52bdc65086..7bd7334e3c14 100644 --- a/benchmarks/benchmarks/bench_polynomial.py +++ 
b/benchmarks/benchmarks/bench_polynomial.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class Polynomial(Benchmark): diff --git a/benchmarks/benchmarks/bench_random.py b/benchmarks/benchmarks/bench_random.py index ee73f4d0eb52..d15d25941f93 100644 --- a/benchmarks/benchmarks/bench_random.py +++ b/benchmarks/benchmarks/bench_random.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + try: from numpy.random import Generator except ImportError: diff --git a/benchmarks/benchmarks/bench_records.py b/benchmarks/benchmarks/bench_records.py index badeac0edc40..8c24a4715709 100644 --- a/benchmarks/benchmarks/bench_records.py +++ b/benchmarks/benchmarks/bench_records.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class Records(Benchmark): def setup(self): diff --git a/benchmarks/benchmarks/bench_reduce.py b/benchmarks/benchmarks/bench_reduce.py index 8898135408e1..1d78e1bba03a 100644 --- a/benchmarks/benchmarks/bench_reduce.py +++ b/benchmarks/benchmarks/bench_reduce.py @@ -1,7 +1,7 @@ -from .common import Benchmark, TYPES1, get_squares - import numpy as np +from .common import TYPES1, Benchmark, get_squares + class AddReduce(Benchmark): def setup(self): diff --git a/benchmarks/benchmarks/bench_scalar.py b/benchmarks/benchmarks/bench_scalar.py index c99e99d7c8a7..40164926ade3 100644 --- a/benchmarks/benchmarks/bench_scalar.py +++ b/benchmarks/benchmarks/bench_scalar.py @@ -1,7 +1,7 @@ -from .common import Benchmark, TYPES1 - import numpy as np +from .common import TYPES1, Benchmark + class ScalarMath(Benchmark): # Test scalar math, note that each of these is run repeatedly to offset diff --git a/benchmarks/benchmarks/bench_shape_base.py b/benchmarks/benchmarks/bench_shape_base.py index 56d0eec0d1ea..db66fa46371e 100644 --- a/benchmarks/benchmarks/bench_shape_base.py +++ b/benchmarks/benchmarks/bench_shape_base.py 
@@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + class Block(Benchmark): params = [1, 10, 100] diff --git a/benchmarks/benchmarks/bench_strings.py b/benchmarks/benchmarks/bench_strings.py index 88d20069e75b..8df866f273c0 100644 --- a/benchmarks/benchmarks/bench_strings.py +++ b/benchmarks/benchmarks/bench_strings.py @@ -1,8 +1,8 @@ -from .common import Benchmark +import operator import numpy as np -import operator +from .common import Benchmark _OPERATORS = { '==': operator.eq, diff --git a/benchmarks/benchmarks/bench_trim_zeros.py b/benchmarks/benchmarks/bench_trim_zeros.py index 4e25a8b021b7..4a9751681e9e 100644 --- a/benchmarks/benchmarks/bench_trim_zeros.py +++ b/benchmarks/benchmarks/bench_trim_zeros.py @@ -1,7 +1,7 @@ -from .common import Benchmark - import numpy as np +from .common import Benchmark + _FLOAT = np.dtype('float64') _COMPLEX = np.dtype('complex128') _INT = np.dtype('int64') diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 4d9f3c9c8f61..7dc321ac2980 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -1,10 +1,11 @@ -from .common import Benchmark, get_squares_, TYPES1, DLPACK_TYPES - -import numpy as np import itertools -from packaging import version import operator +from packaging import version + +import numpy as np + +from .common import DLPACK_TYPES, TYPES1, Benchmark, get_squares_ ufuncs = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_count', 'bitwise_not', diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index 6d5ff9b8c491..0c80b1877b3a 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -1,7 +1,7 @@ -from .common import Benchmark, get_data - import numpy as np +from .common import Benchmark, get_data + UFUNCS 
= [obj for obj in np._core.umath.__dict__.values() if isinstance(obj, np.ufunc)] UFUNCS_UNARY = [uf for uf in UFUNCS if "O->O" in uf.types] @@ -10,7 +10,7 @@ class _AbstractBinary(Benchmark): params = [] param_names = ['ufunc', 'stride_in0', 'stride_in1', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False @@ -63,7 +63,7 @@ class _AbstractUnary(Benchmark): params = [] param_names = ['ufunc', 'stride_in', 'stride_out', 'dtype'] timeout = 10 - arrlen = 10000 + arrlen = 1000000 data_finite = True data_denormal = False data_zeros = False @@ -208,8 +208,9 @@ def train(self, max_epoch): for epoch in range(max_epoch): z = np.matmul(self.X_train, self.W) A = 1 / (1 + np.exp(-z)) # sigmoid(z) - loss = -np.mean(self.Y_train * np.log(A) + (1 - self.Y_train) * np.log(1 - A)) - dz = A - self.Y_train + Y_train = self.Y_train + loss = -np.mean(Y_train * np.log(A) + (1 - Y_train) * np.log(1 - A)) + dz = A - Y_train dw = (1 / self.size) * np.matmul(self.X_train.T, dz) self.W = self.W - self.alpha * dw diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py index 064255e185eb..7ed528e8d518 100644 --- a/benchmarks/benchmarks/common.py +++ b/benchmarks/benchmarks/common.py @@ -1,8 +1,9 @@ -import numpy as np import random from functools import lru_cache from pathlib import Path +import numpy as np + # Various pre-crafted datasets/variables for testing # !!! Must not be changed -- only appended !!! # while testing numpy we better not rely on numpy to produce random diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst index 702803172477..6d2194b5c4e6 100644 --- a/doc/RELEASE_WALKTHROUGH.rst +++ b/doc/RELEASE_WALKTHROUGH.rst @@ -344,6 +344,8 @@ This assumes that you have forked ``_:: to the previous links for example. - For the ``*.0`` release in a cycle, add a new section at the top with a short description of the new features and point the news link to it. 
+- Edit the newsHeader and date fields at the top of news.md +- Also edit the buttonText on line 14 in content/en/config.yaml commit and push:: diff --git a/doc/changelog/1.21.5-changelog.rst b/doc/changelog/1.21.5-changelog.rst index acd3599d48ef..04ff638d42a3 100644 --- a/doc/changelog/1.21.5-changelog.rst +++ b/doc/changelog/1.21.5-changelog.rst @@ -22,7 +22,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... -* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/changelog/2.2.6-changelog.rst b/doc/changelog/2.2.6-changelog.rst new file mode 100644 index 000000000000..16c62da4a927 --- /dev/null +++ b/doc/changelog/2.2.6-changelog.rst @@ -0,0 +1,32 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Ilhan Polat +* Joren Hammudoglu +* Marco Gorelli + +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Sayed Adel + +Pull requests merged +==================== + +A total of 11 pull requests were merged for this release. + +* `#28778 `__: MAINT: Prepare 2.2.x for further development +* `#28851 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments...
+* `#28852 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28853 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28864 `__: BUG: fix stringdtype singleton thread safety +* `#28865 `__: MAINT: use OpenBLAS 0.3.29 +* `#28889 `__: MAINT: from_dlpack thread safety fixes (#28883) +* `#28913 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28915 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28916 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. +* `#28966 `__: TYP: reject complex scalar types in ndarray.__ifloordiv__ diff --git a/doc/changelog/2.3.0-changelog.rst b/doc/changelog/2.3.0-changelog.rst new file mode 100644 index 000000000000..7ca672ba8dbf --- /dev/null +++ b/doc/changelog/2.3.0-changelog.rst @@ -0,0 +1,704 @@ + +Contributors +============ + +A total of 134 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !DWesl +* !EarlMilktea + +* !amotzop + +* !fengluoqiuwu +* !h-vetinari +* !karl3wm + +* !partev +* !samir539 + +* !wenlong2 + +* Aarni Koskela + +* Abdu Zoghbi + +* Abhishek Kumar +* Adam J. Stewart +* Aditi Juneja +* Adrin Jalali +* Agriya Khetarpal +* Alicia Boya García + +* Andrej Zhilenkov +* Andrew Nelson +* Angus Gibson + +* Antonio Rech Santos + +* Ari Gato + +* Arnaud Tremblay + +* Arvid Bessen + +* Baskar Gopinath + +* Carlos Martin + +* Charles Harris +* Chris Navarro + +* Chris Sidebottom +* Christian Lorentzen +* Christine P. 
Chai + +* Christopher Sidebottom +* Clément Robert +* Colin Gilgenbach + +* Craig Peters + +* Cédric Hannotier +* Daniel Hrisca +* Derek Homeier +* Diego Baldassar + +* Dimitri Papadopoulos Orfanos +* Eoghan O'Connell + +* Eric Larson +* Ernst Peng + +* Evgeni Burovski +* Filipe Laíns +* François Rozet + +* François de Coatpont + +* GUAN MING +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Guido Imperiale + +* Gyeongjae Choi + +* Halle Loveday + +* Hannah Wheeler + +* Hao Chen + +* Harmen Stoppels + +* Hin-Tak Leung + +* Ian DesJardin + +* Ihar Hrachyshka + +* Ilhan Polat +* Inessa Pawson +* J. Steven Dodge + +* Jake VanderPlas +* Jiachen An + +* Jiuding Tan (谭九鼎) +* Joe Rickerby + +* John Kirkham +* John Stilley + +* Jonathan Albrecht + +* Joren Hammudoglu +* Kai Germaschewski + +* Krishna Bindumadhavan + +* Lucas Colley +* Luka Krmpotić + +* Lysandros Nikolaou +* Maanas Arora +* Makima C. Yang + +* Marco Barbosa + +* Marco Edward Gorelli + +* Mark Harfouche +* Marten van Kerkwijk +* Mateusz Sokół +* Matt Haberland +* Matthew Brett +* Matthew Goldsberry + +* Matthew Sterrett +* Matthias Diener +* Matthieu Darbois +* Matti Picus +* Melissa Weber Mendonça +* Michael Siebert +* Mike O'Brien + +* Mohammed Abdul Rahman + +* Mugundan Selvanayagam + +* Musharaf Aijaz Baba + +* Musharraffaijaz + +* Nathan Goldbaum +* Nicholas Christensen + +* Nitish Satyavolu + +* Omid Rajaei +* PTUsumit + +* Peter Hawkins +* Peyton Murray +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Rob Timpe + +* Robert Kern +* Rohit Goswami +* Ross Barnowski +* Roy Smart +* Saransh Chopra +* Saraswathy Kalaiselvan + +* Sayed Adel +* Sebastian Berg +* Shantanu Jain + +* Shashwat Pandey + +* Shi Entong + +* Simon Altrogge +* Stan Ulbrych +* Thomas A Caswell +* Théotime Grohens + +* Tyler Reddy +* WANG Xuerui + +* WEN Hao + +* Wang Yang (杨旺) + +* Warren Weckesser +* Warrick Ball +* William Andrea +* Yakov Danishevsky + +* Yichi Zhang + +* Yuvraj Pradhan + +* dependabot[bot] +* 
hfloveday12 + + +Pull requests merged +==================== + +A total of 556 pull requests were merged for this release. + +* `#22718 `__: DOC: Add docs on using GitHub Codespaces for NumPy development +* `#25675 `__: ENH: add matvec and vecmat gufuncs +* `#25934 `__: ENH: Convert tanh from C universal intrinsics to C++ using Highway +* `#25991 `__: ENH: Optimize polyutils as_series +* `#26018 `__: ENH add hash based unique +* `#26745 `__: ENH, DOC: Add support for interactive examples for NumPy with... +* `#26958 `__: BUG: index overlap copy +* `#27288 `__: BUG: Scalar array comparison should return np.bool +* `#27300 `__: CI: pycodestyle → ruff +* `#27309 `__: MNT: Enforce ruff/Pyflakes rules (F) +* `#27324 `__: DOC: Removing module name from by-topic docs +* `#27343 `__: ENH: Add support for flat indexing on flat iterator +* `#27404 `__: DOC: document type promotion with Python types +* `#27522 `__: ENH: Cleanup npy_find_array_wrap +* `#27523 `__: ENH: Improve performance of np.count_nonzero for float arrays +* `#27648 `__: MAINT: Fix the code style to our C-Style-Guide +* `#27738 `__: DEP: testing: disable deprecated use of keywords x/y +* `#27784 `__: BUG: ``sinc``\ : fix underflow for float16 +* `#27789 `__: ENH: Implement np.strings.slice as a gufunc +* `#27819 `__: CI: add windows free-threaded CI +* `#27823 `__: BEG, MAINT: Begin NumPy 2.3.0 development. +* `#27824 `__: BUG: Fix mismatch in definition and declaration for a couple... +* `#27826 `__: CI: update circleci to python3.11.10, limit parallel builds. +* `#27827 `__: CI: skip ninja installation in linux_qemu workflows +* `#27830 `__: ENH: speedup evaluation of numpy.polynomial.legendre.legval. 
+* `#27839 `__: DOC: Correct version-added for mean arg for nanvar and nanstd +* `#27841 `__: BUG: Never negate strides in reductions (for now) +* `#27847 `__: MAINT: Bump pypa/cibuildwheel from 2.21.3 to 2.22.0 +* `#27848 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.1 to 2.0.2 +* `#27850 `__: DOC: Correct versionadded for vecmat and matvec. +* `#27854 `__: MAINT: Use mask_store instead of store for compiler bug workaround +* `#27856 `__: SIMD: add lsx optimization for loongarch, and add Qemu tests +* `#27858 `__: DOC: Fix typo +* `#27860 `__: MAINT: Add helper for static or heap allocated scratch space +* `#27862 `__: MAINT: Drop Python 3.10 support. +* `#27864 `__: ENH: stack custom multiarray import exception with the original... +* `#27868 `__: BUG: fix importing numpy in Python's optimized mode +* `#27869 `__: TYP: Fix ``np.interp`` signature for scalar types +* `#27875 `__: MAINT: Ensure correct handling for very large unicode strings +* `#27877 `__: ENH: Refactor ``__qualname__`` across API +* `#27878 `__: DOC: Fix double import in docs +* `#27879 `__: DEV: Add venv files to .gitignore +* `#27883 `__: MAINT,ENH: Reorganize buffered iteration setup +* `#27884 `__: ENH: Remove unnecessary list collection +* `#27886 `__: MAINT: Move uint aligned check to actual transfer function setup +* `#27887 `__: MAINT: A few other small nditer fixes +* `#27896 `__: PERF: improve multithreaded ufunc scaling +* `#27897 `__: MAINT: Bump github/codeql-action from 3.27.5 to 3.27.6 +* `#27898 `__: MAINT: Remove ``25675.new_feature.rst`` snippet. +* `#27899 `__: TST: add timeouts for github actions tests and wheel builds. +* `#27901 `__: MAINT: simplify power fast path logic +* `#27910 `__: MAINT: Make qualname tests more specific and fix code where needed +* `#27914 `__: DOC: Remove 27896-snippet. +* `#27915 `__: MAINT: Bump actions/cache from 4.1.2 to 4.2.0 +* `#27917 `__: CI: Use hashes in specifying some actions. 
+* `#27920 `__: DOC: Fix invalid URL in the index.rst file. +* `#27921 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#27922 `__: MAINT: Move user pointers out of axisdata and simplify iternext +* `#27923 `__: ENH: Add cython wrappers for NpyString API +* `#27927 `__: DOC: Use internal/intersphinx links for neps. +* `#27930 `__: MAINT: Fix cirrus MacOs wheel builds [wheel build] +* `#27931 `__: CI: audit with zizmor +* `#27933 `__: BUG: fix building numpy on musl s390x +* `#27936 `__: MAINT: Update main after 2.2.0 release. +* `#27940 `__: BUG: Fix potential inconsistent behaviour for high-demnsional... +* `#27943 `__: TEST: cleanups +* `#27947 `__: BUG:fix compile error libatomic link test to meson.build +* `#27955 `__: BUG: fix use-after-free error in npy_hashtable.cpp +* `#27956 `__: BLD: add missing include to fix build with freethreading +* `#27962 `__: MAINT: Bump github/codeql-action from 3.27.6 to 3.27.7 +* `#27963 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.2 to 2.0.3 +* `#27967 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27973 `__: MAINT: Apply assorted ruff/flake8-pie rules (PIE) +* `#27974 `__: MAINT: Apply ruff/flake8-implicit-str-concat rules (ISC) +* `#27975 `__: MAINT: Apply ruff/flake8-comprehensions rules (C4) +* `#27976 `__: MAINT: Apply assorted ruff/flake8-pyi rules (PYI) +* `#27978 `__: MAINT: Apply assorted ruff/flake8-simplify rules (SIM) +* `#27981 `__: DOC: Document abi3 compat +* `#27992 `__: BUG: Fix segfault in stringdtype lexsort +* `#27996 `__: MAINT: Bump github/codeql-action from 3.27.7 to 3.27.9 +* `#27997 `__: MAINT: Remove unnecessary (and not safe in free-threaded) 1-D... +* `#27998 `__: API,MAINT: Make ``NpyIter_GetTransferFlags`` public and avoid... +* `#27999 `__: DOC, MAINT: Fix typos found by codespell +* `#28001 `__: DOC: Fix documentation for np.dtype.kind to include 'T' for StringDType +* `#28003 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... 
+* `#28004 `__: DOC: fix several doctests in dtype method docstrings +* `#28005 `__: BUG: Cython API was missing NPY_UINTP. +* `#28008 `__: BUG: Fix handling of matrix class in np.unique. +* `#28009 `__: TST: lib: Test average with object dtype and no weights. +* `#28013 `__: DOC: Fixed typos in development_advanced_debugging.rst +* `#28015 `__: MAINT: run ruff from the repository root +* `#28020 `__: CI: pin scipy-doctest to 1.5.1 +* `#28022 `__: MAINT: Add all submodules to ruff exclusion list. +* `#28023 `__: DOC: update to scipy-doctest 1.6.0 and fix tests +* `#28029 `__: MAINT: Bump actions/upload-artifact from 4.4.3 to 4.5.0 +* `#28032 `__: BUG,MAINT: Fix size bug in new alloc helper and use it in one... +* `#28033 `__: MAINT: Use userpointers to avoid NPY_MAXARGS in iternext() +* `#28035 `__: MAINT: Move ``lib.format`` and ``ctypeslib`` to submodules/private... +* `#28036 `__: Replace Twitter with X +* `#28039 `__: TYP: allow ``None`` in operand sequence of nditer +* `#28043 `__: BUG: Ensure einsum uses chunking (now that nditer doesn't) +* `#28051 `__: MAINT: Update main after 2.2.1 release. 
+* `#28053 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28056 `__: BUG: Fix f2py directives and ``--lower`` casing +* `#28058 `__: MAINT: Update ``spin lint`` command +* `#28060 `__: CI: fix check that GIL remains disabled for free-threaded wheels +* `#28065 `__: TYP: fix unnecessarily broad ``integer`` binop return types +* `#28069 `__: MAINT: update NEP 29 +* `#28073 `__: TYP: use mypy_primer to surface type checking regressions +* `#28074 `__: DOC: clarify np.gradient varargs requirement for axis parameter +* `#28075 `__: MAINT: Replace usage of fixed strides with inner strides in einsum +* `#28080 `__: ENH: Allow an arbitrary number of operands in nditer +* `#28081 `__: DOC: Add release snippets for iteration changes +* `#28083 `__: MAINT: Update LICENSE Copyright to 2025 +* `#28088 `__: BUG: update check for highway compiler support +* `#28089 `__: MAINT: bump ``mypy`` to ``1.14.1`` +* `#28090 `__: DOC:Fixed docstring with example use of np.select +* `#28091 `__: MAINT: Refactor stringdtype casts.c to use cpp templates +* `#28092 `__: MAINT: LoongArch: switch away from the __loongarch64 preprocessor... +* `#28094 `__: DOC: Fix documentation example for numpy.ma.masked +* `#28100 `__: DOC: Move linalg.outer from Decompositions to Matrix and vector... +* `#28101 `__: DOC: Fix sphinx markup in source/reference/random/extending.rst +* `#28102 `__: MAINT: update oldest supported GCC version from 8.4 to 9.3 +* `#28103 `__: MAINT: random: Call np.import_array() in _examples/cython/extending_distribution... +* `#28105 `__: ENH: support no-copy pickling for any array that can be transposed... +* `#28108 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... +* `#28109 `__: TYP: Fix the incorrect ``bool`` return type of ``issubdtype`` +* `#28110 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28116 `__: MAINT: random: Explicitly cast RAND_INT_MAX to double to avoid... 
+* `#28118 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28121 `__: MAINT: Correct NumPy 2.3 C-API versioning and version information +* `#28123 `__: BUG: move reduction initialization to ufunc initialization +* `#28127 `__: DOC: Improve slice docstrings +* `#28128 `__: BUG: Don't use C99 construct in import_array +* `#28129 `__: DEP: Deprecate ``numpy.typing.mypy_plugin`` +* `#28130 `__: CI: Fix mypy_primer comment workflow +* `#28133 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28134 `__: DEP: Deprecate ``numpy.typing.mypy_plugin``\ : The sequel +* `#28141 `__: DOC: Add instructions to build NumPy on WoA +* `#28142 `__: ENH: inline UTF-8 byte counter and make it branchless +* `#28144 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28148 `__: MAINT: Replace usage of outdated fixed strides with inner strides... +* `#28149 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28154 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28161 `__: DOC: Clarify ``np.loadtxt`` encoding argument default value in... +* `#28163 `__: MAINT: Avoid a redundant copy on ``a[...] = b`` +* `#28167 `__: DOC: fix formatting typo in basics.copies.rst +* `#28168 `__: TYP: Fix overlapping overloads issue in "2 in, 1 out" ufuncs +* `#28169 `__: TYP: preserve shape-type in ``ndarray.astype()`` +* `#28170 `__: TYP: Fix missing and spurious top-level exports +* `#28172 `__: BUG: Include Python-including headers first +* `#28179 `__: DOC: Remove duplicate wishlist tab in NEPs. +* `#28180 `__: DOC: Update links in HOWTO_RELEASE.rst +* `#28181 `__: CI: replace quansight-labs/setup-python with astral-sh/setup-uv +* `#28183 `__: MAINT: testing: specify python executable to use in extbuild +* `#28186 `__: MAINT: Update main after 2.2.2 release. +* `#28189 `__: MAINT, DOC: Add sphinx extension to allow svg images in PDF docs... +* `#28202 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... 
+* `#28203 `__: BUG: fix data race in ``np.repeat`` +* `#28206 `__: BUG: Remove unnecessary copying and casting from out array in... +* `#28210 `__: corrected the numpy logo visibility issues on darkmode with the... +* `#28211 `__: MAINT: Hide decorator from pytest traceback +* `#28214 `__: ENH: add pkg_config entrypoint +* `#28219 `__: DOC: Add versionadded directive for axis argument in trim_zeros... +* `#28221 `__: BUG: allclose does not warn for invalid value encountered in... +* `#28222 `__: MAINT: Update highway to latest +* `#28223 `__: MAINT: Add [[maybe_unused] to silence some warnings +* `#28226 `__: DOC: Clarify ``__array__`` protocol arguments +* `#28228 `__: BUG: handle case when StringDType na_object is nan in float to... +* `#28229 `__: DOC: Fix a typo in doc/source/dev/development_workflow.rst +* `#28230 `__: DOC: FIx a link in Roadmap +* `#28231 `__: DOC: Fix external links in the navbar of neps webpage +* `#28232 `__: BUG: Fix float128 FPE handling on ARM64 with Clang compiler +* `#28234 `__: BUG: Add cpp atomic support +* `#28235 `__: MAINT: Compile fix for clang-cl on WoA +* `#28241 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28242 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28246 `__: BLD: better fix for clang / ARM compiles +* `#28250 `__: dtype.__repr__: prefer __name__ for user-defined types. +* `#28252 `__: test_casting_unittests.py: remove tuple +* `#28254 `__: MAINT: expire deprecations +* `#28258 `__: DOC: Change the scientific page link in NumPy/MATLAB +* `#28259 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28262 `__: TYP: expire deprecations +* `#28263 `__: ENH: Add ARM64 (aarch64) CI testing +* `#28264 `__: DOC: Remove an invalid link in f2py-examples.rst +* `#28270 `__: TYP: Fixed missing typing information of set_printoptions +* `#28273 `__: CI: update sanitizer CI to use python compiled with ASAN and... 
+* `#28276 `__: BUG: fix incorrect bytes to StringDType coercion +* `#28279 `__: TYP: Fix scalar constructors +* `#28280 `__: TYP: stub ``numpy.matlib`` +* `#28281 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28288 `__: DOC: Correct a typo in Intel License URL +* `#28290 `__: BUG: fix race initializing legacy dtype casts +* `#28291 `__: BUG: Prevent class-bound attr mutation in ``lib._iotools.NameValidator`` +* `#28294 `__: MAINT: Enable building tanh on vector length agnostic architectures +* `#28295 `__: TYP: stub ``numpy._globals`` +* `#28296 `__: TYP: stub ``numpy._expired_attrs_2_0`` +* `#28297 `__: TYP: stub ``numpy._configtool`` and ``numpy._distributor_init`` +* `#28298 `__: TYP: stub ``numpy.lib._iotools`` +* `#28299 `__: TYP: stub ``lib.user_array`` and ``lib._user_array_impl`` +* `#28300 `__: TYP: stub ``lib.introspect`` +* `#28301 `__: TYP: stub ``lib.recfunctions`` +* `#28302 `__: TYP: fix and improve ``numpy._core.arrayprint`` +* `#28303 `__: TYP: stub ``lib._datasource`` and fix ``lib._npyio_impl`` +* `#28304 `__: DOC: Remove reference to python2 +* `#28307 `__: MAINT: bump ``mypy`` to ``1.15.0`` +* `#28312 `__: DOC: remove references to Python 2 +* `#28319 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28320 `__: MAINT: Update actions/cache and use hash. +* `#28323 `__: DOC: Correct a typo in Exception TooHardError +* `#28327 `__: TYP: fix positional- and keyword-only params in ``astype``\ ,... +* `#28328 `__: CI: Update FreeBSD base image in ``cirrus_arm.yml`` +* `#28330 `__: ENH: Ensure ``lib._format_impl.read_array`` handles file reading... +* `#28332 `__: BUG: avoid segfault in np._core.multiarray.scalar +* `#28335 `__: MAINT: Update main after 2.2.3 release. +* `#28336 `__: DOC: Update link to Anaconda Eclipse/PyDev documentation +* `#28338 `__: MAINT: use OpenBLAS 0.3.29 +* `#28339 `__: MAIN: Update c,c++ line length to 88 +* `#28343 `__: BUG: Fix ``linalg.norm`` to handle empty matrices correctly. 
+* `#28350 `__: DOC: fix typo +* `#28353 `__: DOC: Make numpy.fft a clickable link to module +* `#28355 `__: BUG: safer bincount casting +* `#28358 `__: MAINT: No need to check for check for FPEs in casts to/from object +* `#28359 `__: DOC: Make the first paragraph more concise in internals.rst +* `#28361 `__: BUG: Make np.nonzero threading safe +* `#28370 `__: DOC: Revise bullet point formatting in ``arrays.promotions.rst`` +* `#28382 `__: DOC: fix C API docs for ``PyArray_Size`` +* `#28383 `__: DOC: Added links to CTypes and CFFI in Numba +* `#28386 `__: MAINT: Extend the default ruff exclude files +* `#28387 `__: DOC: fix expected exception from StringDType without string coercion +* `#28390 `__: MAINT: speed up slow test under TSAN +* `#28391 `__: CI: use free-threaded build for ASAN tests +* `#28392 `__: CI: build Linux aarch64 wheels on GitHub Actions +* `#28393 `__: BUG: Fix building on s390x with clang +* `#28396 `__: BUG: Fix crackfortran parsing error when a division occurs within... +* `#28404 `__: MAINT: remove legacy ucsnarrow module +* `#28406 `__: BUG: Include Python.h first +* `#28407 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28408 `__: DOC: Update link to Nix in Cross Compilation +* `#28411 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28413 `__: DOC: add scimath in np.lib submodules listing +* `#28414 `__: DOC: Add missing punctuation to the random sampling page +* `#28415 `__: BLD: update cibuildwheel and build PyPy 3.11 wheels [wheel build] +* `#28421 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28426 `__: BUG: Limit the maximal number of bins for automatic histogram... 
+* `#28427 `__: DOC: remove mention of Poly.nickname +* `#28431 `__: MAINT: PY_VERSION_HEX simplify +* `#28436 `__: BUILD: move to manylinux_2_28 wheel builds +* `#28437 `__: DOC: fix documentation for Flag checking functions and macros +* `#28442 `__: ENH: Check for floating point exceptions in dot +* `#28444 `__: DOC: fix URL redirects +* `#28447 `__: DOC: repositioned bitwise_count under bit-wise operations +* `#28451 `__: DOC: Add -avx512_spr to disable AVX512 in build options +* `#28452 `__: TYP: stub ``random._pickle`` +* `#28453 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28455 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.3 to 2.0.4 +* `#28456 `__: MAINT: Bump actions/cache from 4.2.0 to 4.2.2 +* `#28458 `__: MAINT: Bump actions/upload-artifact from 4.5.0 to 4.6.1 +* `#28459 `__: MAINT: Bump github/codeql-action from 3.27.9 to 3.28.11 +* `#28460 `__: MAINT: Bump astral-sh/setup-uv from 5.2.1 to 5.3.1 +* `#28461 `__: MAINT: Update dependabot.yml file +* `#28462 `__: TYP: Add specializations to meshgrid stubs +* `#28464 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28465 `__: MAINT: Bump ossf/scorecard-action from 2.4.0 to 2.4.1 +* `#28466 `__: MAINT: Bump actions/checkout from 4.1.1 to 4.2.2 +* `#28467 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.1.0 to 3.1.1 +* `#28468 `__: MAINT: Bump actions/download-artifact from 4.1.8 to 4.1.9 +* `#28473 `__: DOC: add typing badge to README +* `#28475 `__: MAINT: skip slow_pypy tests on pypy +* `#28477 `__: MAINT: fix typo in normal distribution functions docstrings +* `#28480 `__: ENH: Convert logical from C universal intrinsics to C++ using... +* `#28483 `__: DOC: only change tp_name on CPython +* `#28485 `__: MAINT: Bump actions/setup-python from 5.3.0 to 5.4.0 +* `#28488 `__: fix aarch64 CI run +* `#28489 `__: MAINT: Enable building loop_logical on vector length agnostic... 
+* `#28491 `__: TYP: fix typing errors in ``_core.shape_base`` +* `#28492 `__: TYP: fix typing errors in ``_core.strings`` +* `#28494 `__: TYP: fix typing errors in ``_core.records`` +* `#28495 `__: DOC: let docstring mention that unique_values is now unsorted +* `#28497 `__: TYP: don't use literals in shape-types +* `#28498 `__: TYP: accept non-integer shapes in array constructor without a... +* `#28499 `__: TYP: remove unneseccary cast +* `#28500 `__: TYP: stub ``numpy.random._bounded_integers`` +* `#28502 `__: TYP: stub ``numpy.random._common`` +* `#28503 `__: API: Always allow ``sorted=False`` and make a note about it +* `#28505 `__: TYP: stub ``numpy._core.umath`` +* `#28506 `__: TYP: fix typing errors in ``numpy.lib._arrayterator_impl`` +* `#28507 `__: MAINT: remove ``ma.timer_comparison`` +* `#28508 `__: TYP: fix signatures of ``ndarray.put`` and ``ndarray.view`` +* `#28509 `__: TYP: annotate the missing ``ufunc.resolve_dtypes`` method +* `#28511 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28513 `__: TYP: stub ``numpy._core.overrides`` +* `#28514 `__: TYP: stub ``numpy._utils`` +* `#28515 `__: TYP: stub ``numpy._core._dtype[_ctypes]`` +* `#28517 `__: TYP: stub the remaining ``numpy._core.\*`` modules +* `#28518 `__: TYP: stub the missing submodules of ``numpy.linalg`` +* `#28519 `__: TYP: stub ``numpy._pyinstaller`` +* `#28520 `__: TYP: stub ``numpy.fft.helper`` (deprecated) +* `#28522 `__: TYP: stub ``numpy.f2py`` +* `#28523 `__: TYP: annotate the missing deprecated ``row_stack`` function +* `#28524 `__: CI, TST: Bump to cibuildwheel 2.23 (Pyodide 0.27.0) for WASM... 
+* `#28525 `__: TYP: fix stubtest errors in ``numpy.dtype`` and ``numpy.dtypes.\*`` +* `#28526 `__: TYP: fix stubtest errors in ``timedelta64`` and ``object_`` +* `#28527 `__: TYP: fix stubtest errors in ``numpy.lib._function_base_impl`` +* `#28528 `__: TYP: fix stubtest errors in ``numpy.lib._arraysetops_impl`` +* `#28529 `__: TYP: fix stubtest errors in ``numpy.lib._index_tricks_impl`` +* `#28530 `__: TYP: fix stubtest errors in ``numpy.lib._twodim_base_impl`` +* `#28531 `__: ENH: Add Cygwin extensions to list to copy to CWD in f2py meson... +* `#28532 `__: DOC: minor editorial change +* `#28535 `__: TYP: fix stubtest errors in ``numpy._core`` +* `#28536 `__: TYP: fix stubtest errors in ``numpy._globals`` +* `#28537 `__: TYP: fix stubtest errors in ``numpy.mat[rix]lib`` +* `#28538 `__: TYP: fix stubtest errors in ``numpy.random`` +* `#28539 `__: TYP: fix stubtest errors in ``numpy.testing`` +* `#28540 `__: TYP: fix typing errors in ``numpy.ndarray`` +* `#28541 `__: TYP: fix stubtest error in ``numpy.ma`` +* `#28546 `__: MAINT: Update main after NumPy 2.2.4 release. 
+* `#28547 `__: MAINT: Bump pypa/cibuildwheel from 2.23.0 to 2.23.1 +* `#28555 `__: MAINT: Bump actions/download-artifact from 4.1.9 to 4.2.0 +* `#28556 `__: NEP 54: Change status to Accepted +* `#28560 `__: MAINT: Bump actions/download-artifact from 4.2.0 to 4.2.1 +* `#28561 `__: MAINT: Bump github/codeql-action from 3.28.11 to 3.28.12 +* `#28562 `__: MAINT: Bump actions/upload-artifact from 4.6.1 to 4.6.2 +* `#28563 `__: MAINT: Bump actions/cache from 4.2.2 to 4.2.3 +* `#28568 `__: MAINT: Bump astral-sh/setup-uv from 5.3.1 to 5.4.0 +* `#28569 `__: Fixing various spelling errors +* `#28571 `__: BLD: use ``manylinux_2_28:2025.03.23-1`` [wheel build] +* `#28576 `__: API,ENH: Allow forcing an array result in ufuncs +* `#28577 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28581 `__: MAINT: Bump github/codeql-action from 3.28.12 to 3.28.13 +* `#28586 `__: MAINT: Bump pypa/cibuildwheel from 2.23.1 to 2.23.2 +* `#28587 `__: MAINT: Bump actions/setup-python from 5.4.0 to 5.5.0 +* `#28591 `__: TYP: Type masked array shape, dtype, __int__, and __float__ +* `#28593 `__: TYP: Type ``numpy.ma.min`` +* `#28600 `__: BUG: Set writeable flag for writeable dlpacks. +* `#28601 `__: MAINT: Bump scientific-python/upload-nightly-action from 0.6.1... 
+* `#28607 `__: CI: fix cirrus config [wheel build] +* `#28611 `__: MAINT: Bump astral-sh/setup-uv from 5.4.0 to 5.4.1 +* `#28612 `__: TYP: Type ``ma.max`` and ``ma.ptp`` +* `#28615 `__: ENH: Upgrade Array API version to 2024.12 +* `#28616 `__: TYP: Type ``ma.MaskedArray.min`` +* `#28617 `__: MAINT: Bump actions/dependency-review-action from 4.5.0 to 4.6.0 +* `#28618 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28619 `__: ENH: Use openmp on x86-simd-sort to speed up np.sort and np.argsort +* `#28621 `__: DOC: Fix typo in ``numpy/typing/__init__.py`` +* `#28623 `__: TYP: Type ``ma.MaskedArray.max`` and ``ma.MaskedArray.ptp`` +* `#28624 `__: BUG: fix ``np.vectorize`` for object dtype +* `#28626 `__: DOC: update array API standard version in compatibility page +* `#28627 `__: MAINT: replace string.format() with f-strings +* `#28635 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28637 `__: TYP: Fix overload for ``ma.MaskedArray.{min,max,ptp}`` and ``ma.{min,max,ptp}`` ... +* `#28638 `__: TYP: Type ``MaskedArray.{argmin, argmax}`` and ``np.ma.{argmin,``... +* `#28643 `__: BUG: fix deepcopying StringDType arrays +* `#28644 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28645 `__: DOC: fixes classes decorated with set_module not showing its... 
+* `#28647 `__: DOC: Fix typos found by codespell +* `#28649 `__: ENH: Improve np.linalg.det performance +* `#28653 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28657 `__: TYP: simplified type-aliases in ``numpy._typing`` +* `#28660 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28662 `__: MAINT: Remove distutils CPU dispatcher compatibility code +* `#28664 `__: TYP: Type ``MaskedArray.sort`` +* `#28666 `__: MAINT: Bump github/codeql-action from 3.28.13 to 3.28.14 +* `#28667 `__: TYP: replace ``_ScalarType`` with ``_SCT`` +* `#28668 `__: TYP: replace ``_ArrayType`` with ``_ArrayT`` +* `#28669 `__: TYP: default to ``dtype[Any]`` +* `#28671 `__: SIMD: Fix Highway QSort symbol linking error on aarch32/ASIMD +* `#28672 `__: MAINT: Bump github/codeql-action from 3.28.14 to 3.28.15 +* `#28674 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28675 `__: TYP: fix and improve ``numpy.lib._type_check_impl`` +* `#28676 `__: TYP: fix mypy test failures +* `#28677 `__: TYP: Type ``MaskedArray.partition`` and ``MaskedArray.argpartition`` +* `#28678 `__: DEP: Deprecate ``.T`` property for non-2dim arrays and scalars +* `#28680 `__: TYP: Type ``MaskedArray.take`` and ``np.ma.take`` +* `#28684 `__: TYP: replace ``_DType`` with ``_DTypeT`` +* `#28688 `__: TYP: rename ``_ShapeType`` TypeVar to ``_ShapeT`` +* `#28689 `__: TYP: Type ``MaskedArray.__{ge,gt,le,lt}__`` +* `#28690 `__: TYP: replace ``_SCT`` with ``_ScalarT`` +* `#28693 `__: BLD: fix meson_version warning +* `#28695 `__: DOC: linalg.matrix_transpose: add alias note +* `#28699 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28702 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... 
+* `#28703 `__: MAINT: Improve float16 and float32 printing +* `#28710 `__: ENH: Improve performance for np.result_type +* `#28712 `__: MAINT: ``%i`` → ``%d`` +* `#28715 `__: TYP: Type ``np.ma.{is_masked,ndim,size,ids,iscontiguous}`` +* `#28717 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28719 `__: MAINT: switching from ``%i`` to ``fstrings`` +* `#28720 `__: TYP: drop py310 support +* `#28724 `__: STY: Apply assorted ruff rules (RUF) +* `#28725 `__: STY: Enforce ruff/pycodestyle warnings (W) +* `#28726 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28728 `__: STY: Apply assorted ruff/pyupgrade rules (UP) +* `#28731 `__: BUG: Prevent nanmax/nanmin from copying memmap arrays +* `#28733 `__: TYP: remove ``_typing._UnknownType`` and ``_ArrayLikeUnknown`` +* `#28735 `__: TYP: Type ``MaskedArray.count`` and ``np.ma.count`` +* `#28738 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28739 `__: MNT: get rid of references to Python 3.10 +* `#28740 `__: MAINT: Bump astral-sh/setup-uv from 5.4.1 to 5.4.2 +* `#28741 `__: BUG: Re-enable overriding functions in the ``np.strings`` module. 
+* `#28742 `__: TYP: Type ``MaskedArray.filled`` and ``np.ma.filled`` +* `#28743 `__: MNT: Enforce ruff/pygrep-hooks rules (PGH) +* `#28744 `__: STY: Apply more ruff rules (RUF) +* `#28745 `__: TYP: Type ``MaskedArray.put``\ , ``np.ma.put``\ , ``np.ma.putmask`` +* `#28746 `__: TYP: ``numpy.ma`` squiggly line cleanup +* `#28747 `__: TYP: some ``[arg]partition`` fixes +* `#28748 `__: ENH: Support Python 3.14 +* `#28750 `__: TYP: fix ``count_nonzero`` signature +* `#28751 `__: MNT: discard Python 2 leftover +* `#28752 `__: MNT: Apply ruff/Pylint rule PLW0129 (assertions that never fail) +* `#28754 `__: MNT: Enforce ruff/Pylint Error rules (PLE) +* `#28755 `__: MNT: Apply assorted ruff/Pylint Refactor rules (PLR) +* `#28756 `__: MNT: Apply assorted ruff/Pylint Warning rules (PLW) +* `#28757 `__: BUG: Fix AVX512_SPR dispatching for SVML half-precision operations +* `#28760 `__: STY: Apply ruff/pyupgrade rule UP032 +* `#28763 `__: STY: Use f-string instead of ``format`` call +* `#28764 `__: MNT: Enforce ruff rules: Flynt (FLY) and flake8-pie (PIE) +* `#28765 `__: MNT: Enforce ruff/flake8-bugbear rules (B) +* `#28766 `__: TYP: Type ``MaskedArray.compressed`` and ``np.ma.compressed`` +* `#28768 `__: MAINT: getting rid of old ``%`` and ``.format(...)`` strings... 
+* `#28769 `__: ENH: Improve Floating Point Cast Performance on ARM +* `#28770 `__: MNT: Enforce ruff/pyupgrade rules (UP) +* `#28771 `__: ENH: Include offset in error message when fallocate() fails +* `#28775 `__: STY: Partially apply ruff/pycodestyle rules (E) +* `#28779 `__: MAINT: Update main after Numpy 2.2.5 release +* `#28789 `__: BUG: Re-enable GCC function-specific optimization attributes +* `#28793 `__: TYP: Type ``np.ma.allclose`` and ``np.ma.allequal`` +* `#28798 `__: TST: skip test if spawning threads triggers a RuntimeError +* `#28803 `__: MAINT: Bump github/codeql-action from 3.28.15 to 3.28.16 +* `#28804 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28806 `__: BUG: Fix `` __array__(None)`` to preserve dtype +* `#28807 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28808 `__: CI: Make clang_TSAN CI job use cpython_sanity docker image +* `#28809 `__: TYP: write ``dtype[Any]`` as ``dtype`` +* `#28810 `__: TYP: replace ``_Self`` type parameters with ``typing.Self`` +* `#28811 `__: TYP: remove unnecessary scalar-type ``Any`` type-args +* `#28816 `__: MAINT: Bump actions/setup-python from 5.5.0 to 5.6.0 +* `#28817 `__: MAINT: Bump astral-sh/setup-uv from 5.4.2 to 6.0.0 +* `#28818 `__: MAINT: Bump actions/download-artifact from 4.2.1 to 4.3.0 +* `#28819 `__: TYP: simplify redundant unions of builtin scalar types +* `#28820 `__: TYP: ``None`` at the end of a union +* `#28821 `__: BUG: Use unrotated companion matrix in polynomial.polyroots. 
+* `#28831 `__: TYP: Fix type annotations for ``np.ma.nomask`` and ``np.ma.MaskType`` +* `#28832 `__: TYP: Type ``np.ma.getmask`` +* `#28833 `__: TYP: Type ``np.ma.is_mask`` +* `#28836 `__: ENH: Provide Windows 11 ARM64 wheels (#22530) +* `#28841 `__: BUG: Fix Clang warning in loops_half.dispatch.c.src +* `#28845 `__: TYP: Type ``MaskedArray.nonzero`` +* `#28847 `__: TYP: Use _Array1D alias in ``numpy.ma.core.pyi`` +* `#28848 `__: TYP: Type ``MaskedArray.ravel`` +* `#28849 `__: TYP: Type ``MaskedArray.repeat``\ , improve overloads for ``NDArray.repeat``\... +* `#28850 `__: TYP: Type ``MaskedArray.swapaxes`` +* `#28854 `__: MAINT: Bump pypa/cibuildwheel from 2.23.2 to 2.23.3 +* `#28855 `__: TYP: add missing ``mod`` params to ``__[r]pow__`` +* `#28856 `__: TYP: generic ``StringDType`` +* `#28857 `__: TYP: implicit ``linalg`` private submodule re-exports +* `#28858 `__: TYP: fix the ``set_module`` signature +* `#28859 `__: DOC: Replace http:// with https:// +* `#28860 `__: BLD: update vendored Meson: v1.6.1 and iOS support +* `#28862 `__: BUG: fix stringdtype singleton thread safety +* `#28863 `__: TYP: Improve consistency of (masked) array typing aliases +* `#28867 `__: TYP: Type ``MaskedArray.{__setmask__,mask,harden_mask,soften_mask,hardmask,unsha``... 
+* `#28868 `__: TYP: Type ``MaskedArray.{imag, real, baseclass, mT}`` +* `#28869 `__: MAINT: Bump astral-sh/setup-uv from 6.0.0 to 6.0.1 +* `#28870 `__: MNT: retire old script for SVN repositories +* `#28871 `__: MNT: retire script superseded by ruff rule W605 +* `#28872 `__: DOC: consistent and updated LICENSE files for wheels +* `#28874 `__: DOC: ``numpy.i`` will not be included as part of SWIG +* `#28876 `__: MNT: discard unused function using os.system() +* `#28877 `__: DOC: update content of cross compilation build docs +* `#28878 `__: STY: Enforce more ruff rules +* `#28879 `__: STY: Apply assorted ruff/refurb rules (FURB) +* `#28880 `__: TYP: Type ``MaskedArray.all`` and ``MaskedArray.any`` +* `#28882 `__: MAINT: address warning in SWIG tests +* `#28883 `__: MAINT: from_dlpack thread safety fixes +* `#28884 `__: DEP: deprecate ``numpy.typing.NBitBase`` +* `#28887 `__: MAINT: Bump github/codeql-action from 3.28.16 to 3.28.17 +* `#28888 `__: DOC: math mode x to \times in docstring for numpy.linalg.multi_dot +* `#28892 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28893 `__: TYP: remove non-existent extended-precision scalar types +* `#28898 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. +* `#28904 `__: BLD: update vendored Meson to include iOS fix +* `#28905 `__: TYP: Test ``MaskedArray.transpose`` and ``MaskedArray.T``\ ,... +* `#28906 `__: TYP: np.argmin and np.argmax overload changes +* `#28908 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28912 `__: TYP: add ``float64`` overloads to ``{lin,log,geom}space`` +* `#28918 `__: DOC: Fixes absent line numbers on link to classes decorated with... 
+* `#28923 `__: BUG: Use string conversion defined on dtype for .str +* `#28927 `__: MAINT: Remove outdated ``MaskedArray.__div__`` and ``MaskedArray.__idiv__`` +* `#28928 `__: MNT: add support for 3.14.0b1 +* `#28929 `__: MAINT: remove py2 ``__div__`` methods from ``poly1d`` and ``ABCPolyBase`` +* `#28930 `__: MAINT: remove py2 ``__div__`` remnants from the tests +* `#28931 `__: MAINT: remove py2 ``__div__`` methods from ``lib.user_array.container`` +* `#28932 `__: MAINT: remove references to 256-bits extended precision types +* `#28933 `__: MAINT: Use consistent naming for ``numpy/typing/tests/data/fail/ma.pyi`` +* `#28934 `__: TYP, TST: improved type-testing +* `#28935 `__: MAINT: Enable ruff E251 +* `#28936 `__: TST: Prevent import error when tests are not included in the... +* `#28937 `__: CI: fix TSAN CI by using a different docker image +* `#28938 `__: MNT: clean up free-threaded CI configuration +* `#28939 `__: MAINT: Bump actions/dependency-review-action from 4.6.0 to 4.7.0 +* `#28940 `__: TYP: optional type parameters for ``ndarray`` and ``flatiter`` +* `#28941 `__: DOC: Fix titles in ``development_ghcodespaces.rst`` +* `#28945 `__: MAINT: Enable linting with ruff E501 +* `#28952 `__: MAINT: Bump actions/dependency-review-action from 4.7.0 to 4.7.1 +* `#28954 `__: MAINT: Enable linting with ruff E501 for numpy._core +* `#28956 `__: DOC: Remove references to Python 2/3 +* `#28958 `__: TYP: reject complex scalar types in ``ndarray.__ifloordiv__`` +* `#28959 `__: TYP: remove redundant ``ndarray`` inplace operator overloads +* `#28960 `__: TYP: fix mypy & pyright errors in ``np.matrix`` +* `#28961 `__: DEP: finalize removal of ``numpy.compat`` +* `#28962 `__: TYP: type-testing without the mypy plugin +* `#28963 `__: MAINT: Update ruff to 0.11.9 in linting requirements +* `#28969 `__: MNT: Enforce ruff/isort rules (I) +* `#28971 `__: MAINT: Enable linting with ruff E501 +* `#28972 `__: MNT: Get rif of ``# pylint: `` pragma controls +* `#28974 `__: MNT: Get rid 
of ``version: $Id`` CVS tags +* `#28975 `__: MNT: import numpy as np +* `#28976 `__: MNT: Get rid of Pyflakes / flake8 +* `#28977 `__: MNT: Enforce ruff/flake8-implicit-str-concat rules (ISC) +* `#28978 `__: MNT: Enforce ruff/pandas-vet rules (PD) +* `#28981 `__: STY: reformat the ``_typing`` imports without trailing commas +* `#28982 `__: TYP: Gradual shape type defaults +* `#28984 `__: MNT: Use isinstance() instead of comparing type() +* `#28986 `__: TYP: Type ``MaskedArray.__{iadd,isub,imul,itruediv,ifloordiv,ipow}__`` +* `#28987 `__: MNT: Align ruff pin between ``requirements/linter_requirements.txt``... +* `#28988 `__: TYP: add missing ``ndarray.__{add,mul}__`` ``character`` type... +* `#28989 `__: MAINT: Bump github/codeql-action from 3.28.17 to 3.28.18 +* `#28990 `__: Revert "DEP: Deprecate ``.T`` property for non-2dim arrays and... +* `#28993 `__: MAINT: update NPY_FEATURE_VERSION after dropping python 3.10 +* `#28994 `__: TYP: allow inplace division of ``NDArray[timedelta64]`` by floats +* `#28995 `__: TYP: remove ``from __future__ import annotations`` +* `#28998 `__: MAINT: Update main after 2.2.6 release. +* `#29002 `__: MAINT: Update download-wheels for multiple pages +* `#29006 `__: ENH: Disable the alloc cache under address and memory sanitizers +* `#29008 `__: MNT: fix CI issues on main +* `#29018 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#29033 `__: BUG: Fix workflow error +* `#29042 `__: MNT: constant string arrays instead of pointers in C +* `#29043 `__: BUG: Avoid compile errors in f2py modules +* `#29044 `__: BUG: Fix f2py derived types in modules +* `#29046 `__: BUG: Fix cache use regression +* `#29047 `__: REL: Prepare for the NumPy 2.3.0rc1 release [wheel build] +* `#29070 `__: TYP: Various typing fixes. 
+* `#29072 `__: MAINT: use pypy 3.11 nightly which has a fix for ctypeslib +* `#29073 `__: BLD: use sonoma image on Cirrus for wheel build +* `#29074 `__: BUG: add bounds-checking to in-place string multiply +* `#29082 `__: BLD: bump OpenBLAS version, use OpenBLAS for win-arm64 [wheel... +* `#29089 `__: MNT: Avoid use of deprecated _PyDict_GetItemStringWithError in... +* `#29099 `__: BUG: f2py: thread-safe forcomb (#29091) +* `#29100 `__: TYP: fix NDArray[integer] inplace operator mypy issue +* `#29101 `__: PERF: Make NpzFile member existence constant time +* `#29116 `__: MAINT: Update to vs2022 in NumPy 2.3.x [wheel build] +* `#29118 `__: MAINT: fix SPDX license expressions for LAPACK, GCC runtime libs +* `#29132 `__: MAINT: Fix for segfaults with GCC 15 + diff --git a/doc/conftest.py b/doc/conftest.py index 176759c1816b..99d6797d8a06 100644 --- a/doc/conftest.py +++ b/doc/conftest.py @@ -1,10 +1,12 @@ """ Pytest configuration and fixtures for the Numpy test suite. """ +import doctest + +import matplotlib import pytest + import numpy -import matplotlib -import doctest matplotlib.use('agg', force=True) diff --git a/doc/neps/conf.py b/doc/neps/conf.py index 343c32681c91..33faaf17ff64 100644 --- a/doc/neps/conf.py +++ b/doc/neps/conf.py @@ -16,6 +16,7 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. 
# from datetime import datetime + # import sys # sys.path.insert(0, os.path.abspath('.')) diff --git a/doc/neps/nep-0016-benchmark.py b/doc/neps/nep-0016-benchmark.py index 58b5164ab3df..e3783baa2de5 100644 --- a/doc/neps/nep-0016-benchmark.py +++ b/doc/neps/nep-0016-benchmark.py @@ -1,14 +1,17 @@ -import perf import abc + +import perf + import numpy as np + class NotArray: pass class AttrArray: __array_implementer__ = True -class ArrayBase(abc.ABC): +class ArrayBase(abc.ABC): # noqa: B024 pass class ABCArray1(ArrayBase): diff --git a/doc/neps/nep-0021-advanced-indexing.rst b/doc/neps/nep-0021-advanced-indexing.rst index badd41875af2..11ef238d6179 100644 --- a/doc/neps/nep-0021-advanced-indexing.rst +++ b/doc/neps/nep-0021-advanced-indexing.rst @@ -123,7 +123,7 @@ with shape ``(1,)``, not a 2D sub-matrix with shape ``(1, 1)``. Mixed indexing seems so tricky that it is tempting to say that it never should be used. However, it is not easy to avoid, because NumPy implicitly adds full slices if there are fewer indices than the full dimensionality of the indexed -array. This means that indexing a 2D array like `x[[0, 1]]`` is equivalent to +array. This means that indexing a 2D array like ``x[[0, 1]]`` is equivalent to ``x[[0, 1], :]``. These cases are not surprising, but they constrain the behavior of mixed indexing. @@ -236,7 +236,7 @@ be deduced: For the beginning, this probably means cases where ``arr[ind]`` and ``arr.oindex[ind]`` return different results give deprecation warnings. This includes every use of vectorized indexing with multiple integer arrays. - Due to the transposing behaviour, this means that``arr[0, :, index_arr]`` + Due to the transposing behaviour, this means that ``arr[0, :, index_arr]`` will be deprecated, but ``arr[:, 0, index_arr]`` will not for the time being. 7. To ensure that existing subclasses of `ndarray` that override indexing @@ -285,7 +285,7 @@ Open Questions Copying always "fixes" this possible inconsistency. 
* The final state to morph plain indexing in is not fixed in this PEP. - It is for example possible that `arr[index]`` will be equivalent to + It is for example possible that ``arr[index]`` will be equivalent to ``arr.oindex`` at some point in the future. Since such a change will take years, it seems unnecessary to make specific decisions at this time. diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index 7fb8c9734900..4a3d268697a2 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -102,14 +102,14 @@ a complete implementation would look like the following: The implementation above exemplifies the simplest case, but the overall idea is that libraries will implement a ``__duckarray__`` method that returns the original object, and an ``__array__`` method that either creates and returns an -appropriate NumPy array, or raises a``TypeError`` to prevent unintentional use +appropriate NumPy array, or raises a ``TypeError`` to prevent unintentional use as an object in a NumPy array (if ``np.asarray`` is called on an arbitrary object that does not implement ``__array__``, it will create a NumPy array scalar). In case of existing libraries that don't already implement ``__array__`` but would like to use duck array typing, it is advised that they introduce -both ``__array__`` and``__duckarray__`` methods. +both ``__array__`` and ``__duckarray__`` methods. Usage ----- diff --git a/doc/neps/tools/build_index.py b/doc/neps/tools/build_index.py index a3a597066040..f727f0b0cc81 100644 --- a/doc/neps/tools/build_index.py +++ b/doc/neps/tools/build_index.py @@ -4,11 +4,12 @@ categories. 
""" -import os -import jinja2 import glob +import os import re +import jinja2 + def render(tpl_path, context): path, filename = os.path.split(tpl_path) diff --git a/doc/preprocess.py b/doc/preprocess.py index 8539487ab185..b2e64ab6393a 100755 --- a/doc/preprocess.py +++ b/doc/preprocess.py @@ -2,6 +2,7 @@ import os from string import Template + def main(): doxy_gen(os.path.abspath(os.path.join('..'))) diff --git a/doc/release/upcoming_changes/26018.change.rst b/doc/release/upcoming_changes/26018.change.rst deleted file mode 100644 index 9d7c139be183..000000000000 --- a/doc/release/upcoming_changes/26018.change.rst +++ /dev/null @@ -1,7 +0,0 @@ -``unique_values`` may return unsorted data ------------------------------------------- -The relatively new function (added in NumPy 2.0) ``unique_values`` may now -return unsorted results. Just as ``unique_counts`` and ``unique_all`` -these never guaranteed a sorted result, however, the result -was sorted until now. In cases where these do return a sorted result, this -may change in future releases to improve performance. diff --git a/doc/release/upcoming_changes/26018.performance.rst b/doc/release/upcoming_changes/26018.performance.rst deleted file mode 100644 index ffeab51dbdf6..000000000000 --- a/doc/release/upcoming_changes/26018.performance.rst +++ /dev/null @@ -1,7 +0,0 @@ -Performance improvements to ``np.unique`` ------------------------------------------ -``np.unique`` now tries to use a hash table to find unique values instead of sorting -values before finding unique values. This is limited to certain dtypes for now, and -the function is now faster for those dtypes. The function now also exposes a ``sorted`` -parameter to allow returning unique values as they were found, instead of sorting them -afterwards. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/26745.highlight.rst b/doc/release/upcoming_changes/26745.highlight.rst deleted file mode 100644 index 5636f919c80d..000000000000 --- a/doc/release/upcoming_changes/26745.highlight.rst +++ /dev/null @@ -1,10 +0,0 @@ -Interactive examples in the NumPy documentation ------------------------------------------------ - -The NumPy documentation includes a number of examples that -can now be run interactively in your browser using WebAssembly -and Pyodide. - -Please note that the examples are currently experimental in -nature and may not work as expected for all methods in the -public API. diff --git a/doc/release/upcoming_changes/27288.improvement.rst b/doc/release/upcoming_changes/27288.improvement.rst deleted file mode 100644 index c7319554c63f..000000000000 --- a/doc/release/upcoming_changes/27288.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Scalar comparisons between non-comparable dtypes such as - `np.array(1) == np.array('s')` now return a NumPy bool instead of - a Python bool. \ No newline at end of file diff --git a/doc/release/upcoming_changes/27789.new_function.rst b/doc/release/upcoming_changes/27789.new_function.rst deleted file mode 100644 index 734a0c3bc2b5..000000000000 --- a/doc/release/upcoming_changes/27789.new_function.rst +++ /dev/null @@ -1,5 +0,0 @@ -New function `numpy.strings.slice` ----------------------------------- -The new function `numpy.strings.slice` was added, which implements fast -native slicing of string arrays. It supports the full slicing API including -negative slice offsets and steps. 
\ No newline at end of file diff --git a/doc/release/upcoming_changes/27883.c_api.rst b/doc/release/upcoming_changes/27883.c_api.rst deleted file mode 100644 index 107e0036c5c2..000000000000 --- a/doc/release/upcoming_changes/27883.c_api.rst +++ /dev/null @@ -1,4 +0,0 @@ -* `NpyIter_GetTransferFlags` is now available to check if - the iterator needs the Python API or if casts may cause floating point - errors (FPE). FPEs can for example be set when casting ``float64(1e300)`` - to ``float32`` (overflow to infinity) or a NaN to an integer (invalid value). \ No newline at end of file diff --git a/doc/release/upcoming_changes/27883.change.rst b/doc/release/upcoming_changes/27883.change.rst deleted file mode 100644 index ea68771efba3..000000000000 --- a/doc/release/upcoming_changes/27883.change.rst +++ /dev/null @@ -1,17 +0,0 @@ -Changes to the main iterator and potential numerical changes ------------------------------------------------------------- -The main iterator, used in math functions and via ``np.nditer`` from Python -and ``NpyIter`` in C, now behaves differently for some buffered iterations. -This means that: - -* The buffer size used will often be smaller than the maximum buffer sized - allowed by the ``buffersize`` parameter. -* The "growinner" flag is now honored with buffered reductions when no operand - requires buffering. - -For ``np.sum()`` such changes in buffersize may slightly change numerical -results of floating point operations. -Users who use "growinner" for custom reductions could notice -changes in precision (for example, in NumPy we removed it from -``einsum`` to avoid most precision changes and improve precision -for some 64bit floating point inputs). 
diff --git a/doc/release/upcoming_changes/27998.c_api.rst b/doc/release/upcoming_changes/27998.c_api.rst deleted file mode 100644 index edc6371af1f9..000000000000 --- a/doc/release/upcoming_changes/27998.c_api.rst +++ /dev/null @@ -1,10 +0,0 @@ -New `NpyIter_GetTransferFlags` and ``NpyIter_IterationNeedsAPI`` change ------------------------------------------------------------------------ -NumPy now has the new `NpyIter_GetTransferFlags` function as a more precise -way checking of iterator/buffering needs. I.e. whether the Python API/GIL is -required or floating point errors may occur. -This function is also faster if you already know your needs without buffering. - -The ``NpyIter_IterationNeedsAPI`` function now performs all the checks that were -previously performed at setup time. While it was never necessary to call it -multiple times, doing so will now have a larger cost. diff --git a/doc/release/upcoming_changes/28080.c_api.rst b/doc/release/upcoming_changes/28080.c_api.rst deleted file mode 100644 index f72be7ef52fe..000000000000 --- a/doc/release/upcoming_changes/28080.c_api.rst +++ /dev/null @@ -1 +0,0 @@ -* ``NpyIter`` now has no limit on the number of operands it supports. diff --git a/doc/release/upcoming_changes/28080.improvement.rst b/doc/release/upcoming_changes/28080.improvement.rst deleted file mode 100644 index 19b85ae3c96a..000000000000 --- a/doc/release/upcoming_changes/28080.improvement.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``np.nditer`` now has no limit on the number of supported operands - (C-integer). 
diff --git a/doc/release/upcoming_changes/28102.change.rst b/doc/release/upcoming_changes/28102.change.rst deleted file mode 100644 index bd54378a652e..000000000000 --- a/doc/release/upcoming_changes/28102.change.rst +++ /dev/null @@ -1,6 +0,0 @@ -The minimum supported GCC version is now 9.3.0 ----------------------------------------------- -The minimum supported version was updated from 8.4.0 to 9.3.0, -primarily in order to reduce the chance of platform-specific bugs in old GCC -versions from causing issues. - diff --git a/doc/release/upcoming_changes/28129.deprecation.rst b/doc/release/upcoming_changes/28129.deprecation.rst deleted file mode 100644 index b1beb0c5cca3..000000000000 --- a/doc/release/upcoming_changes/28129.deprecation.rst +++ /dev/null @@ -1,4 +0,0 @@ -* The ``numpy.typing.mypy_plugin`` has been deprecated in favor of platform-agnostic - static type inference. Please remove ``numpy.typing.mypy_plugin`` from the ``plugins`` - section of your mypy configuration. If this change results in new errors being - reported, kindly open an issue. diff --git a/doc/release/upcoming_changes/28214.new_feature.rst b/doc/release/upcoming_changes/28214.new_feature.rst deleted file mode 100644 index eb95a0739e79..000000000000 --- a/doc/release/upcoming_changes/28214.new_feature.rst +++ /dev/null @@ -1,23 +0,0 @@ -NumPy now registers its pkg-config paths with the pkgconf_ PyPI package ------------------------------------------------------------------------ - -The pkgconf_ PyPI package provides an interface for projects like NumPy to -register their own paths to be added to the pkg-config search path. This means -that when using pkgconf_ from PyPI, NumPy will be discoverable without needing -for any custom environment configuration. - -.. attention:: Attention - - This only applies when using the pkgconf_ package from PyPI_, or put another - way, this only applies when installing pkgconf_ via a Python package - manager. 
- - If you are using ``pkg-config`` or ``pkgconf`` provided by your system, or - any other source that does not use the pkgconf-pypi_ project, the NumPy - pkg-config directory will not be automatically added to the search path. In - these situations, you might want to use ``numpy-config``. - - -.. _pkgconf: https://github.com/pypackaging-native/pkgconf-pypi -.. _PyPI: https://pypi.org/ -.. _pkgconf-pypi: https://github.com/pypackaging-native/pkgconf-pypi diff --git a/doc/release/upcoming_changes/28250.improvement.rst b/doc/release/upcoming_changes/28250.improvement.rst deleted file mode 100644 index 703a8bb0c2e1..000000000000 --- a/doc/release/upcoming_changes/28250.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The ``__repr__`` for user-defined dtypes now prefers the ``__name__`` of the - custom dtype over a more generic name constructed from its ``kind`` and - ``itemsize``. diff --git a/doc/release/upcoming_changes/28254.expired.rst b/doc/release/upcoming_changes/28254.expired.rst deleted file mode 100644 index 5f391eb6cbe2..000000000000 --- a/doc/release/upcoming_changes/28254.expired.rst +++ /dev/null @@ -1,29 +0,0 @@ -* Remove deprecated macros like ``NPY_OWNDATA`` from cython interfaces in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) -* Remove ``numpy/npy_1_7_deprecated_api.h`` and C macros like ``NPY_OWNDATA`` in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) -* Remove alias ``generate_divbyzero_error`` to ``npy_set_floatstatus_divbyzero`` and ``generate_overflow_error`` to ``npy_set_floatstatus_overflow`` (deprecated since 1.10) -* Remove ``np.tostring`` (deprecated since 1.19) -* Raise on ``np.conjugate`` of non-numeric types (deprecated since 1.13) -* Raise when using ``np.bincount(...minlength=None)``, use 0 instead (deprecated since 1.14) -* Passing ``shape=None`` to functions with a non-optional shape argument errors, use ``()`` instead (deprecated since 1.20) -* Inexact matches for ``mode`` and ``searchside`` raise (deprecated since 
1.20) -* Setting ``__array_finalize__ = None`` errors (deprecated since 1.23) -* ``np.fromfile`` and ``np.fromstring`` error on bad data, previously they would guess (deprecated since 1.18) -* ``datetime64`` and ``timedelta64`` construction with a tuple no longer accepts an ``event`` value, either use a two-tuple of (unit, num) or a 4-tuple of (unit, num, den, 1) (deprecated since 1.14) -* When constructing a ``dtype`` from a class with a ``dtype`` attribute, that attribute must be a dtype-instance rather than a thing that can be parsed as a dtype instance (deprecated in 1.19). At some point the whole construct of using a dtype attribute will be deprecated (see #25306) -* Passing booleans as partition index errors (deprecated since 1.23) -* Out-of-bounds indexes error even on empty arrays (deprecated since 1.20) -* ``np.tostring`` has been removed, use ``tobytes`` instead (deprecated since 1.19) -* Disallow make a non-writeable array writeable for arrays with a base that do not own their data (deprecated since 1.17) -* ``concatenate()`` with ``axis=None`` uses ``same-kind`` casting by default, not ``unsafe`` (deprecated since 1.20) -* Unpickling a scalar with object dtype errors (deprecated since 1.20) -* The binary mode of ``fromstring`` now errors, use ``frombuffer`` instead (deprecated since 1.14) -* Converting ``np.inexact`` or ``np.floating`` to a dtype errors (deprecated since 1.19) -* Converting ``np.complex``, ``np.integer``, ``np.signedinteger``, ``np.unsignedinteger``, ``np.generic`` to a dtype errors (deprecated since 1.19) -* The Python built-in ``round`` errors for complex scalars. Use ``np.round`` or ``scalar.round`` instead (deprecated since 1.19) -* 'np.bool' scalars can no longer be interpreted as an index (deprecated since 1.19) -* Parsing an integer via a float string is no longer supported. (deprecated since 1.23) To avoid this error you can - * make sure the original data is stored as integers. - * use the ``converters=float`` keyword argument. 
- * Use ``np.loadtxt(...).astype(np.int64)`` -* The use of a length 1 tuple for the ufunc ``signature`` errors. Use ``dtype`` or fill the tuple with ``None`` (deprecated since 1.19) -* Special handling of matrix is in np.outer is removed. Convert to a ndarray via ``matrix.A`` (deprecated since 1.20) diff --git a/doc/release/upcoming_changes/28343.change.rst b/doc/release/upcoming_changes/28343.change.rst deleted file mode 100644 index 378ef775b62e..000000000000 --- a/doc/release/upcoming_changes/28343.change.rst +++ /dev/null @@ -1 +0,0 @@ -* The vector norm ``ord=inf`` and the matrix norms ``ord={1, 2, inf, 'nuc'}`` now always returns zero for empty arrays. Empty arrays have at least one axis of size zero. This affects `np.linalg.norm`, `np.linalg.vector_norm`, and `np.linalg.matrix_norm`. Previously, NumPy would raises errors or return zero depending on the shape of the array. diff --git a/doc/release/upcoming_changes/28426.change.rst b/doc/release/upcoming_changes/28426.change.rst deleted file mode 100644 index d1c48640eed0..000000000000 --- a/doc/release/upcoming_changes/28426.change.rst +++ /dev/null @@ -1,6 +0,0 @@ -Changes to automatic bin selection in numpy.histogram ------------------------------------------------------ -The automatic bin selection algorithm in ``numpy.histogram`` has been modified -to avoid out-of-memory errors for samples with low variation. -For full control over the selected bins the user can use set -the ``bin`` or ``range`` parameters of ``numpy.histogram``. 
diff --git a/doc/release/upcoming_changes/28436.change.rst b/doc/release/upcoming_changes/28436.change.rst deleted file mode 100644 index 60149e55a4d0..000000000000 --- a/doc/release/upcoming_changes/28436.change.rst +++ /dev/null @@ -1,10 +0,0 @@ -Build manylinux_2_28 wheels ---------------------------- - -Wheels for linux systems will use the ``manylinux_2_28`` tag (instead of the ``manylinux2014`` tag), which means -dropping support for redhat7/centos7, amazonlinux2, debian9, ubuntu18.04, and -other pre-glibc2.28 operating system versions, as per the `PEP 600 support -table`_. - -.. _`PEP 600 support table`: https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#pep600-compliance-check - diff --git a/doc/release/upcoming_changes/28442.improvement.rst b/doc/release/upcoming_changes/28442.improvement.rst deleted file mode 100644 index 16d71bde19c5..000000000000 --- a/doc/release/upcoming_changes/28442.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -* ``np.dot`` now reports floating point exceptions. diff --git a/doc/release/upcoming_changes/28569.change.rst b/doc/release/upcoming_changes/28569.change.rst deleted file mode 100644 index f9d26fda0484..000000000000 --- a/doc/release/upcoming_changes/28569.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* A spelling error in the error message returned when converting a string to a float with the - method ``np.format_float_positional`` has been fixed. diff --git a/doc/release/upcoming_changes/28576.new_feature.rst b/doc/release/upcoming_changes/28576.new_feature.rst deleted file mode 100644 index 2c50887a49f2..000000000000 --- a/doc/release/upcoming_changes/28576.new_feature.rst +++ /dev/null @@ -1,15 +0,0 @@ -Allow ``out=...`` in ufuncs to ensure array result --------------------------------------------------- -NumPy has the sometimes difficult behavior that it currently usually -returns scalars rather than 0-D arrays (even if the inputs were 0-D arrays). -This is especially problematic for non-numerical dtypes (e.g. 
``object``). - -For ufuncs (i.e. most simple math functions) it is now possible -to use ``out=...`` (literally `...`, e.g. ``out=Ellipsis``) which is identical in behavior to ``out`` not -being passed, but will ensure a non-scalar return. -This spelling is borrowed from ``arr1d[0, ...]`` where the ``...`` -also ensures a non-scalar return. - -Other functions with an ``out=`` kwarg should gain support eventually. -Downstream libraries that interoperate via ``__array_ufunc__`` or -``__array_function__`` may need to adapt to support this. diff --git a/doc/release/upcoming_changes/28615.change.rst b/doc/release/upcoming_changes/28615.change.rst deleted file mode 100644 index 58b751e40704..000000000000 --- a/doc/release/upcoming_changes/28615.change.rst +++ /dev/null @@ -1,5 +0,0 @@ -* NumPy's ``__array_api_version__`` was upgraded from ``2023.12`` to ``2024.12``. -* `numpy.count_nonzero` for ``axis=None`` (default) now returns a NumPy scalar - instead of a Python integer. -* The parameter ``axis`` in `numpy.take_along_axis` function has now a default - value of ``-1``. diff --git a/doc/release/upcoming_changes/28669.new_feature.rst b/doc/release/upcoming_changes/28669.new_feature.rst deleted file mode 100644 index 2953a5123ccc..000000000000 --- a/doc/release/upcoming_changes/28669.new_feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The type parameter of ``np.dtype`` now defaults to ``typing.Any``. - This way, static type-checkers will infer ``dtype: np.dtype`` as - ``dtype: np.dtype[Any]``, without reporting an error. diff --git a/doc/release/upcoming_changes/28703.change.rst b/doc/release/upcoming_changes/28703.change.rst deleted file mode 100644 index 87bb431951f9..000000000000 --- a/doc/release/upcoming_changes/28703.change.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Printing of ``np.float16`` and ``np.float32`` scalars and arrays have been improved by - adjusting the transition to scientific notation based on the floating point precision. 
- A new legacy ``np.printoptions`` mode ``'2.2'`` has been added for backwards compatibility. diff --git a/doc/release/upcoming_changes/28713.change.rst b/doc/release/upcoming_changes/28713.change.rst deleted file mode 100644 index 5e5c5adde88b..000000000000 --- a/doc/release/upcoming_changes/28713.change.rst +++ /dev/null @@ -1 +0,0 @@ -Remove use of -Wl,-ld_classic on macOS. This hack is no longer needed by Spack, and results in libraries that cannot link to other libraries built with ld (new). diff --git a/doc/release/upcoming_changes/28741.change.rst b/doc/release/upcoming_changes/28741.change.rst deleted file mode 100644 index ca9531f490d8..000000000000 --- a/doc/release/upcoming_changes/28741.change.rst +++ /dev/null @@ -1 +0,0 @@ -Re-enable overriding functions in the :mod:`numpy.strings` module. diff --git a/doc/release/upcoming_changes/28769.performance.rst b/doc/release/upcoming_changes/28769.performance.rst deleted file mode 100644 index 7fb8f02282f6..000000000000 --- a/doc/release/upcoming_changes/28769.performance.rst +++ /dev/null @@ -1,8 +0,0 @@ -Performance improvements for ``np.float16`` casts --------------------------------------------------- -Earlier, floating point casts to and from ``np.float16`` types -were emulated in software on all platforms. - -Now, on ARM devices that support Neon float16 intrinsics (such as -recent Apple Silicon), the native float16 path is used to achieve -the best performance. diff --git a/doc/release/upcoming_changes/28856.improvement.rst b/doc/release/upcoming_changes/28856.improvement.rst deleted file mode 100644 index 83911035f097..000000000000 --- a/doc/release/upcoming_changes/28856.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -* ``np.dtypes.StringDType`` is now a - `generic type `_ which - accepts a type argument for ``na_object`` that defaults to ``typing.Never``. - For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, - and ``StringDType()`` returns a ``StringDType[typing.Never]``. 
diff --git a/doc/release/upcoming_changes/28884.deprecation.rst b/doc/release/upcoming_changes/28884.deprecation.rst deleted file mode 100644 index c1be55fb0dd3..000000000000 --- a/doc/release/upcoming_changes/28884.deprecation.rst +++ /dev/null @@ -1,28 +0,0 @@ -``numpy.typing.NBitBase`` deprecation -------------------------------------- -The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in a future version. - -This type was previously intended to be used as a generic upper bound for type-parameters, for example: - -.. code-block:: python - - import numpy as np - import numpy.typing as npt - - def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... - -But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete subtypes, causing static type-checkers to reject ``x: np.float64 = f(np.complex128(42j))``. - -So instead, the better approach is to use ``typing.overload``: - -.. code-block:: python - - import numpy as np - from typing import overload - - @overload - def f(x: np.complex64) -> np.float32: ... - @overload - def f(x: np.complex128) -> np.float64: ... - @overload - def f(x: np.clongdouble) -> np.longdouble: ... diff --git a/doc/release/upcoming_changes/29030.compatibility.rst b/doc/release/upcoming_changes/29030.compatibility.rst new file mode 100644 index 000000000000..cf08551e28ee --- /dev/null +++ b/doc/release/upcoming_changes/29030.compatibility.rst @@ -0,0 +1,6 @@ +* NumPy's C extension modules have begun to use multi-phase initialisation, + as defined by :pep:`489`. As part of this, a new explicit check has been added + that each such module is only imported once per Python process. This comes with + the side-effect that deleting ``numpy`` from ``sys.modules`` and re-importing + it will now fail with an ``ImportError``. This has always been unsafe, with + unexpected side-effects, though did not previously raise an error. 
diff --git a/doc/release/upcoming_changes/29060.change.rst b/doc/release/upcoming_changes/29060.change.rst new file mode 100644 index 000000000000..1561da7bf94e --- /dev/null +++ b/doc/release/upcoming_changes/29060.change.rst @@ -0,0 +1,3 @@ +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. diff --git a/doc/release/upcoming_changes/29094.compatibility.rst b/doc/release/upcoming_changes/29094.compatibility.rst new file mode 100644 index 000000000000..961ee6504dae --- /dev/null +++ b/doc/release/upcoming_changes/29094.compatibility.rst @@ -0,0 +1,7 @@ +The Macro NPY_ALIGNMENT_REQUIRED has been removed +------------------------------------------------- +The macro was defined in the `npy_cpu.h` file, so might be regarded as +semipublic. As it turns out, with modern compilers and hardware it is almost +always the case that alignment is required, so numpy no longer uses the macro. +It is unlikely anyone uses it, but you might want to compile with the `-Wundef` +flag or equivalent to be sure. diff --git a/doc/release/upcoming_changes/29179.change.rst b/doc/release/upcoming_changes/29179.change.rst new file mode 100644 index 000000000000..12eb6804d3dd --- /dev/null +++ b/doc/release/upcoming_changes/29179.change.rst @@ -0,0 +1,4 @@ +Fix bug in ``matmul`` for non-contiguous out kwarg parameter +------------------------------------------------------------ +In some cases, if ``out`` was non-contiguous, ``np.matmul`` would cause +memory corruption or a c-level assert. This was new to v2.3.0 and fixed in v2.3.1. 
diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 9df2f6c546c5..1555dafb5539 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -20,19 +20,7 @@ body { width: 15%; } -/* Version switcher colors from PyData Sphinx Theme */ - -.version-switcher__button[data-active-version-name*="devdocs"] { - background-color: var(--pst-color-warning); - border-color: var(--pst-color-warning); - opacity: 0.9; -} - -.version-switcher__button:not([data-active-version-name*="stable"]):not([data-active-version-name*="dev"]):not([data-active-version-name*="pull"]) { - background-color: var(--pst-color-danger); - border-color: var(--pst-color-danger); - opacity: 0.9; -} +/* Version switcher from PyData Sphinx Theme */ .version-switcher__menu a.list-group-item { font-size: small; diff --git a/doc/source/conf.py b/doc/source/conf.py index f437cbb6e83e..eba0bd014fb0 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -1,10 +1,11 @@ +import importlib import os import re import sys -import importlib +from datetime import datetime + from docutils import nodes from docutils.parsers.rst import Directive -from datetime import datetime # Minimum version, enforced by sphinx needs_sphinx = '4.3' @@ -20,7 +21,8 @@ def replace_scalar_type_names(): """ Rename numpy types to use the canonical names to make sphinx behave """ import ctypes - Py_ssize_t = ctypes.c_int64 if ctypes.sizeof(ctypes.c_void_p) == 8 else ctypes.c_int32 + sizeof_void_p = ctypes.sizeof(ctypes.c_void_p) + Py_ssize_t = ctypes.c_int64 if sizeof_void_p == 8 else ctypes.c_int32 class PyObject(ctypes.Structure): pass @@ -66,6 +68,7 @@ class PyTypeObject(ctypes.Structure): # As of NumPy 1.25, a deprecation of `str`/`bytes` attributes happens. # For some reasons, the doc build accesses these, so ignore them. 
import warnings + warnings.filterwarnings("ignore", "In the future.*NumPy scalar", FutureWarning) @@ -122,6 +125,7 @@ class PyTypeObject(ctypes.Structure): # other places throughout the built documents. # import numpy + # The short X.Y version (including .devXXXX, rcX, b1 suffixes if present) version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__) version = re.sub(r'(\.dev\d+).*?$', r'\1', version) @@ -146,8 +150,26 @@ class PyTypeObject(ctypes.Structure): exclude_dirs = [] exclude_patterns = [] +suppress_warnings = [] +nitpick_ignore = [] + if sys.version_info[:2] >= (3, 12): - exclude_patterns += ["reference/distutils.rst"] + exclude_patterns += [ + "reference/distutils.rst", + "reference/distutils/misc_util.rst", + ] + suppress_warnings += [ + 'toc.excluded', # Suppress warnings about excluded toctree entries + ] + nitpicky = True + nitpick_ignore += [ + ('ref', 'numpy-distutils-refguide'), + # The first ignore is not captured without nitpicky = True. + # These three ignores are required once nitpicky = True is set. + ('py:mod', 'numpy.distutils'), + ('py:class', 'Extension'), + ('py:class', 'numpy.distutils.misc_util.Configuration'), + ] # If true, '()' will be appended to :func: etc. cross-reference text. 
add_function_parentheses = False @@ -460,6 +482,7 @@ def setup(app): plot_formats = [('png', 100), 'pdf'] import math + phi = (math.sqrt(5) + 1) / 2 plot_rcparams = { @@ -484,7 +507,7 @@ def setup(app): # ----------------------------------------------------------------------------- import inspect -from os.path import relpath, dirname +from os.path import dirname, relpath for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']: try: @@ -539,14 +562,14 @@ def linkcode_resolve(domain, info): fn = None lineno = None - # Make a poor effort at linking C extension types - if isinstance(obj, type) and obj.__module__ == 'numpy': - fn = _get_c_source_file(obj) + if isinstance(obj, type): + # Make a poor effort at linking C extension types + if obj.__module__ == 'numpy': + fn = _get_c_source_file(obj) - # This can be removed when removing the decorator set_module. Fix issue #28629 - if hasattr(obj, '_module_file'): - fn = obj._module_file - fn = relpath(fn, start=dirname(numpy.__file__)) + # This can be removed when removing the decorator set_module. 
Fix issue #28629 + if hasattr(obj, '_module_source'): + obj.__module__, obj._module_source = obj._module_source, obj.__module__ if fn is None: try: @@ -573,6 +596,9 @@ def linkcode_resolve(domain, info): else: linespec = "" + if isinstance(obj, type) and hasattr(obj, '_module_source'): + obj.__module__, obj._module_source = obj._module_source, obj.__module__ + if 'dev' in numpy.__version__: return f"https://github.com/numpy/numpy/blob/main/numpy/{fn}{linespec}" else: @@ -580,10 +606,11 @@ def linkcode_resolve(domain, info): numpy.__version__, fn, linespec) -from pygments.lexers import CLexer from pygments.lexer import inherit +from pygments.lexers import CLexer from pygments.token import Comment + class NumPyLexer(CLexer): name = 'NUMPYLEXER' @@ -603,7 +630,7 @@ class NumPyLexer(CLexer): breathe_default_members = ("members", "undoc-members", "protected-members") # See https://github.com/breathe-doc/breathe/issues/696 -nitpick_ignore = [ +nitpick_ignore += [ ('c:identifier', 'FILE'), ('c:identifier', 'size_t'), ('c:identifier', 'PyHeapTypeObject'), diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 2664ee745579..50dac45e475a 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -190,8 +190,8 @@ Stylistic guidelines -------------------- * Set up your editor to follow `PEP 8 `_ (remove trailing white space, no tabs, etc.). Check code with - pyflakes / flake8. + pep-0008/>`_ (remove trailing white space, no tabs, etc.). Check code + with ruff. * Use NumPy data types instead of strings (``np.uint8`` instead of ``"uint8"``). diff --git a/doc/source/dev/internals.code-explanations.rst b/doc/source/dev/internals.code-explanations.rst index b1ee9b114aa8..1bb8f60528c1 100644 --- a/doc/source/dev/internals.code-explanations.rst +++ b/doc/source/dev/internals.code-explanations.rst @@ -401,7 +401,7 @@ Iterators for the output arguments are then processed. 
Finally, the decision is made about how to execute the looping mechanism to ensure that all elements of the input arrays are combined to produce the output arrays of the correct type. The options for loop -execution are one-loop (for :term`contiguous`, aligned, and correct data +execution are one-loop (for :term:`contiguous`, aligned, and correct data type), strided-loop (for non-contiguous but still aligned and correct data type), and a buffered loop (for misaligned or incorrect data type situations). Depending on which execution method is called for, diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst index b78e8e75cb1f..ebe3f6b68918 100644 --- a/doc/source/reference/arrays.interface.rst +++ b/doc/source/reference/arrays.interface.rst @@ -13,13 +13,9 @@ The array interface protocol This page describes the NumPy-specific API for accessing the contents of a NumPy array from other C extensions. :pep:`3118` -- :c:func:`The Revised Buffer Protocol ` introduces - similar, standardized API to Python 2.6 and 3.0 for any extension - module to use. Cython__'s buffer array support - uses the :pep:`3118` API; see the `Cython NumPy - tutorial`__. Cython provides a way to write code that supports the buffer - protocol with Python versions older than 2.6 because it has a - backward-compatible implementation utilizing the array interface - described here. + similar, standardized API for any extension module to use. Cython__'s + buffer array support uses the :pep:`3118` API; see the `Cython NumPy + tutorial`__. 
__ https://cython.org/ __ https://github.com/cython/cython/wiki/tutorials-numpy diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 124ab572296f..f859db4620d4 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -65,10 +65,10 @@ Some of the scalar types are essentially equivalent to fundamental Python types and therefore inherit from them as well as from the generic array scalar type: -==================== =========================== ============= +==================== =========================== ========= Array scalar type Related Python type Inherits? -==================== =========================== ============= -:class:`int_` :class:`int` Python 2 only +==================== =========================== ========= +:class:`int_` :class:`int` no :class:`double` :class:`float` yes :class:`cdouble` :class:`complex` yes :class:`bytes_` :class:`bytes` yes @@ -76,7 +76,7 @@ Array scalar type Related Python type Inherits? :class:`bool_` :class:`bool` no :class:`datetime64` :class:`datetime.datetime` no :class:`timedelta64` :class:`datetime.timedelta` no -==================== =========================== ============= +==================== =========================== ========= The :class:`bool_` data type is very similar to the Python :class:`bool` but does not inherit from it because Python's @@ -86,9 +86,9 @@ Python Boolean scalar. .. warning:: - The :class:`int_` type does **not** inherit from the - :class:`int` built-in under Python 3, because type :class:`int` is no - longer a fixed-width integer type. + The :class:`int_` type does **not** inherit from the built-in + :class:`int`, because type :class:`int` is not a fixed-width + integer type. .. tip:: The default data type in NumPy is :class:`double`. 
diff --git a/doc/source/reference/c-api/data_memory.rst b/doc/source/reference/c-api/data_memory.rst index f041c1a6a32a..a542bcf7c713 100644 --- a/doc/source/reference/c-api/data_memory.rst +++ b/doc/source/reference/c-api/data_memory.rst @@ -134,9 +134,8 @@ A better technique would be to use a ``PyCapsule`` as a base object: Example of memory tracing with ``np.lib.tracemalloc_domain`` ------------------------------------------------------------ -Note that since Python 3.6 (or newer), the builtin ``tracemalloc`` module can be used to -track allocations inside NumPy. NumPy places its CPU memory allocations into the -``np.lib.tracemalloc_domain`` domain. +The builtin ``tracemalloc`` module can be used to track allocations inside NumPy. +NumPy places its CPU memory allocations into the ``np.lib.tracemalloc_domain`` domain. For additional information, check: https://docs.python.org/3/library/tracemalloc.html. Here is an example on how to use ``np.lib.tracemalloc_domain``: diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 4565e602193f..3f16b5f4dbc4 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -36,10 +36,10 @@ New types are defined in C by two basic steps: Instead of special method names which define behavior for Python classes, there are "function tables" which point to functions that -implement the desired results. Since Python 2.2, the PyTypeObject -itself has become dynamic which allows C types that can be "sub-typed -"from other C-types in C, and sub-classed in Python. The children -types inherit the attributes and methods from their parent(s). +implement the desired results. The PyTypeObject itself is dynamic +which allows C types that can be "sub-typed" from other C-types in C, +and sub-classed in Python. The children types inherit the attributes +and methods from their parent(s). 
There are two major new types: the ndarray ( :c:data:`PyArray_Type` ) and the ufunc ( :c:data:`PyUFunc_Type` ). Additional types play a @@ -1618,7 +1618,7 @@ NumPy C-API and C complex When you use the NumPy C-API, you will have access to complex real declarations ``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C standard types from ``complex.h``. Unfortunately, ``complex.h`` contains -`#define I ...`` (where the actual definition depends on the compiler), which +``#define I ...`` (where the actual definition depends on the compiler), which means that any downstream user that does ``#include `` could get ``I`` defined, and using something like declaring ``double I;`` in their code will result in an obscure compiler error like @@ -1636,4 +1636,4 @@ to your code. .. versionchanged:: 2.0 The inclusion of ``complex.h`` was new in NumPy 2, so that code defining a different ``I`` may not have required the ``#undef I`` on older versions. - NumPy 2.0.1 briefly included the ``#under I`` \ No newline at end of file + NumPy 2.0.1 briefly included the ``#under I`` diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst index 9c44ebcbc589..3fbe25d5b03c 100644 --- a/doc/source/reference/maskedarray.generic.rst +++ b/doc/source/reference/maskedarray.generic.rst @@ -66,7 +66,7 @@ attributes and methods are described in more details in the .. try_examples:: -The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: + The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: >>> import numpy as np >>> import numpy.ma as ma @@ -521,7 +521,7 @@ Numerical operations -------------------- Numerical operations can be easily performed without worrying about missing -values, dividing by zero, square roots of negative numbers, etc.:: +values, dividing by zero, square roots of negative numbers, etc.: .. 
try_examples:: diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 01a5bcff7fbc..98e3dda54e7b 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -5,7 +5,7 @@ NumPy's module structure ************************ NumPy has a large number of submodules. Most regular usage of NumPy requires -only the main namespace and a smaller set of submodules. The rest either either +only the main namespace and a smaller set of submodules. The rest either have special-purpose or niche namespaces. Main namespaces diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst index 99b7ec781b55..17c6a515cdbc 100644 --- a/doc/source/reference/random/multithreading.rst +++ b/doc/source/reference/random/multithreading.rst @@ -9,8 +9,8 @@ well-behaved (writable and aligned). Under normal circumstances, arrays created using the common constructors such as :meth:`numpy.empty` will satisfy these requirements. -This example makes use of Python 3 :mod:`concurrent.futures` to fill an array -using multiple threads. Threads are long-lived so that repeated calls do not +This example makes use of :mod:`concurrent.futures` to fill an array using +multiple threads. Threads are long-lived so that repeated calls do not require any additional overheads from thread creation. 
The random numbers generated are reproducible in the sense that the same diff --git a/doc/source/reference/random/performance.py b/doc/source/reference/random/performance.py index b419f3f61100..87c07c3262a6 100644 --- a/doc/source/reference/random/performance.py +++ b/doc/source/reference/random/performance.py @@ -3,7 +3,7 @@ import pandas as pd import numpy as np -from numpy.random import MT19937, PCG64, PCG64DXSM, Philox, SFC64 +from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Philox PRNGS = [MT19937, PCG64, PCG64DXSM, Philox, SFC64] @@ -63,7 +63,7 @@ print(table.to_csv(float_format='%0.1f')) -rel = table.loc[:, ['RandomState']].values @ np.ones( +rel = table.loc[:, ['RandomState']].to_numpy() @ np.ones( (1, table.shape[1])) / table rel.pop('RandomState') rel = rel.T @@ -74,9 +74,10 @@ print(rel.to_csv(float_format='%0d')) # Cross-platform table -rows = ['32-bit Unsigned Ints', '64-bit Unsigned Ints', 'Uniforms', 'Normals', 'Exponentials'] +rows = ['32-bit Unsigned Ints', '64-bit Unsigned Ints', 'Uniforms', + 'Normals', 'Exponentials'] xplat = rel.reindex(rows, axis=0) -xplat = 100 * (xplat / xplat.MT19937.values[:, None]) +xplat = 100 * (xplat / xplat.MT19937.to_numpy()[:, None]) overall = np.exp(np.log(xplat).mean(0)) xplat = xplat.T.copy() xplat['Overall'] = overall diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index 524a1532ca57..2b7039136e75 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -234,7 +234,7 @@ The need to align certain CPU features that are assured to be supported by successive generations of the same architecture, some cases: - On ppc64le ``VSX(ISA 2.06)`` and ``VSX2(ISA 2.07)`` both imply one another since the - first generation that supports little-endian mode is Power-8`(ISA 2.07)` + first generation that supports little-endian mode is ``Power-8(ISA 2.07)`` - On AArch64 ``NEON NEON_FP16 NEON_VFPV4 ASIMD`` implies 
each other since they are part of the hardware baseline. diff --git a/doc/source/reference/simd/gen_features.py b/doc/source/reference/simd/gen_features.py index eb516e3ff2ac..3394f67f23ef 100644 --- a/doc/source/reference/simd/gen_features.py +++ b/doc/source/reference/simd/gen_features.py @@ -5,6 +5,7 @@ from numpy.distutils.ccompiler_opt import CCompilerOpt + class FakeCCompilerOpt(CCompilerOpt): # disable caching no need for it conf_nocache = True diff --git a/doc/source/release.rst b/doc/source/release.rst index 675b91352772..6c6a853b06f5 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,7 +5,9 @@ Release notes .. toctree:: :maxdepth: 2 + 2.4.0 2.3.0 + 2.2.6 2.2.5 2.2.4 2.2.3 diff --git a/doc/source/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst index 36cd1d65a266..f6fe84a4b17f 100644 --- a/doc/source/release/1.11.0-notes.rst +++ b/doc/source/release/1.11.0-notes.rst @@ -205,7 +205,7 @@ New Features - ``np.int16``, ``np.uint16``, - ``np.int32``, ``np.uint32``, - ``np.int64``, ``np.uint64``, - - ``np.int_ ``, ``np.intp`` + - ``np.int_``, ``np.intp`` The specification is by precision rather than by C type. Hence, on some platforms ``np.int64`` may be a ``long`` instead of ``long long`` even if diff --git a/doc/source/release/1.13.0-notes.rst b/doc/source/release/1.13.0-notes.rst index 3bfaf1ea5169..400c9553fbd3 100644 --- a/doc/source/release/1.13.0-notes.rst +++ b/doc/source/release/1.13.0-notes.rst @@ -136,7 +136,7 @@ implement ``__*slice__`` on the derived class, as ``__*item__`` will intercept these calls correctly. Any code that did implement these will work exactly as before. Code that -invokes``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will +invokes ``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will now issue a DeprecationWarning - ``.__getitem__(slice(start, end))`` should be used instead. 
diff --git a/doc/source/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst index 68040b470caa..055a933291b9 100644 --- a/doc/source/release/1.14.0-notes.rst +++ b/doc/source/release/1.14.0-notes.rst @@ -409,8 +409,8 @@ This new default changes the float output relative to numpy 1.13. The old behavior can be obtained in 1.13 "legacy" printing mode, see compatibility notes above. -``hermitian`` option added to``np.linalg.matrix_rank`` ------------------------------------------------------- +``hermitian`` option added to ``np.linalg.matrix_rank`` +------------------------------------------------------- The new ``hermitian`` option allows choosing between standard SVD based matrix rank calculation and the more efficient eigenvalue based method for symmetric/hermitian matrices. diff --git a/doc/source/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst index e84386f0fa5d..7aa85d167d29 100644 --- a/doc/source/release/1.15.0-notes.rst +++ b/doc/source/release/1.15.0-notes.rst @@ -213,7 +213,7 @@ C API changes New functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` ----------------------------------------------------------------------------------- Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier`` -have been added and should be used in place of the ``npy_get_floatstatus``and +have been added and should be used in place of the ``npy_get_floatstatus`` and ``npy_clear_status`` functions. Optimizing compilers like GCC 8.1 and Clang were rearranging the order of operations when the previous functions were used in the ufunc SIMD functions, resulting in the floatstatus flags being checked @@ -326,8 +326,8 @@ passed explicitly, and are not yet computed automatically. No longer does an IQR of 0 result in ``n_bins=1``, rather the number of bins chosen is related to the data size in this situation. 
-The edges returned by `histogram`` and ``histogramdd`` now match the data float type ------------------------------------------------------------------------------------- +The edges returned by ``histogram`` and ``histogramdd`` now match the data float type +------------------------------------------------------------------------------------- When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the returned edges are now of the same dtype. Previously, ``histogram`` would only return the same type if explicit bins were given, and ``histogram`` would diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst index 15e0ad77f5d1..a90dbb7a67d9 100644 --- a/doc/source/release/1.18.0-notes.rst +++ b/doc/source/release/1.18.0-notes.rst @@ -202,9 +202,9 @@ exception will require adaptation, and code that mistakenly called Moved modules in ``numpy.random`` --------------------------------- As part of the API cleanup, the submodules in ``numpy.random`` -``bit_generator``, ``philox``, ``pcg64``, ``sfc64, ``common``, ``generator``, +``bit_generator``, ``philox``, ``pcg64``, ``sfc64``, ``common``, ``generator``, and ``bounded_integers`` were moved to ``_bit_generator``, ``_philox``, -``_pcg64``, ``_sfc64, ``_common``, ``_generator``, and ``_bounded_integers`` +``_pcg64``, ``_sfc64``, ``_common``, ``_generator``, and ``_bounded_integers`` respectively to indicate that they are not part of the public interface. (`gh-14608 `__) diff --git a/doc/source/release/1.21.5-notes.rst b/doc/source/release/1.21.5-notes.rst index c69d26771268..b3e810b51c06 100644 --- a/doc/source/release/1.21.5-notes.rst +++ b/doc/source/release/1.21.5-notes.rst @@ -33,7 +33,7 @@ A total of 11 pull requests were merged for this release. * `#20462 `__: BUG: Fix float16 einsum fastpaths using wrong tempvar * `#20463 `__: BUG, DIST: Print os error message when the executable not exist * `#20464 `__: BLD: Verify the ability to compile C++ sources before initiating... 
-* `#20465 `__: BUG: Force ``npymath` ` to respect ``npy_longdouble`` +* `#20465 `__: BUG: Force ``npymath`` to respect ``npy_longdouble`` * `#20466 `__: BUG: Fix failure to create aligned, empty structured dtype * `#20467 `__: ENH: provide a convenience function to replace npy_load_module * `#20495 `__: MAINT: update wheel to version that supports python3.10 diff --git a/doc/source/release/2.2.6-notes.rst b/doc/source/release/2.2.6-notes.rst new file mode 100644 index 000000000000..974f59d640db --- /dev/null +++ b/doc/source/release/2.2.6-notes.rst @@ -0,0 +1,43 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.6 Release Notes +========================== + +NumPy 2.2.6 is a patch release that fixes bugs found after the 2.2.5 release. +It is a mix of typing fixes/improvements as well as the normal bug +fixes and some CI maintenance. + +This release supports Python versions 3.10-3.13. + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Ilhan Polat +* Joren Hammudoglu +* Marco Gorelli + +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Sayed Adel + +Pull requests merged +==================== + +A total of 11 pull requests were merged for this release. + +* `#28778 `__: MAINT: Prepare 2.2.x for further development +* `#28851 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28852 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28853 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28864 `__: BUG: fix stringdtype singleton thread safety +* `#28865 `__: MAINT: use OpenBLAS 0.3.29 +* `#28889 `__: MAINT: from_dlpack thread safety fixes +* `#28913 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28915 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28916 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. 
+* `#28966 `__: TYP: reject complex scalar types in ndarray.__ifloordiv__ diff --git a/doc/source/release/2.3.0-notes.rst b/doc/source/release/2.3.0-notes.rst index 74f11a0b4537..4c3c923b3b5e 100644 --- a/doc/source/release/2.3.0-notes.rst +++ b/doc/source/release/2.3.0-notes.rst @@ -4,16 +4,529 @@ NumPy 2.3.0 Release Notes ========================== +The NumPy 2.3.0 release continues the work to improve free threaded Python +support and annotations together with the usual set of bug fixes. It is unusual +in the number of expired deprecations, code modernizations, and style cleanups. +The latter may not be visible to users, but is important for code maintenance +over the long term. Note that we have also upgraded from manylinux2014 to +manylinux_2_28. + +Users running on a Mac having an M4 cpu might see various warnings about +invalid values and such. The warnings are a known problem with Accelerate. +They are annoying, but otherwise harmless. Apple promises to fix them. + +This release supports Python versions 3.11-3.13, Python 3.14 will be supported +when it is released. + Highlights ========== -*We'll choose highlights for this release near the end of the release cycle.* +* Interactive examples in the NumPy documentation. +* Building NumPy with OpenMP Parallelization. +* Preliminary support for Windows on ARM. +* Improved support for free threaded Python. +* Improved annotations. + + +New functions +============= + +New function ``numpy.strings.slice`` +------------------------------------ +The new function ``numpy.strings.slice`` was added, which implements fast +native slicing of string arrays. It supports the full slicing API including +negative slice offsets and steps. + +(`gh-27789 `__) + + +Deprecations +============ + +* The ``numpy.typing.mypy_plugin`` has been deprecated in favor of platform-agnostic + static type inference. Please remove ``numpy.typing.mypy_plugin`` from the ``plugins`` + section of your mypy configuration. 
If this change results in new errors being + reported, kindly open an issue. + + (`gh-28129 `__) + +* The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in + a future version. + + This type was previously intended to be used as a generic upper bound for + type-parameters, for example: + + .. code-block:: python + + import numpy as np + import numpy.typing as npt + + def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... + + But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete + subtypes, causing static type-checkers to reject ``x: np.float64 = + f(np.complex128(42j))``. + + So instead, the better approach is to use ``typing.overload``: + + .. code-block:: python + + import numpy as np + from typing import overload + + @overload + def f(x: np.complex64) -> np.float32: ... + @overload + def f(x: np.complex128) -> np.float64: ... + @overload + def f(x: np.clongdouble) -> np.longdouble: ... + + (`gh-28884 `__) + + +Expired deprecations +==================== + +* Remove deprecated macros like ``NPY_OWNDATA`` from Cython interfaces in favor + of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove ``numpy/npy_1_7_deprecated_api.h`` and C macros like ``NPY_OWNDATA`` + in favor of ``NPY_ARRAY_OWNDATA`` (deprecated since 1.7) + + (`gh-28254 `__) + +* Remove alias ``generate_divbyzero_error`` to + ``npy_set_floatstatus_divbyzero`` and ``generate_overflow_error`` to + ``npy_set_floatstatus_overflow`` (deprecated since 1.10) + + (`gh-28254 `__) + +* Remove ``np.tostring`` (deprecated since 1.19) + + (`gh-28254 `__) + +* Raise on ``np.conjugate`` of non-numeric types (deprecated since 1.13) + + (`gh-28254 `__) + +* Raise when using ``np.bincount(...minlength=None)``, use 0 instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Passing ``shape=None`` to functions with a non-optional shape argument + errors, use ``()`` instead (deprecated since 1.20) + + (`gh-28254 `__) + +* Inexact 
matches for ``mode`` and ``searchside`` raise (deprecated since 1.20) + + (`gh-28254 `__) + +* Setting ``__array_finalize__ = None`` errors (deprecated since 1.23) + + (`gh-28254 `__) + +* ``np.fromfile`` and ``np.fromstring`` error on bad data, previously they + would guess (deprecated since 1.18) + + (`gh-28254 `__) + +* ``datetime64`` and ``timedelta64`` construction with a tuple no longer + accepts an ``event`` value, either use a two-tuple of (unit, num) or a + 4-tuple of (unit, num, den, 1) (deprecated since 1.14) + + (`gh-28254 `__) + +* When constructing a ``dtype`` from a class with a ``dtype`` attribute, that + attribute must be a dtype-instance rather than a thing that can be parsed as + a dtype instance (deprecated in 1.19). At some point the whole construct of + using a dtype attribute will be deprecated (see #25306) + + (`gh-28254 `__) + +* Passing booleans as partition index errors (deprecated since 1.23) + + (`gh-28254 `__) + +* Out-of-bounds indexes error even on empty arrays (deprecated since 1.20) + + (`gh-28254 `__) + +* ``np.tostring`` has been removed, use ``tobytes`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* Disallow make a non-writeable array writeable for arrays with a base that do + not own their data (deprecated since 1.17) + + (`gh-28254 `__) + +* ``concatenate()`` with ``axis=None`` uses ``same-kind`` casting by default, + not ``unsafe`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Unpickling a scalar with object dtype errors (deprecated since 1.20) + + (`gh-28254 `__) + +* The binary mode of ``fromstring`` now errors, use ``frombuffer`` instead + (deprecated since 1.14) + + (`gh-28254 `__) + +* Converting ``np.inexact`` or ``np.floating`` to a dtype errors (deprecated + since 1.19) + + (`gh-28254 `__) + +* Converting ``np.complex``, ``np.integer``, ``np.signedinteger``, + ``np.unsignedinteger``, ``np.generic`` to a dtype errors (deprecated since + 1.19) + + (`gh-28254 `__) + +* The Python built-in ``round`` errors 
for complex scalars. Use ``np.round`` or + ``scalar.round`` instead (deprecated since 1.19) + + (`gh-28254 `__) + +* 'np.bool' scalars can no longer be interpreted as an index (deprecated since 1.19) + + (`gh-28254 `__) + +* Parsing an integer via a float string is no longer supported. (deprecated + since 1.23) To avoid this error you can + * make sure the original data is stored as integers. + * use the ``converters=float`` keyword argument. + * Use ``np.loadtxt(...).astype(np.int64)`` + + (`gh-28254 `__) + +* The use of a length 1 tuple for the ufunc ``signature`` errors. Use ``dtype`` + or fill the tuple with ``None`` (deprecated since 1.19) + + (`gh-28254 `__) + +* Special handling of matrix is in np.outer is removed. Convert to a ndarray + via ``matrix.A`` (deprecated since 1.20) + + (`gh-28254 `__) + +* Removed the ``np.compat`` package source code (removed in 2.0) + + (`gh-28961 `__) + + +C API changes +============= + +* ``NpyIter_GetTransferFlags`` is now available to check if + the iterator needs the Python API or if casts may cause floating point + errors (FPE). FPEs can for example be set when casting ``float64(1e300)`` + to ``float32`` (overflow to infinity) or a NaN to an integer (invalid value). + + (`gh-27883 `__) + +* ``NpyIter`` now has no limit on the number of operands it supports. + + (`gh-28080 `__) + +New ``NpyIter_GetTransferFlags`` and ``NpyIter_IterationNeedsAPI`` change +------------------------------------------------------------------------- +NumPy now has the new ``NpyIter_GetTransferFlags`` function as a more precise +way checking of iterator/buffering needs. I.e. whether the Python API/GIL is +required or floating point errors may occur. +This function is also faster if you already know your needs without buffering. + +The ``NpyIter_IterationNeedsAPI`` function now performs all the checks that were +previously performed at setup time. While it was never necessary to call it +multiple times, doing so will now have a larger cost. 
+ +(`gh-27998 `__) + + +New Features +============ + +* The type parameter of ``np.dtype`` now defaults to ``typing.Any``. + This way, static type-checkers will infer ``dtype: np.dtype`` as + ``dtype: np.dtype[Any]``, without reporting an error. + + (`gh-28669 `__) + +* Static type-checkers now interpret: + + - ``_: np.ndarray`` as ``_: npt.NDArray[typing.Any]``. + - ``_: np.flatiter`` as ``_: np.flatiter[np.ndarray]``. + + This is because their type parameters now have default values. + + (`gh-28940 `__) + +NumPy now registers its pkg-config paths with the pkgconf_ PyPI package +----------------------------------------------------------------------- +The pkgconf_ PyPI package provides an interface for projects like NumPy to +register their own paths to be added to the pkg-config search path. This means +that when using pkgconf_ from PyPI, NumPy will be discoverable without needing +for any custom environment configuration. + +.. attention:: Attention + + This only applies when using the pkgconf_ package from PyPI_, or put another + way, this only applies when installing pkgconf_ via a Python package + manager. + + If you are using ``pkg-config`` or ``pkgconf`` provided by your system, or + any other source that does not use the pkgconf-pypi_ project, the NumPy + pkg-config directory will not be automatically added to the search path. In + these situations, you might want to use ``numpy-config``. + + +.. _pkgconf: https://github.com/pypackaging-native/pkgconf-pypi +.. _PyPI: https://pypi.org/ +.. _pkgconf-pypi: https://github.com/pypackaging-native/pkgconf-pypi + +(`gh-28214 `__) + +Allow ``out=...`` in ufuncs to ensure array result +-------------------------------------------------- +NumPy has the sometimes difficult behavior that it currently usually +returns scalars rather than 0-D arrays (even if the inputs were 0-D arrays). +This is especially problematic for non-numerical dtypes (e.g. ``object``). + +For ufuncs (i.e. 
most simple math functions) it is now possible to use +``out=...`` (literally \`...\`, e.g. ``out=Ellipsis``) which is identical in +behavior to ``out`` not being passed, but will ensure a non-scalar return. +This spelling is borrowed from ``arr1d[0, ...]`` where the ``...`` also ensures +a non-scalar return. + +Other functions with an ``out=`` kwarg should gain support eventually. +Downstream libraries that interoperate via ``__array_ufunc__`` or +``__array_function__`` may need to adapt to support this. + +(`gh-28576 `__) + +Building NumPy with OpenMP Parallelization +------------------------------------------ +NumPy now supports OpenMP parallel processing capabilities when built with the +``-Denable_openmp=true`` Meson build flag. This feature is disabled by default. +When enabled, ``np.sort`` and ``np.argsort`` functions can utilize OpenMP for +parallel thread execution, improving performance for these operations. + +(`gh-28619 `__) + +Interactive examples in the NumPy documentation +----------------------------------------------- +The NumPy documentation includes a number of examples that +can now be run interactively in your browser using WebAssembly +and Pyodide. + +Please note that the examples are currently experimental in +nature and may not work as expected for all methods in the +public API. + +(`gh-26745 `__) + + +Improvements +============ + +* Scalar comparisons between non-comparable dtypes such as + ``np.array(1) == np.array('s')`` now return a NumPy bool instead of + a Python bool. + + (`gh-27288 `__) + +* ``np.nditer`` now has no limit on the number of supported operands + (C-integer). + + (`gh-28080 `__) + +* No-copy pickling is now supported for any + array that can be transposed to a C-contiguous array. + + (`gh-28105 `__) + +* The ``__repr__`` for user-defined dtypes now prefers the ``__name__`` of the + custom dtype over a more generic name constructed from its ``kind`` and + ``itemsize``. 
+ + (`gh-28250 `__) + +* ``np.dot`` now reports floating point exceptions. + + (`gh-28442 `__) + +* ``np.dtypes.StringDType`` is now a + `generic type `_ which + accepts a type argument for ``na_object`` that defaults to ``typing.Never``. + For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, + and ``StringDType()`` returns a ``StringDType[typing.Never]``. + + (`gh-28856 `__) + +Added warnings to ``np.isclose`` +-------------------------------- +Added warning messages if at least one of atol or rtol are either ``np.nan`` or +``np.inf`` within ``np.isclose``. + +* Warnings follow the user's ``np.seterr`` settings + +(`gh-28205 `__) + + +Performance improvements and changes +==================================== + +Performance improvements to ``np.unique`` +----------------------------------------- +``np.unique`` now tries to use a hash table to find unique values instead of +sorting values before finding unique values. This is limited to certain dtypes +for now, and the function is now faster for those dtypes. The function now also +exposes a ``sorted`` parameter to allow returning unique values as they were +found, instead of sorting them afterwards. + +(`gh-26018 `__) + +Performance improvements to ``np.sort`` and ``np.argsort`` +---------------------------------------------------------- +``np.sort`` and ``np.argsort`` functions now can leverage OpenMP for parallel +thread execution, resulting in up to 3.5x speedups on x86 architectures with +AVX2 or AVX-512 instructions. This opt-in feature requires NumPy to be built +with the -Denable_openmp Meson flag. Users can control the number of threads +used by setting the OMP_NUM_THREADS environment variable. + +(`gh-28619 `__) + +Performance improvements for ``np.float16`` casts +------------------------------------------------- +Earlier, floating point casts to and from ``np.float16`` types +were emulated in software on all platforms. 
+ +Now, on ARM devices that support Neon float16 intrinsics (such as +recent Apple Silicon), the native float16 path is used to achieve +the best performance. + +(`gh-28769 `__) + +Performance improvements for ``np.matmul`` +------------------------------------------ +Enable using BLAS for ``matmul`` even when operands are non-contiguous by copying +if needed. + +(`gh-23752 `__) + +Changes +======= + +* The vector norm ``ord=inf`` and the matrix norms ``ord={1, 2, inf, 'nuc'}`` + now always return zero for empty arrays. Empty arrays have at least one axis + of size zero. This affects ``np.linalg.norm``, ``np.linalg.vector_norm``, and + ``np.linalg.matrix_norm``. Previously, NumPy would raise errors or return + zero depending on the shape of the array. + + (`gh-28343 `__) + +* A spelling error in the error message returned when converting a string to a + float with the method ``np.format_float_positional`` has been fixed. + + (`gh-28569 `__) + +* NumPy's ``__array_api_version__`` was upgraded from ``2023.12`` to ``2024.12``. + +* ``numpy.count_nonzero`` for ``axis=None`` (default) now returns a NumPy scalar + instead of a Python integer. + +* The parameter ``axis`` in ``numpy.take_along_axis`` function now has a default + value of ``-1``. + + (`gh-28615 `__) + +* Printing of ``np.float16`` and ``np.float32`` scalars and arrays has been improved by + adjusting the transition to scientific notation based on the floating point precision. + A new legacy ``np.printoptions`` mode ``'2.2'`` has been added for backwards compatibility. + + (`gh-28703 `__) + +* Multiplication between a string and integer now raises OverflowError instead + of MemoryError if the result of the multiplication would create a string that + is too large to be represented. This follows Python's behavior. 
+ + (`gh-29060 `__) + +``unique_values`` may return unsorted data +------------------------------------------ +The relatively new function (added in NumPy 2.0) ``unique_values`` may now +return unsorted results. Just as ``unique_counts`` and ``unique_all`` these +never guaranteed a sorted result, however, the result was sorted until now. In +cases where these do return a sorted result, this may change in future releases +to improve performance. + +(`gh-26018 `__) + +Changes to the main iterator and potential numerical changes +------------------------------------------------------------ +The main iterator, used in math functions and via ``np.nditer`` from Python and +``NpyIter`` in C, now behaves differently for some buffered iterations. This +means that: + +* The buffer size used will often be smaller than the maximum buffer size + allowed by the ``buffersize`` parameter. + +* The "growinner" flag is now honored with buffered reductions when no operand + requires buffering. + +For ``np.sum()`` such changes in buffersize may slightly change numerical +results of floating point operations. Users who use "growinner" for custom +reductions could notice changes in precision (for example, in NumPy we removed +it from ``einsum`` to avoid most precision changes and improve precision for +some 64bit floating point inputs). + +(`gh-27883 `__) + +The minimum supported GCC version is now 9.3.0 +---------------------------------------------- +The minimum supported version was updated from 8.4.0 to 9.3.0, primarily in +order to reduce the chance of platform-specific bugs in old GCC versions from +causing issues. + +(`gh-28102 `__) + +Changes to automatic bin selection in numpy.histogram +----------------------------------------------------- +The automatic bin selection algorithm in ``numpy.histogram`` has been modified +to avoid out-of-memory errors for samples with low variation. 
For full control +over the selected bins the user can set the ``bin`` or ``range`` parameters +of ``numpy.histogram``. + +(`gh-28426 `__) + +Build manylinux_2_28 wheels +--------------------------- +Wheels for linux systems will use the ``manylinux_2_28`` tag (instead of the +``manylinux2014`` tag), which means dropping support for redhat7/centos7, +amazonlinux2, debian9, ubuntu18.04, and other pre-glibc2.28 operating system +versions, as per the `PEP 600 support table`_. + +.. _`PEP 600 support table`: https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#pep600-compliance-check + +(`gh-28436 `__) +Remove use of -Wl,-ld_classic on macOS +-------------------------------------- +Remove use of -Wl,-ld_classic on macOS. This hack is no longer needed by Spack, +and results in libraries that cannot link to other libraries built with ld +(new). -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +(`gh-28713 `__) -.. **Content from release note snippets in doc/release/upcoming_changes:** +Re-enable overriding functions in the ``numpy.strings`` +------------------------------------------------------- +Re-enable overriding functions in the ``numpy.strings`` module. -.. include:: notes-towncrier.rst +(`gh-28741 `__) diff --git a/doc/source/release/2.4.0-notes.rst b/doc/source/release/2.4.0-notes.rst new file mode 100644 index 000000000000..29a7e5ce6073 --- /dev/null +++ b/doc/source/release/2.4.0-notes.rst @@ -0,0 +1,19 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.4.0 Release Notes +========================== + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the following + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. 
include:: notes-towncrier.rst diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 6b1aca65ed00..76e8af63462f 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -157,7 +157,7 @@ the module. return m; } -To use the ``setup.py file``, place ``setup.py`` and ``spammodule.c`` +To use the ``setup.py`` file, place ``setup.py`` and ``spammodule.c`` in the same folder. Then ``python setup.py build`` will build the module to import, or ``python setup.py install`` will install the module to your site-packages directory. @@ -240,8 +240,8 @@ and then the ``setup.py`` file used to create the module containing the ufunc. The place in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. The code in between those lines is +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. .. code-block:: c @@ -339,7 +339,7 @@ the primary thing that must be changed to create your own ufunc. return m; } -This is a ``setup.py file`` for the above code. As before, the module +This is a ``setup.py`` file for the above code. As before, the module can be build via calling ``python setup.py build`` at the command prompt, or installed to site-packages via ``python setup.py install``. The module can also be placed into a local folder e.g. ``npufunc_directory`` below @@ -408,8 +408,8 @@ sections we first give the ``.c`` file and then the corresponding ``setup.py`` file. The places in the code corresponding to the actual computations for -the ufunc are marked with ``/\* BEGIN main ufunc computation \*/`` and -``/\* END main ufunc computation \*/``. 
The code in between those lines +the ufunc are marked with ``/* BEGIN main ufunc computation */`` and +``/* END main ufunc computation */``. The code in between those lines is the primary thing that must be changed to create your own ufunc. diff --git a/doc/source/user/conftest.py b/doc/source/user/conftest.py index 54f9d6d3158c..c9fefb92932a 100644 --- a/doc/source/user/conftest.py +++ b/doc/source/user/conftest.py @@ -1,4 +1,4 @@ # doctesting configuration from the main conftest -from numpy.conftest import dt_config # noqa: F401 +from numpy.conftest import dt_config # noqa: F401 #breakpoint() diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst index a90fbecfdec4..81055d42b9ac 100644 --- a/doc/source/user/how-to-io.rst +++ b/doc/source/user/how-to-io.rst @@ -206,7 +206,7 @@ Human-readable :func:`numpy.save` and :func:`numpy.savez` create binary files. To **write a human-readable file**, use :func:`numpy.savetxt`. The array can only be 1- or -2-dimensional, and there's no ` savetxtz` for multiple files. +2-dimensional, and there's no ``savetxtz`` for multiple files. Large arrays ------------ diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 8c7914ea8dec..9e8093b20f02 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -676,8 +676,7 @@ are only a handful of key differences between the two. - For ``array``, **``*`` means element-wise multiplication**, while **``@`` means matrix multiplication**; they have associated functions - ``multiply()`` and ``dot()``. (Before Python 3.5, ``@`` did not exist - and one had to use ``dot()`` for matrix multiplication). + ``multiply()`` and ``dot()``. - For ``matrix``, **``*`` means matrix multiplication**, and for element-wise multiplication one has to use the ``multiply()`` function. 
diff --git a/doc/source/user/plots/matplotlib1.py b/doc/source/user/plots/matplotlib1.py index 1c3009a93e66..8c1b516752e1 100644 --- a/doc/source/user/plots/matplotlib1.py +++ b/doc/source/user/plots/matplotlib1.py @@ -1,4 +1,5 @@ import matplotlib.pyplot as plt + import numpy as np a = np.array([2, 1, 5, 7, 4, 6, 8, 14, 10, 9, 18, 20, 22]) diff --git a/doc/source/user/plots/matplotlib2.py b/doc/source/user/plots/matplotlib2.py index 104e89fb3b3b..85690b24d54a 100644 --- a/doc/source/user/plots/matplotlib2.py +++ b/doc/source/user/plots/matplotlib2.py @@ -1,4 +1,5 @@ import matplotlib.pyplot as plt + import numpy as np x = np.linspace(0, 5, 20) diff --git a/doc/source/user/plots/matplotlib3.py b/doc/source/user/plots/matplotlib3.py index 135afe823c08..212088b78464 100644 --- a/doc/source/user/plots/matplotlib3.py +++ b/doc/source/user/plots/matplotlib3.py @@ -1,6 +1,7 @@ -import numpy as np import matplotlib.pyplot as plt +import numpy as np + fig = plt.figure() ax = fig.add_subplot(projection='3d') X = np.arange(-5, 5, 0.15) diff --git a/doc/source/user/plots/meshgrid_plot.py b/doc/source/user/plots/meshgrid_plot.py index 91032145af68..d91a9aa42e21 100644 --- a/doc/source/user/plots/meshgrid_plot.py +++ b/doc/source/user/plots/meshgrid_plot.py @@ -1,6 +1,7 @@ -import numpy as np import matplotlib.pyplot as plt +import numpy as np + x = np.array([0, 1, 2, 3]) y = np.array([0, 1, 2, 3, 4, 5]) xx, yy = np.meshgrid(x, y) diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst index 6be8831d9c2a..da456dd17e36 100644 --- a/doc/source/user/troubleshooting-importerror.rst +++ b/doc/source/user/troubleshooting-importerror.rst @@ -83,28 +83,6 @@ on how to properly configure Eclipse/PyDev to use Anaconda Python with specific conda environments. -Raspberry Pi ------------- - -There are sometimes issues reported on Raspberry Pi setups when installing -using ``pip3 install`` (or ``pip`` install). 
These will typically mention:: - - libf77blas.so.3: cannot open shared object file: No such file or directory - - -The solution will be to either:: - - sudo apt-get install libatlas-base-dev - -to install the missing libraries expected by the self-compiled NumPy -(ATLAS is a possible provider of linear algebra). - -*Alternatively* use the NumPy provided by Raspbian. In which case run:: - - pip3 uninstall numpy # remove previously installed version - apt install python3-numpy - - Debug build on Windows ---------------------- diff --git a/environment.yml b/environment.yml index 7223a187915a..770a83218133 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.5.0 - - mypy=1.15.0 + - mypy=1.16.0 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 @@ -45,8 +45,8 @@ dependencies: # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting - - ruff=0.8.3 + - ruff=0.11.13 - gitpython # Used in some tests - cffi - - pytz + - tzdata diff --git a/meson.options b/meson.options index 1be05d324756..b09992fe9b91 100644 --- a/meson.options +++ b/meson.options @@ -22,6 +22,8 @@ option('disable-intel-sort', type: 'boolean', value: false, description: 'Disables SIMD-optimized operations related to Intel x86-simd-sort') option('disable-threading', type: 'boolean', value: false, description: 'Disable threading support (see `NPY_ALLOW_THREADS` docs)') +option('enable-openmp', type: 'boolean', value: false, + description: 'Enable building NumPy with openmp support') option('disable-optimization', type: 'boolean', value: false, description: 'Disable CPU optimized code (dispatch,simd,unroll...)') option('cpu-baseline', type: 'string', value: 'min', diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 8c7a0fb59a57..1276e922ff2a 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -212,6 +212,8 @@ if compiler_id == 'msvc' 
endif endforeach FMA3.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) + # Add floating-point contract flag to fixes transcendental function accuracy on Windows Server 2022 + FMA3.update(args: {'val': '/fp:contract'}) AVX2.update(args: {'val': '/arch:AVX2', 'match': clear_arch}) AVX512_SKX.update(args: {'val': '/arch:AVX512', 'match': clear_arch}) endif diff --git a/numpy/__init__.py b/numpy/__init__.py index 508fc0d8970b..aadc1fab3407 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -89,18 +89,16 @@ import sys import warnings -from ._globals import _NoValue, _CopyMode -from ._expired_attrs_2_0 import __expired_attributes__ - - # If a version with git hash was stored, use that instead from . import version +from ._expired_attrs_2_0 import __expired_attributes__ +from ._globals import _CopyMode, _NoValue from .version import __version__ # We first need to detect if we're being called as part of the numpy setup # procedure itself in a reliable manner. try: - __NUMPY_SETUP__ + __NUMPY_SETUP__ # noqa: B018 except NameError: __NUMPY_SETUP__ = False @@ -113,60 +111,338 @@ try: from numpy.__config__ import show_config except ImportError as e: - msg = """Error importing numpy: you should not try to import numpy from - its source directory; please exit the numpy source tree, and relaunch - your python interpreter from there.""" - raise ImportError(msg) from e + if isinstance(e, ModuleNotFoundError) and e.name == "numpy.__config__": + # The __config__ module itself was not found, so add this info: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e + raise from . 
import _core from ._core import ( - False_, ScalarType, True_, - abs, absolute, acos, acosh, add, all, allclose, - amax, amin, any, arange, arccos, arccosh, arcsin, arcsinh, - arctan, arctan2, arctanh, argmax, argmin, argpartition, argsort, - argwhere, around, array, array2string, array_equal, array_equiv, - array_repr, array_str, asanyarray, asarray, ascontiguousarray, - asfortranarray, asin, asinh, atan, atanh, atan2, astype, atleast_1d, - atleast_2d, atleast_3d, base_repr, binary_repr, bitwise_and, - bitwise_count, bitwise_invert, bitwise_left_shift, bitwise_not, - bitwise_or, bitwise_right_shift, bitwise_xor, block, bool, bool_, - broadcast, busday_count, busday_offset, busdaycalendar, byte, bytes_, - can_cast, cbrt, cdouble, ceil, character, choose, clip, clongdouble, - complex128, complex64, complexfloating, compress, concat, concatenate, - conj, conjugate, convolve, copysign, copyto, correlate, cos, cosh, - count_nonzero, cross, csingle, cumprod, cumsum, cumulative_prod, - cumulative_sum, datetime64, datetime_as_string, datetime_data, - deg2rad, degrees, diagonal, divide, divmod, dot, double, dtype, e, - einsum, einsum_path, empty, empty_like, equal, errstate, euler_gamma, - exp, exp2, expm1, fabs, finfo, flatiter, flatnonzero, flexible, - float16, float32, float64, float_power, floating, floor, floor_divide, - fmax, fmin, fmod, format_float_positional, format_float_scientific, - frexp, from_dlpack, frombuffer, fromfile, fromfunction, fromiter, - frompyfunc, fromstring, full, full_like, gcd, generic, geomspace, - get_printoptions, getbufsize, geterr, geterrcall, greater, - greater_equal, half, heaviside, hstack, hypot, identity, iinfo, - indices, inexact, inf, inner, int16, int32, int64, int8, int_, intc, - integer, intp, invert, is_busday, isclose, isdtype, isfinite, - isfortran, isinf, isnan, isnat, isscalar, issubdtype, lcm, ldexp, - left_shift, less, less_equal, lexsort, linspace, little_endian, log, - log10, log1p, log2, logaddexp, logaddexp2, 
logical_and, logical_not, - logical_or, logical_xor, logspace, long, longdouble, longlong, matmul, - matvec, matrix_transpose, max, maximum, may_share_memory, mean, memmap, - min, min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, - ndarray, ndim, nditer, negative, nested_iters, newaxis, nextafter, - nonzero, not_equal, number, object_, ones, ones_like, outer, partition, - permute_dims, pi, positive, pow, power, printoptions, prod, - promote_types, ptp, put, putmask, rad2deg, radians, ravel, recarray, - reciprocal, record, remainder, repeat, require, reshape, resize, - result_type, right_shift, rint, roll, rollaxis, round, sctypeDict, - searchsorted, set_printoptions, setbufsize, seterr, seterrcall, shape, - shares_memory, short, sign, signbit, signedinteger, sin, single, sinh, - size, sort, spacing, sqrt, square, squeeze, stack, std, - str_, subtract, sum, swapaxes, take, tan, tanh, tensordot, - timedelta64, trace, transpose, true_divide, trunc, typecodes, ubyte, - ufunc, uint, uint16, uint32, uint64, uint8, uintc, uintp, ulong, - ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot, - vecmat, void, vstack, where, zeros, zeros_like + False_, + ScalarType, + True_, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + any, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_str, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + asin, + asinh, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + base_repr, + binary_repr, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + block, + bool, + bool_, + broadcast, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + can_cast, + cbrt, + cdouble, + ceil, + 
character, + choose, + clip, + clongdouble, + complex64, + complex128, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copysign, + copyto, + correlate, + cos, + cosh, + count_nonzero, + cross, + csingle, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + diagonal, + divide, + divmod, + dot, + double, + dtype, + e, + einsum, + einsum_path, + empty, + empty_like, + equal, + errstate, + euler_gamma, + exp, + exp2, + expm1, + fabs, + finfo, + flatiter, + flatnonzero, + flexible, + float16, + float32, + float64, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromstring, + full, + full_like, + gcd, + generic, + geomspace, + get_printoptions, + getbufsize, + geterr, + geterrcall, + greater, + greater_equal, + half, + heaviside, + hstack, + hypot, + identity, + iinfo, + indices, + inexact, + inf, + inner, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + invert, + is_busday, + isclose, + isdtype, + isfinite, + isfortran, + isinf, + isnan, + isnat, + isscalar, + issubdtype, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + linspace, + little_endian, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + matmul, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + memmap, + min, + min_scalar_type, + minimum, + mod, + modf, + moveaxis, + multiply, + nan, + ndarray, + ndim, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ones, + ones_like, + outer, + partition, + permute_dims, + pi, + positive, + pow, + power, + printoptions, + prod, + 
promote_types, + ptp, + put, + putmask, + rad2deg, + radians, + ravel, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + round, + sctypeDict, + searchsorted, + set_printoptions, + setbufsize, + seterr, + seterrcall, + shape, + shares_memory, + short, + sign, + signbit, + signedinteger, + sin, + single, + sinh, + size, + sort, + spacing, + sqrt, + square, + squeeze, + stack, + std, + str_, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + tensordot, + timedelta64, + trace, + transpose, + true_divide, + trunc, + typecodes, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + unstack, + ushort, + var, + vdot, + vecdot, + vecmat, + void, + vstack, + where, + zeros, + zeros_like, ) # NOTE: It's still under discussion whether these aliases @@ -179,67 +455,176 @@ del ta from . import lib + from . import matrixlib as _mat from .lib import scimath as emath - from .lib._histograms_impl import ( - histogram, histogram_bin_edges, histogramdd - ) - from .lib._nanfunctions_impl import ( - nanargmax, nanargmin, nancumprod, nancumsum, nanmax, nanmean, - nanmedian, nanmin, nanpercentile, nanprod, nanquantile, nanstd, - nansum, nanvar + from .lib._arraypad_impl import pad + from .lib._arraysetops_impl import ( + ediff1d, + in1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, ) from .lib._function_base_impl import ( - select, piecewise, trim_zeros, copy, iterable, percentile, diff, - gradient, angle, unwrap, sort_complex, flip, rot90, extract, place, - vectorize, asarray_chkfinite, average, bincount, digitize, cov, - corrcoef, median, sinc, hamming, hanning, bartlett, blackman, - kaiser, trapezoid, trapz, i0, meshgrid, delete, insert, append, - interp, quantile + angle, + append, + asarray_chkfinite, + average, + 
bartlett, + bincount, + blackman, + copy, + corrcoef, + cov, + delete, + diff, + digitize, + extract, + flip, + gradient, + hamming, + hanning, + i0, + insert, + interp, + iterable, + kaiser, + median, + meshgrid, + percentile, + piecewise, + place, + quantile, + rot90, + select, + sinc, + sort_complex, + trapezoid, + trapz, + trim_zeros, + unwrap, + vectorize, ) - from .lib._twodim_base_impl import ( - diag, diagflat, eye, fliplr, flipud, tri, triu, tril, vander, - histogram2d, mask_indices, tril_indices, tril_indices_from, - triu_indices, triu_indices_from + from .lib._histograms_impl import histogram, histogram_bin_edges, histogramdd + from .lib._index_tricks_impl import ( + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + ravel_multi_index, + s_, + unravel_index, ) - from .lib._shape_base_impl import ( - apply_over_axes, apply_along_axis, array_split, column_stack, dsplit, - dstack, expand_dims, hsplit, kron, put_along_axis, row_stack, split, - take_along_axis, tile, vsplit + from .lib._nanfunctions_impl import ( + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, ) - from .lib._type_check_impl import ( - iscomplexobj, isrealobj, imag, iscomplex, isreal, nan_to_num, real, - real_if_close, typename, mintypecode, common_type + from .lib._npyio_impl import ( + fromregex, + genfromtxt, + load, + loadtxt, + packbits, + save, + savetxt, + savez, + savez_compressed, + unpackbits, ) - from .lib._arraysetops_impl import ( - ediff1d, in1d, intersect1d, isin, setdiff1d, setxor1d, union1d, - unique, unique_all, unique_counts, unique_inverse, unique_values + from .lib._polynomial_impl import ( + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polysub, + polyval, + roots, ) - from .lib._ufunclike_impl import fix, isneginf, isposinf - from 
.lib._arraypad_impl import pad - from .lib._utils_impl import ( - show_runtime, get_include, info + from .lib._shape_base_impl import ( + apply_along_axis, + apply_over_axes, + array_split, + column_stack, + dsplit, + dstack, + expand_dims, + hsplit, + kron, + put_along_axis, + row_stack, + split, + take_along_axis, + tile, + vsplit, ) from .lib._stride_tricks_impl import ( - broadcast_arrays, broadcast_shapes, broadcast_to - ) - from .lib._polynomial_impl import ( - poly, polyint, polyder, polyadd, polysub, polymul, polydiv, polyval, - polyfit, poly1d, roots + broadcast_arrays, + broadcast_shapes, + broadcast_to, ) - from .lib._npyio_impl import ( - savetxt, loadtxt, genfromtxt, load, save, savez, packbits, - savez_compressed, unpackbits, fromregex - ) - from .lib._index_tricks_impl import ( - diag_indices_from, diag_indices, fill_diagonal, ndindex, ndenumerate, - ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index, - index_exp + from .lib._twodim_base_impl import ( + diag, + diagflat, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + tri, + tril, + tril_indices, + tril_indices_from, + triu, + triu_indices, + triu_indices_from, + vander, ) - - from . import matrixlib as _mat - from .matrixlib import ( - asmatrix, bmat, matrix + from .lib._type_check_impl import ( + common_type, + imag, + iscomplex, + iscomplexobj, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real, + real_if_close, + typename, ) + from .lib._ufunclike_impl import fix, isneginf, isposinf + from .lib._utils_impl import get_include, info, show_runtime + from .matrixlib import asmatrix, bmat, matrix # public submodules are imported lazily, therefore are accessible from # __getattr__. 
Note that `distutils` (deprecated) and `array_api` @@ -418,7 +803,7 @@ def __dir__(): ) public_symbols -= { "matrixlib", "matlib", "tests", "conftest", "version", - "compat", "distutils", "array_api" + "distutils", "array_api" } return list(public_symbols) @@ -470,18 +855,19 @@ def _mac_os_check(): from . import exceptions with warnings.catch_warnings(record=True) as w: _mac_os_check() - # Throw runtime error, if the test failed Check for warning and error_message + # Throw runtime error, if the test failed + # Check for warning and report the error_message if len(w) > 0: for _wn in w: if _wn.category is exceptions.RankWarning: - # Ignore other warnings, they may not be relevant (see gh-25433). + # Ignore other warnings, they may not be relevant (see gh-25433) error_message = ( f"{_wn.category.__name__}: {_wn.message}" ) msg = ( "Polyfit sanity test emitted a warning, most likely due " "to using a buggy Accelerate backend." - "\nIf you compiled yourself, more information is available at:" + "\nIf you compiled yourself, more information is available at:" # noqa: E501 "\nhttps://numpy.org/devdocs/building/index.html" "\nOtherwise report this to the vendor " f"that provided NumPy.\n\n{error_message}\n") diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 551921889bf5..272e52f88e83 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -44,6 +44,7 @@ from numpy._typing import ( _DTypeLikeVoid, _VoidDTypeLike, # Shapes + _AnyShape, _Shape, _ShapeLike, # Scalars @@ -215,7 +216,7 @@ from typing import ( # if not available at runtime. 
This is because the `typeshed` stubs for the standard # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi -from _typeshed import StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite +from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite from typing_extensions import CapsuleType, TypeVar from numpy import ( @@ -431,6 +432,8 @@ from numpy._core.shape_base import ( ) from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ +from ._globals import _CopyMode as _CopyMode +from ._globals import _NoValue as _NoValue from numpy.lib import ( scimath as emath, @@ -473,7 +476,6 @@ from numpy.lib._function_base_impl import ( place, asarray_chkfinite, average, - bincount, digitize, cov, corrcoef, @@ -495,8 +497,6 @@ from numpy.lib._function_base_impl import ( quantile, ) -from numpy._globals import _CopyMode - from numpy.lib._histograms_impl import ( histogram_bin_edges, histogram, @@ -545,8 +545,6 @@ from numpy.lib._npyio_impl import ( save, savez, savez_compressed, - packbits, - unpackbits, fromregex, ) @@ -787,19 +785,19 @@ _ImagT_co = TypeVar("_ImagT_co", covariant=True) _CallableT = TypeVar("_CallableT", bound=Callable[..., object]) _DTypeT = TypeVar("_DTypeT", bound=dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible]) -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) -_ArrayT_co = TypeVar("_ArrayT_co", bound=NDArray[Any], covariant=True) +_ArrayT = TypeVar("_ArrayT", bound=ndarray) +_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True) _IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[integer | np.bool | object_]) _RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_]) 
_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_]) _ShapeT = TypeVar("_ShapeT", bound=_Shape) -_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) _1DShapeT = TypeVar("_1DShapeT", bound=_1D) -_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, covariant=True) +_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True) _1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]]) # (1,) | (1, 1) | (1, 1, 1) | ... _ScalarT = TypeVar("_ScalarT", bound=generic) @@ -810,9 +808,9 @@ _FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covar _IntegerT = TypeVar("_IntegerT", bound=integer) _IntegerT_co = TypeVar("_IntegerT_co", bound=integer, default=integer, covariant=True) -_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) -_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) -_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) +_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) # pyright: ignore[reportDeprecated] _ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) _BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool) @@ -829,6 +827,7 @@ _CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_ _TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True) _DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=dt.date | int | None, covariant=True) _TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) ### Type Aliases (for internal use only) @@ -867,8 +866,6 @@ 
_SignedIntegerCType: TypeAlias = type[ ] # fmt: skip _FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] _IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType -_NumberCType: TypeAlias = _IntegerCType -_GenericCType: TypeAlias = _NumberCType | type[ct.c_bool | ct.c_char | ct.py_object[Any]] # some commonly used builtin types that are known to result in a # `dtype[object_]`, when their *type* is passed to the `dtype` constructor @@ -1068,10 +1065,6 @@ class _SupportsFileMethods(SupportsFlush, Protocol): @type_check_only class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ... -@type_check_only -class _SupportsItem(Protocol[_T_co]): - def item(self, /) -> _T_co: ... - @type_check_only class _SupportsDLPack(Protocol[_T_contra]): def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... @@ -1178,7 +1171,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[float64] | None, + dtype: type[float64 | ct.c_double] | _Float64Codes | _DoubleCodes | None, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ... @@ -1208,36 +1201,31 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[builtins.bool | np.bool], + dtype: type[builtins.bool | np.bool | ct.c_bool] | _BoolCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[np.bool]: ... - # NOTE: `_: type[int]` also accepts `type[int | bool]` @overload def __new__( cls, - dtype: type[int | int_ | np.bool], + dtype: type[int], # also accepts `type[builtins.bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[int_ | np.bool]: ... - # NOTE: `_: type[float]` also accepts `type[float | int | bool]` - # NOTE: `float64` inherits from `float` at runtime; but this isn't - # reflected in these stubs. 
So an explicit `float64` is required here. @overload def __new__( cls, - dtype: type[float | float64 | int_ | np.bool] | None, + dtype: type[float], # also accepts `type[int | bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[float64 | int_ | np.bool]: ... - # NOTE: `_: type[complex]` also accepts `type[complex | float | int | bool]` @overload def __new__( cls, - dtype: type[complex | complex128 | float64 | int_ | np.bool], + dtype: type[complex], # also accepts `type[float | int | bool]` align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1245,7 +1233,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[bytes], # also includes `type[bytes_]` + dtype: type[bytes | ct.c_char] | _BytesCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1253,7 +1241,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[str], # also includes `type[str_]` + dtype: type[str] | _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1267,7 +1255,7 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[memoryview | void], + dtype: type[void | memoryview] | _VoidDTypeLike | _VoidCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., @@ -1277,127 +1265,182 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: type[_BuiltinObjectLike | object_], + dtype: type[object_ | _BuiltinObjectLike | ct.py_object[Any]] | _ObjectCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[str, Any] = ..., ) -> dtype[object_]: ... - # Unions of builtins. 
+ # `unsignedinteger` string-based representations and ctypes @overload def __new__( cls, - dtype: type[bytes | str], + dtype: _UInt8Codes | _UByteCodes | type[ct.c_uint8 | ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[character]: ... + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint8]: ... @overload def __new__( cls, - dtype: type[bytes | str | memoryview], + dtype: _UInt16Codes | _UShortCodes | type[ct.c_uint16 | ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[flexible]: ... + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint16]: ... @overload def __new__( cls, - dtype: type[complex | bytes | str | memoryview | _BuiltinObjectLike], + dtype: _UInt32Codes | _UIntCCodes | type[ct.c_uint32 | ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[str, Any] = ..., - ) -> dtype[np.bool | int_ | float64 | complex128 | flexible | object_]: ... - - # `unsignedinteger` string-based representations and ctypes - @overload - def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ... - @overload - def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ... - @overload - def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ... - @overload - def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ... 
- @overload - def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ... - @overload - def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ... - @overload - def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ... - # NOTE: We're assuming here that `uint_ptr_t == size_t`, - # an assumption that does not hold in rare cases (same for `ssize_t`) + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint32]: ... @overload - def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ... + def __new__( + cls, + dtype: _UInt64Codes | _ULongLongCodes | type[ct.c_uint64 | ct.c_ulonglong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uint64]: ... @overload - def __new__(cls, dtype: _ULongCodes | type[ct.c_ulong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulong]: ... + def __new__( + cls, + dtype: _UIntPCodes | type[ct.c_void_p | ct.c_size_t], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[uintp]: ... @overload - def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ... + def __new__( + cls, + dtype: _ULongCodes | type[ct.c_ulong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[ulong]: ... 
# `signedinteger` string-based representations and ctypes @overload - def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ... - @overload - def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ... - @overload - def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ... - @overload - def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ... - @overload - def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ... + def __new__( + cls, + dtype: _Int8Codes | _ByteCodes | type[ct.c_int8 | ct.c_byte], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int8]: ... @overload - def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ... + def __new__( + cls, + dtype: _Int16Codes | _ShortCodes | type[ct.c_int16 | ct.c_short], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int16]: ... @overload - def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ... + def __new__( + cls, + dtype: _Int32Codes | _IntCCodes | type[ct.c_int32 | ct.c_int], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int32]: ... 
@overload - def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ... + def __new__( + cls, + dtype: _Int64Codes | _LongLongCodes | type[ct.c_int64 | ct.c_longlong], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[int64]: ... @overload - def __new__(cls, dtype: _LongCodes | type[ct.c_long], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[long]: ... + def __new__( + cls, + dtype: _IntPCodes | type[intp | ct.c_ssize_t], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[intp]: ... @overload - def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ... + def __new__( + cls, + dtype: _LongCodes | type[ct.c_long], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[long]: ... # `floating` string-based representations and ctypes @overload - def __new__(cls, dtype: _Float16Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ... - @overload - def __new__(cls, dtype: _Float32Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ... - @overload - def __new__(cls, dtype: _Float64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... - @overload - def __new__(cls, dtype: _HalfCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ... 
- @overload - def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ... + def __new__( + cls, + dtype: _Float16Codes | _HalfCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float16]: ... @overload - def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ... + def __new__( + cls, + dtype: _Float32Codes | _SingleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[float32]: ... + # float64 codes are covered by overload 1 @overload - def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ... + def __new__( + cls, + dtype: _LongDoubleCodes | type[ct.c_longdouble], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[longdouble]: ... # `complexfloating` string-based representations @overload - def __new__(cls, dtype: _Complex64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ... - @overload - def __new__(cls, dtype: _Complex128Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... - @overload - def __new__(cls, dtype: _CSingleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ... + def __new__( + cls, + dtype: _Complex64Codes | _CSingleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex64]: ... 
@overload - def __new__(cls, dtype: _CDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ... + def __new__( + cls, + dtype: _Complex128Codes | _CDoubleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complex128]: ... @overload - def __new__(cls, dtype: _CLongDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ... + def __new__( + cls, + dtype: _CLongDoubleCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[clongdouble]: ... # Miscellaneous string-based representations and ctypes @overload - def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ... - @overload - def __new__(cls, dtype: _TD64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ... - @overload - def __new__(cls, dtype: _DT64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ... - @overload - def __new__(cls, dtype: _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... - @overload - def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... - @overload - def __new__(cls, dtype: _VoidCodes | _VoidDTypeLike, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ... 
+ def __new__( + cls, + dtype: _TD64Codes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[timedelta64]: ... @overload - def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ... + def __new__( + cls, + dtype: _DT64Codes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[datetime64]: ... # `StringDType` requires special treatment because it has no scalar type @overload @@ -1461,35 +1504,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @overload def __new__( cls, - dtype: _NumberCodes | _NumberCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[number]: ... - @overload - def __new__( - cls, - dtype: _CharacterCodes | type[ct.c_char], + dtype: _CharacterCodes | type[bytes | builtins.str | ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., + metadata: dict[str, Any] = ..., ) -> dtype[character]: ... - @overload - def __new__( - cls, - dtype: _FlexibleCodes | type[ct.c_char], - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[flexible]: ... - @overload - def __new__( - cls, - dtype: _GenericCodes | _GenericCType, - align: builtins.bool = ..., - copy: builtins.bool = ..., - metadata: dict[builtins.str, Any] = ..., - ) -> dtype[generic]: ... # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... @overload @@ -1502,10 +1521,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): ) -> dtype: ... # Catch-all overload for object-likes - # NOTE: `object_ | Any` is *not* equivalent to `Any` -- it describes some - # (static) type `T` s.t. 
`object_ <: T <: builtins.object` (`<:` denotes - # the subtyping relation, the (gradual) typing analogue of `issubclass()`). - # https://typing.readthedocs.io/en/latest/spec/concepts.html#union-types + # NOTE: `object_ | Any` is NOT equivalent to `Any`. It is specified to behave + # like a "sum type" (a.k.a. variant type, discriminated union, or tagged union). + # So the union of a type and `Any` is not the same "union type" that all other + # unions are (by definition). + # https://typing.python.org/en/latest/spec/concepts.html#union-types @overload def __new__( cls, @@ -1582,11 +1602,11 @@ class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta): @property def num(self) -> _DTypeNum: ... @property - def shape(self) -> tuple[()] | _Shape: ... + def shape(self) -> _AnyShape: ... @property def ndim(self) -> int: ... @property - def subdtype(self) -> tuple[dtype, _Shape] | None: ... + def subdtype(self) -> tuple[dtype, _AnyShape] | None: ... def newbyteorder(self, new_order: _ByteOrder = ..., /) -> Self: ... @property def str(self) -> LiteralString: ... @@ -1630,9 +1650,9 @@ class flatiter(Generic[_ArrayT_co]): @overload def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DTypeT, /) -> ndarray[_1DShapeT, _DTypeT]: ... @overload - def __array__(self: flatiter[ndarray[_Shape, _DTypeT]], dtype: None = ..., /) -> ndarray[_Shape, _DTypeT]: ... + def __array__(self: flatiter[ndarray[Any, _DTypeT]], dtype: None = ..., /) -> ndarray[_AnyShape, _DTypeT]: ... @overload - def __array__(self, dtype: _DTypeT, /) -> ndarray[_Shape, _DTypeT]: ... + def __array__(self, dtype: _DTypeT, /) -> ndarray[_AnyShape, _DTypeT]: ... @type_check_only class _ArrayOrScalarCommon: @@ -1707,18 +1727,18 @@ class _ArrayOrScalarCommon: @overload # axis=index, out=None (default) def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... 
@overload # axis=index, out=ndarray - def argmax(self, /, axis: SupportsIndex | None, out: _ArrayT, *, keepdims: builtins.bool = False) -> _ArrayT: ... + def argmax(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... @overload - def argmax(self, /, axis: SupportsIndex | None = None, *, out: _ArrayT, keepdims: builtins.bool = False) -> _ArrayT: ... + def argmax(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... @overload # axis=None (default), out=None (default), keepdims=False (default) def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... @overload # axis=index, out=None (default) def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... @overload # axis=index, out=ndarray - def argmin(self, /, axis: SupportsIndex | None, out: _ArrayT, *, keepdims: builtins.bool = False) -> _ArrayT: ... + def argmin(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... @overload - def argmin(self, /, axis: SupportsIndex | None = None, *, out: _ArrayT, keepdims: builtins.bool = False) -> _ArrayT: ... + def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... @overload # out=None (default) def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... @@ -2049,13 +2069,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __array__( - self, dtype: None = ..., /, *, copy: bool | None = ... - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
@overload - def __array__( - self, dtype: _DTypeT, /, *, copy: bool | None = ... - ) -> ndarray[_ShapeT_co, _DTypeT]: ... + def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... def __array_ufunc__( self, @@ -2087,11 +2103,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): ) -> ndarray[_ShapeT, _DTypeT]: ... @overload - def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_Shape, _DTypeT_co]: ... + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... @overload - def __getitem__(self, key: _ToIndices, /) -> ndarray[_Shape, _DTypeT_co]: ... + def __getitem__(self, key: _ToIndices, /) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co, np.dtype]: ... @overload @@ -2169,6 +2185,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): *args: SupportsIndex, ) -> str: ... + @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` + def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ... @overload def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... @overload @@ -2190,13 +2208,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def squeeze( self, axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ..., - ) -> ndarray[_Shape, _DTypeT_co]: ... + ) -> ndarray[_AnyShape, _DTypeT_co]: ... def swapaxes( self, axis1: SupportsIndex, axis2: SupportsIndex, - ) -> ndarray[_Shape, _DTypeT_co]: ... + ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload def transpose(self, axes: _ShapeLike | None, /) -> Self: ... 
@@ -2323,7 +2341,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., - ) -> ndarray[_Shape, _DTypeT_co]: ... + ) -> ndarray[_AnyShape, _DTypeT_co]: ... # 1D + 1D returns a scalar; # all other with at least 1 non-0D array return an ndarray. @@ -2399,7 +2417,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): axis: SupportsIndex | None = ..., out: None = ..., mode: _ModeKind = ..., - ) -> ndarray[_Shape, _DTypeT_co]: ... + ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload def take( self, @@ -2420,7 +2438,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): self, repeats: _ArrayLikeInt_co, axis: SupportsIndex, - ) -> ndarray[_Shape, _DTypeT_co]: ... + ) -> ndarray[_AnyShape, _DTypeT_co]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... @@ -2496,7 +2514,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): *shape: SupportsIndex, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[_Shape, _DTypeT_co]: ... + ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload # (sequence[index]) def reshape( self, @@ -2505,7 +2523,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): *, order: _OrderACF = "C", copy: builtins.bool | None = None, - ) -> ndarray[_Shape, _DTypeT_co]: ... + ) -> ndarray[_AnyShape, _DTypeT_co]: ... @overload def astype( @@ -2808,6 +2826,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + # Keep in sync with `MaskedArray.__add__` @overload def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... 
@overload @@ -2841,10 +2860,21 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload + def __add__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... + @overload + def __add__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... + @overload + def __add__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... + @overload def __add__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__radd__` @overload # signature equivalent to __add__ def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2878,10 +2908,21 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload + def __radd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... + @overload + def __radd__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... + @overload + def __radd__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... + @overload def __radd__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__sub__` @overload def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... 
@overload @@ -2919,6 +2960,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + # Keep in sync with `MaskedArray.__rsub__` @overload def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload @@ -2987,6 +3029,12 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload + def __mul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload def __mul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @@ -3022,6 +3070,12 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload + def __rmul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @@ -3326,148 +3380,83 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): # object and its value is >= 0 # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't # work, as this will lead to `false negatives` when using these inplace ops. + # Keep in sync with `MaskedArray.__iadd__` @overload def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
@overload - def __iadd__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __iadd__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iadd__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload def __iadd__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - + def __iadd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __isub__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, + def __iadd__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, /, ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __isub__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
+ + # Keep in sync with `MaskedArray.__isub__` @overload - def __isub__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __isub__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __isub__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload def __isub__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __isub__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # Keep in sync with `MaskedArray.__imul__` @overload def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, + self: ndarray[Any, dtype[integer | character] | dtypes.StringDType], other: _ArrayLikeInt_co, / ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __imul__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imul__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __imul__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imul__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # Keep in sync with `MaskedArray.__ipow__` @overload - def __itruediv__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __itruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __itruediv__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __itruediv__( - self: NDArray[complexfloating], - other: _ArrayLikeComplex_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
+ # Keep in sync with `MaskedArray.__itruediv__` @overload - def __ifloordiv__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ifloordiv__( - self: NDArray[complexfloating], - other: _ArrayLikeComplex_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __itruediv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __itruediv__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # keep in sync with `__imod__` and `MaskedArray.__ifloordiv__` @overload - def __ipow__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
@overload - def __ipow__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ipow__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # keep in sync with `__ifloordiv__` @overload - def __imod__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imod__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imod__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imod__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imod__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload @@ -3479,84 +3468,55 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload def __imod__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # keep in sync with `__irshift__` @overload - def __ilshift__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
- @overload - def __ilshift__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ilshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # keep in sync with `__ilshift__` @overload - def __irshift__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __irshift__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __irshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # keep in sync with `__ixor__` and `__ior__` @overload def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __iand__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __iand__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __iand__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # keep in sync with `__iand__` and `__ior__` @overload def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ixor__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ixor__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
+ def __ixor__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # keep in sync with `__iand__` and `__ixor__` @overload def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ior__( - self: NDArray[unsignedinteger], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __ior__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __ior__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # @overload def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __imatmul__(self: NDArray[unsignedinteger], other: _ArrayLikeUInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload - def __imatmul__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def __imatmul__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imatmul__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __imatmul__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... - @overload def __imatmul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload def __imatmul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
+ # def __dlpack__( self: NDArray[number], /, @@ -4272,9 +4232,9 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ... # type: ignore[override] -half: TypeAlias = floating[_NBitHalf] -single: TypeAlias = floating[_NBitSingle] -double: TypeAlias = floating[_NBitDouble] +half: TypeAlias = float16 +single: TypeAlias = float32 +double: TypeAlias = float64 longdouble: TypeAlias = floating[_NBitLongDouble] # The main reason for `complexfloating` having two typevars is cosmetic. @@ -4372,7 +4332,7 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): self, other: number[_NBit], mod: None = None, / ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... -complex64: TypeAlias = complexfloating[_32Bit, _32Bit] +complex64: TypeAlias = complexfloating[_32Bit] class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] @overload @@ -4433,9 +4393,9 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... 
-csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] -cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] -clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] +csingle: TypeAlias = complex64 +cdouble: TypeAlias = complex128 +clongdouble: TypeAlias = complexfloating[_NBitLongDouble] class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): @property @@ -4841,19 +4801,17 @@ arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None] arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None] bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]] bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None] -bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None] bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]] bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]] cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None] ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None] -conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None] copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None] cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None] cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None] deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None] degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None] -divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] +divide: _UFunc_Nin2_Nout1[L['divide'], L[11], None] divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None] equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None] exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None] @@ -4896,7 +4854,6 @@ matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"] matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]] maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] -mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None] multiply: 
_UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]] negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None] @@ -4920,7 +4877,6 @@ square: _UFunc_Nin1_Nout1[L['square'], L[18], None] subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None] tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] -true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] @@ -4935,10 +4891,14 @@ atanh = arctanh atan2 = arctan2 concat = concatenate bitwise_left_shift = left_shift +bitwise_not = invert bitwise_invert = invert bitwise_right_shift = right_shift +conj = conjugate +mod = remainder permute_dims = transpose pow = power +true_divide = divide class errstate: def __init__( @@ -4978,7 +4938,7 @@ class broadcast: @property def numiter(self) -> int: ... @property - def shape(self) -> _Shape: ... + def shape(self) -> _AnyShape: ... @property def size(self) -> int: ... def __next__(self) -> tuple[Any, ...]: ... @@ -5256,146 +5216,189 @@ class poly1d: ) -> poly1d: ... class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): - __array_priority__: ClassVar[float] + __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] + def __new__( - subtype, + subtype, # pyright: ignore[reportSelfClsParameterName] data: ArrayLike, dtype: DTypeLike = ..., copy: builtins.bool = ..., - ) -> matrix[_2D, Any]: ... + ) -> matrix[_2D, Incomplete]: ... def __array_finalize__(self, obj: object) -> None: ... - @overload + @overload # type: ignore[override] def __getitem__( - self, - key: ( - SupportsIndex - | _ArrayLikeInt_co - | tuple[SupportsIndex | _ArrayLikeInt_co, ...] - ), - /, - ) -> Any: ... + self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], / + ) -> Incomplete: ... 
@overload - def __getitem__( - self, - key: slice | EllipsisType | SupportsIndex | _ArrayLikeInt_co | tuple[slice | EllipsisType | _ArrayLikeInt_co | SupportsIndex | None, ...] | None, - /, - ) -> matrix[_2D, _DTypeT_co]: ... + def __getitem__(self, key: _ToIndices, /) -> matrix[_2D, _DTypeT_co]: ... @overload - def __getitem__(self: NDArray[void], key: str, /) -> matrix[_2D, dtype]: ... + def __getitem__(self: matrix[Any, dtype[void]], key: str, /) -> matrix[_2D, dtype]: ... @overload - def __getitem__(self: NDArray[void], key: list[str], /) -> matrix[_2DShapeT_co, dtype[void]]: ... + def __getitem__(self: matrix[Any, dtype[void]], key: list[str], /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __imul__(self, other: ArrayLike, /) -> Self: ... - def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... - def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... - def __imul__(self, other: ArrayLike, /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... - def __pow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Any]: ... - def __rpow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Any]: ... - def __ipow__(self, other: ArrayLike, /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... + # + def __pow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ipow__(self, other: ArrayLike, /) -> Self: ... 
# type: ignore[misc, override] + # keep in sync with `prod` and `mean` + @overload # type: ignore[override] + def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... @overload - def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... @overload - def sum(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[_2D, Any]: ... + def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... @overload - def sum(self, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... + def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + # keep in sync with `sum` and `mean` + @overload # type: ignore[override] + def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... @overload - def mean(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... @overload - def mean(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[_2D, Any]: ... + def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... @overload - def mean(self, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... + def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + # keep in sync with `sum` and `prod` + @overload # type: ignore[override] + def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... 
@overload - def std(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... + def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... @overload - def std(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[_2D, Any]: ... + def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... @overload - def std(self, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: _ArrayT = ..., ddof: float = ...) -> _ArrayT: ... + def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + # keep in sync with `var` + @overload # type: ignore[override] + def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... @overload - def var(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... + def std( + self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0 + ) -> matrix[_2D, Incomplete]: ... @overload - def var(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[_2D, Any]: ... + def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... @overload - def var(self, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: _ArrayT = ..., ddof: float = ...) -> _ArrayT: ... + def std( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + # keep in sync with `std` + @overload # type: ignore[override] + def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... 
@overload - def prod(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... + def var( + self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0 + ) -> matrix[_2D, Incomplete]: ... @overload - def prod(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[_2D, Any]: ... + def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... @overload - def prod(self, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... + def var( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + # keep in sync with `all` + @overload # type: ignore[override] + def any(self, axis: None = None, out: None = None) -> np.bool: ... @overload - def any(self, axis: None = ..., out: None = ...) -> np.bool: ... + def any(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ... @overload - def any(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[np.bool]]: ... + def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... @overload - def any(self, axis: _ShapeLike | None = ..., out: _ArrayT = ...) -> _ArrayT: ... + def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + # keep in sync with `any` + @overload # type: ignore[override] + def all(self, axis: None = None, out: None = None) -> np.bool: ... @overload - def all(self, axis: None = ..., out: None = ...) -> np.bool: ... + def all(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ... @overload - def all(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[np.bool]]: ... + def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... 
@overload - def all(self, axis: _ShapeLike | None = ..., out: _ArrayT = ...) -> _ArrayT: ... + def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + # keep in sync with `min` and `ptp` + @overload # type: ignore[override] + def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... @overload - def max(self: NDArray[_ScalarT], axis: None = ..., out: None = ...) -> _ScalarT: ... + def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def max(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, _DTypeT_co]: ... + def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... @overload - def max(self, axis: _ShapeLike | None = ..., out: _ArrayT = ...) -> _ArrayT: ... + def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + # keep in sync with `max` and `ptp` + @overload # type: ignore[override] + def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... @overload - def min(self: NDArray[_ScalarT], axis: None = ..., out: None = ...) -> _ScalarT: ... + def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def min(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, _DTypeT_co]: ... + def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... @overload - def min(self, axis: _ShapeLike | None = ..., out: _ArrayT = ...) -> _ArrayT: ... + def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + # keep in sync with `max` and `min` + @overload + def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... @overload - def argmax(self: NDArray[_ScalarT], axis: None = ..., out: None = ...) -> intp: ... 
+ def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... @overload - def argmax(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[intp]]: ... + def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... @overload - def argmax(self, axis: _ShapeLike | None = ..., out: _ArrayT = ...) -> _ArrayT: ... + def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + # keep in sync with `argmin` + @overload # type: ignore[override] + def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ... @overload - def argmin(self: NDArray[_ScalarT], axis: None = ..., out: None = ...) -> intp: ... + def argmax(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ... @overload - def argmin(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[intp]]: ... + def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... @overload - def argmin(self, axis: _ShapeLike | None = ..., out: _ArrayT = ...) -> _ArrayT: ... + def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + # keep in sync with `argmax` + @overload # type: ignore[override] + def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ... + @overload + def argmin(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ... + @overload + def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... @overload - def ptp(self: NDArray[_ScalarT], axis: None = ..., out: None = ...) -> _ScalarT: ... + def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... 
# pyright: ignore[reportIncompatibleMethodOverride] + + #the second overload handles the (rare) case that the matrix is not 2-d @overload - def ptp(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, _DTypeT_co]: ... + def tolist(self: matrix[_2D, dtype[generic[_T]]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] @overload - def ptp(self, axis: _ShapeLike | None = ..., out: _ArrayT = ...) -> _ArrayT: ... + def tolist(self) -> Incomplete: ... # pyright: ignore[reportIncompatibleMethodOverride] - def squeeze(self, axis: _ShapeLike | None = ...) -> matrix[_2D, _DTypeT_co]: ... - def tolist(self: matrix[Any, dtype[generic[_T]]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] - def ravel(self, /, order: _OrderKACF = "C") -> matrix[tuple[L[1], int], _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] - def flatten(self, /, order: _OrderKACF = "C") -> matrix[tuple[L[1], int], _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] + # these three methods will at least return a `2-d` array of shape (1, n) + def squeeze(self, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ... + def ravel(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def flatten(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + # matrix.T is inherited from _ScalarOrArrayCommon + def getT(self) -> Self: ... @property - def T(self) -> matrix[_2D, _DTypeT_co]: ... - @property - def I(self) -> matrix[_2D, Any]: ... # noqa: E743 + def I(self) -> matrix[_2D, Incomplete]: ... # noqa: E743 + def getI(self) -> matrix[_2D, Incomplete]: ... @property def A(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ... + def getA(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ... @property - def A1(self) -> ndarray[_Shape, _DTypeT_co]: ... 
+ def A1(self) -> ndarray[_AnyShape, _DTypeT_co]: ... + def getA1(self) -> ndarray[_AnyShape, _DTypeT_co]: ... @property def H(self) -> matrix[_2D, _DTypeT_co]: ... - def getT(self) -> matrix[_2D, _DTypeT_co]: ... - def getI(self) -> matrix[_2D, Any]: ... - def getA(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ... - def getA1(self) -> ndarray[_Shape, _DTypeT_co]: ... def getH(self) -> matrix[_2D, _DTypeT_co]: ... def from_dlpack( diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py index 0167a2fe7985..067e38798718 100644 --- a/numpy/_array_api_info.py +++ b/numpy/_array_api_info.py @@ -8,21 +8,21 @@ """ from numpy._core import ( - dtype, bool, - intp, + complex64, + complex128, + dtype, + float32, + float64, int8, int16, int32, int64, + intp, uint8, uint16, uint32, uint64, - float32, - float64, - complex64, - complex128, ) @@ -94,14 +94,14 @@ def capabilities(self): >>> info = np.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, - 'data-dependent shapes': True} + 'data-dependent shapes': True, + 'max dimensions': 64} """ return { "boolean indexing": True, "data-dependent shapes": True, - # 'max rank' will be part of the 2024.12 standard - # "max rank": 64, + "max dimensions": 64, } def default_device(self): diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py index defc704c41eb..7975dd9dba65 100644 --- a/numpy/_build_utils/gitversion.py +++ b/numpy/_build_utils/gitversion.py @@ -22,8 +22,8 @@ def git_version(version): # Append last commit date and hash to dev version information, # if available - import subprocess import os.path + import subprocess git_hash = '' try: diff --git a/numpy/_build_utils/process_src_template.py b/numpy/_build_utils/process_src_template.py index 259c4eaa1628..8bd1ea872a42 100644 --- a/numpy/_build_utils/process_src_template.py +++ b/numpy/_build_utils/process_src_template.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 -import os import argparse import importlib.util +import os 
def get_processor(): diff --git a/numpy/_build_utils/tempita.py b/numpy/_build_utils/tempita.py index 32e400f9c907..e3571ef8747d 100644 --- a/numpy/_build_utils/tempita.py +++ b/numpy/_build_utils/tempita.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 -import sys -import os import argparse +import os +import sys import tempita diff --git a/numpy/_build_utils/tempita/_looper.py b/numpy/_build_utils/tempita/_looper.py index 4864f2949605..e7d6b2649fb5 100644 --- a/numpy/_build_utils/tempita/_looper.py +++ b/numpy/_build_utils/tempita/_looper.py @@ -150,7 +150,7 @@ def _compare_group(self, item, other, getter): return getattr(item, getter)() != getattr(other, getter)() else: return getattr(item, getter) != getattr(other, getter) - elif hasattr(getter, '__call__'): + elif callable(getter): return getter(item) != getter(other) else: return item[getter] != other[getter] diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py index 446658fc15f8..88ead791574b 100644 --- a/numpy/_build_utils/tempita/_tempita.py +++ b/numpy/_build_utils/tempita/_tempita.py @@ -29,9 +29,9 @@ def foo(bar): If there are syntax errors ``TemplateError`` will be raised. 
""" +import os import re import sys -import os import tokenize from io import StringIO @@ -152,6 +152,7 @@ def __init__( if default_inherit is not None: self.default_inherit = default_inherit + @classmethod def from_filename( cls, filename, @@ -172,8 +173,6 @@ def from_filename( get_template=get_template, ) - from_filename = classmethod(from_filename) - def __repr__(self): return f"<{self.__class__.__name__} {id(self):x} name={self.name!r}>" @@ -727,7 +726,7 @@ def parse(s, name=None, line_offset=0, delimiters=None): >>> parse('{{py:x=1}}') [('py', (1, 3), 'x=1')] >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}') - [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] # noqa: E501 + [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] Some exceptions:: @@ -759,7 +758,7 @@ def parse(s, name=None, line_offset=0, delimiters=None): Traceback (most recent call last): ... TemplateError: Multi-line py blocks must start with a newline at line 1 column 3 - """ + """ # noqa: E501 if delimiters is None: delimiters = ( Template.default_namespace["start_braces"], @@ -786,13 +785,12 @@ def parse_expr(tokens, name, context=()): expr = expr.replace("\r\n", "\n") expr = expr.replace("\r", "") expr += "\n" - else: - if "\n" in expr: - raise TemplateError( - "Multi-line py blocks must start with a newline", - position=pos, - name=name, - ) + elif "\n" in expr: + raise TemplateError( + "Multi-line py blocks must start with a newline", + position=pos, + name=name, + ) return ("py", pos, expr), tokens[1:] elif expr in ("continue", "break"): if "for" not in context: @@ -973,7 +971,7 @@ def get_token(pos=False): tok_type, tok_string = get_token() if tok_type == tokenize.ENDMARKER: break - if tok_type == tokenize.OP and (tok_string == "*" or tok_string == "**"): + if tok_type == tokenize.OP and tok_string in {"*", "**"}: var_arg_type = tok_string tok_type, tok_string = 
get_token() if tok_type != tokenize.NAME: @@ -1066,10 +1064,11 @@ def isolate_expression(string, start_pos, end_pos): def fill_command(args=None): - import sys import optparse - import pkg_resources import os + import sys + + import pkg_resources if args is None: args = sys.argv[1:] diff --git a/numpy/_configtool.py b/numpy/_configtool.py index 70a14b876bcc..db7831c33951 100644 --- a/numpy/_configtool.py +++ b/numpy/_configtool.py @@ -1,9 +1,9 @@ import argparse -from pathlib import Path import sys +from pathlib import Path -from .version import __version__ from .lib._utils_impl import get_include +from .version import __version__ def main() -> None: diff --git a/numpy/_core/__init__.py b/numpy/_core/__init__.py index 9f3a20bc1578..b0be8d1cbab6 100644 --- a/numpy/_core/__init__.py +++ b/numpy/_core/__init__.py @@ -10,7 +10,6 @@ from numpy.version import version as __version__ - # disables OpenBLAS affinity setting of the main thread that limits # python threads or processes to one core env_added = [] @@ -23,29 +22,64 @@ from . import multiarray except ImportError as exc: import sys - msg = """ + + # Bypass for the module re-initialization opt-out + if exc.msg == "cannot load module more than once per process": + raise + + # Basically always, the problem should be that the C module is wrong/missing... 
+ if ( + isinstance(exc, ModuleNotFoundError) + and exc.name == "numpy._core._multiarray_umath" + ): + import sys + candidates = [] + for path in __path__: + candidates.extend( + f for f in os.listdir(path) if f.startswith("_multiarray_umath")) + if len(candidates) == 0: + bad_c_module_info = ( + "We found no compiled module, did NumPy build successfully?\n") + else: + candidate_str = '\n * '.join(candidates) + # cache_tag is documented to be possibly None, so just use name if it is + # this guesses at cache_tag being the same as the extension module scheme + tag = sys.implementation.cache_tag or sys.implementation.name + bad_c_module_info = ( + f"The following compiled module files exist, but seem incompatible\n" + f"with with either python '{tag}' or the " + f"platform '{sys.platform}':\n\n * {candidate_str}\n" + ) + else: + bad_c_module_info = "" + + major, minor, *_ = sys.version_info + msg = f""" IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! Importing the numpy C-extensions failed. This error can happen for many reasons, often due to issues with your setup or how NumPy was installed. - +{bad_c_module_info} We have compiled some common reasons and troubleshooting tips at: https://numpy.org/devdocs/user/troubleshooting-importerror.html Please note and check the following: - * The Python version is: Python%d.%d from "%s" - * The NumPy version is: "%s" + * The Python version is: Python {major}.{minor} from "{sys.executable}" + * The NumPy version is: "{__version__}" and make sure that they are the versions you expect. -Please carefully study the documentation linked above for further help. -Original error was: %s -""" % (sys.version_info[0], sys.version_info[1], sys.executable, - __version__, exc) +Please carefully study the information and documentation linked above. +This is unlikely to be a NumPy issue but will be caused by a bad install +or environment on your machine. 
+ +Original error was: {exc} +""" + raise ImportError(msg) from exc finally: for envkey in env_added: @@ -69,37 +103,43 @@ raise ImportError(msg.format(path)) from . import numerictypes as nt -from .numerictypes import sctypes, sctypeDict +from .numerictypes import sctypeDict, sctypes + multiarray.set_typeDict(nt.sctypeDict) -from . import numeric -from .numeric import * -from . import fromnumeric +from . import ( + _machar, + einsumfunc, + fromnumeric, + function_base, + getlimits, + numeric, + shape_base, +) +from .einsumfunc import * from .fromnumeric import * -from .records import record, recarray -# Note: module name memmap is overwritten by a class with same name -from .memmap import * -from . import function_base from .function_base import * -from . import _machar -from . import getlimits from .getlimits import * -from . import shape_base + +# Note: module name memmap is overwritten by a class with same name +from .memmap import * +from .numeric import * +from .records import recarray, record from .shape_base import * -from . import einsumfunc -from .einsumfunc import * -del nt -from .numeric import absolute as abs +del nt # do this after everything else, to minimize the chance of this misleadingly # appearing in an import-time traceback -from . import _add_newdocs -from . import _add_newdocs_scalars # add these for module-freeze analysis (like PyInstaller) -from . import _dtype_ctypes -from . import _internal -from . import _dtype -from . import _methods +from . 
import ( + _add_newdocs, + _add_newdocs_scalars, + _dtype, + _dtype_ctypes, + _internal, + _methods, +) +from .numeric import absolute as abs acos = numeric.arccos acosh = numeric.arccosh @@ -176,5 +216,6 @@ def __getattr__(name): del copyreg, _ufunc_reduce, _DType_reduce from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/_core/__init__.pyi b/numpy/_core/__init__.pyi index 40d9c411b97c..a8884917f34a 100644 --- a/numpy/_core/__init__.pyi +++ b/numpy/_core/__init__.pyi @@ -1,2 +1,666 @@ -# NOTE: The `np._core` namespace is deliberately kept empty due to it -# being private +# keep in sync with https://github.com/numpy/numtype/blob/main/src/numpy-stubs/_core/__init__.pyi + +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + set_printoptions, +) +from .einsumfunc import einsum, einsum_path +from .fromnumeric import ( + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, +) +from .fromnumeric import transpose as permute_dims +from .function_base import geomspace, linspace, logspace +from .getlimits import finfo, iinfo +from .memmap import memmap +from .numeric import ( + False_, + True_, + allclose, + arange, + argwhere, + array, + array_equal, + array_equiv, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + astype, + base_repr, + binary_repr, + 
bitwise_not, + broadcast, + can_cast, + concatenate, + convolve, + copyto, + correlate, + count_nonzero, + cross, + dot, + dtype, + empty, + empty_like, + flatiter, + flatnonzero, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + fromstring, + full, + full_like, + identity, + indices, + inf, + inner, + isclose, + isfortran, + isscalar, + lexsort, + little_endian, + matmul, + may_share_memory, + min_scalar_type, + moveaxis, + nan, + ndarray, + nditer, + nested_iters, + newaxis, + ones, + ones_like, + outer, + promote_types, + putmask, + result_type, + roll, + rollaxis, + shares_memory, + tensordot, + ufunc, + vdot, + vecdot, + where, + zeros, + zeros_like, +) +from .numeric import concatenate as concat +from .numerictypes import ( + ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + sctypeDict, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .records import recarray, record +from .shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + unstack, + vstack, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, 
+ euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecmat, +) +from .umath import absolute as abs +from .umath import arccos as acos +from .umath import arccosh as acosh +from .umath import arcsin as asin +from .umath import arcsinh as asinh +from .umath import arctan as atan +from .umath import arctan2 as atan2 +from .umath import arctanh as atanh +from .umath import invert as bitwise_invert +from .umath import left_shift as bitwise_left_shift +from .umath import power as pow +from .umath import right_shift as bitwise_right_shift + +__all__ = [ + "False_", + "ScalarType", + "True_", + "abs", + "absolute", + "acos", + "acosh", + "add", + "all", + "allclose", + "amax", + "amin", + "any", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", + "array", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", + "asanyarray", + "asarray", + "ascontiguousarray", + "asfortranarray", + "asin", + "asinh", + "astype", + "atan", + "atan2", + "atanh", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_invert", + "bitwise_left_shift", + "bitwise_not", + "bitwise_or", + "bitwise_right_shift", + 
"bitwise_xor", + "block", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", + "can_cast", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concat", + "concatenate", + "conj", + "conjugate", + "convolve", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", + "cross", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "einsum", + "einsum_path", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "finfo", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + "format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", + "fromiter", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "geomspace", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hstack", + "hypot", + "identity", + "iinfo", + "indices", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", + "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", + "isscalar", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "linspace", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + 
"logical_or", + "logical_xor", + "logspace", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "memmap", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", + "ones", + "ones_like", + "outer", + "partition", + "permute_dims", + "pi", + "positive", + "pow", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", + "putmask", + "rad2deg", + "radians", + "ravel", + "recarray", + "reciprocal", + "record", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "sctypeDict", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", + "shares_memory", + "short", + "sign", + "signbit", + "signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "stack", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "unstack", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "vstack", + "where", + "zeros", + "zeros_like", +] diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 9f452931fcf1..597d5c6deaf3 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -10,8 +10,7 @@ """ from numpy._core.function_base import add_newdoc -from numpy._core.overrides import get_array_function_like_doc - +from numpy._core.overrides import get_array_function_like_doc # noqa: 
F401 ############################################################################### # @@ -5945,7 +5944,7 @@ >>> import numpy as np >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> print(dt.fields) - {'name': (dtype('|S16'), 0), 'grades': (dtype(('float64',(2,))), 16)} + {'name': (dtype('>> # n is 7, k is 4, m is 3 The matmul function implements the semantics of the ``@`` operator - introduced in Python 3.5 following :pep:`465`. + defined in :pep:`465`. It uses an optimized BLAS library when possible (see `numpy.linalg`). @@ -2963,7 +2963,7 @@ def add_newdoc(place, name, doc): matrix-vector product is defined as: .. math:: - \\mathbf{A} \\cdot \\mathbf{b} = \\sum_{j=0}^{n-1} A_{ij} v_j + \\mathbf{A} \\cdot \\mathbf{v} = \\sum_{j=0}^{n-1} A_{ij} v_j where the sum is over the last dimensions in ``x1`` and ``x2`` (unless ``axes`` is specified). (For a matrix-vector product with the @@ -3030,7 +3030,7 @@ def add_newdoc(place, name, doc): vector-matrix product is defined as: .. math:: - \\mathbf{b} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} + \\mathbf{v} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} where the sum is over the last dimension of ``x1`` and the one-but-last dimensions in ``x2`` (unless `axes` is specified) and where @@ -3581,8 +3581,8 @@ def add_newdoc(place, name, doc): This should not be confused with: - * Python 3.7's `math.remainder` and C's ``remainder``, which - computes the IEEE remainder, which are the complement to + * Python's `math.remainder` and C's ``remainder``, which + compute the IEEE remainder, which are the complement to ``round(x1 / x2)``. * The MATLAB ``rem`` function and or the C ``%`` operator which is the complement to ``int(x1 / x2)``. 
diff --git a/numpy/_core/code_generators/verify_c_api_version.py b/numpy/_core/code_generators/verify_c_api_version.py index e1bcf4316f6d..955ec595327e 100644 --- a/numpy/_core/code_generators/verify_c_api_version.py +++ b/numpy/_core/code_generators/verify_c_api_version.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 +import argparse import os import sys -import argparse class MismatchCAPIError(ValueError): diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index d782e6131337..bde8921f5504 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -18,24 +18,37 @@ import functools import numpy as np -from numpy._utils import set_module -from .numerictypes import bytes_, str_, character -from .numeric import ndarray, array as narray, asarray as asnarray -from numpy._core.multiarray import compare_chararrays from numpy._core import overrides +from numpy._core.multiarray import compare_chararrays +from numpy._core.strings import ( + _join as join, +) +from numpy._core.strings import ( + _rsplit as rsplit, +) +from numpy._core.strings import ( + _split as split, +) +from numpy._core.strings import ( + _splitlines as splitlines, +) +from numpy._utils import set_module from numpy.strings import * from numpy.strings import ( multiply as strings_multiply, +) +from numpy.strings import ( partition as strings_partition, - rpartition as strings_rpartition, ) -from numpy._core.strings import ( - _split as split, - _rsplit as rsplit, - _splitlines as splitlines, - _join as join, +from numpy.strings import ( + rpartition as strings_rpartition, ) +from .numeric import array as narray +from .numeric import asarray as asnarray +from .numeric import ndarray +from .numerictypes import bytes_, character, str_ + __all__ = [ 'equal', 'not_equal', 'greater_equal', 'less_equal', 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize', diff --git a/numpy/_core/defchararray.pyi b/numpy/_core/defchararray.pyi index b34f13ef9641..26a5af432824 
100644 --- a/numpy/_core/defchararray.pyi +++ b/numpy/_core/defchararray.pyi @@ -1,37 +1,26 @@ -from typing import ( - Literal as L, - overload, - TypeAlias, - TypeVar, - Any, - SupportsIndex, - SupportsInt, -) +from typing import Any, Self, SupportsIndex, SupportsInt, TypeAlias, overload +from typing import Literal as L +from typing_extensions import TypeVar import numpy as np from numpy import ( - ndarray, - dtype, - str_, + _OrderKACF, + _SupportsBuffer, bytes_, + dtype, int_, + ndarray, object_, - _OrderKACF, - _SupportsBuffer, - _SupportsArray -) -from numpy._typing import ( - NDArray, - _Shape, - _ShapeLike, - _ArrayLikeStr_co as U_co, - _ArrayLikeBytes_co as S_co, - _ArrayLikeString_co as T_co, - _ArrayLikeAnyString_co as UST_co, - _ArrayLikeInt_co as i_co, - _ArrayLikeBool_co as b_co, + str_, ) from numpy._core.multiarray import compare_chararrays +from numpy._typing import NDArray, _AnyShape, _Shape, _ShapeLike, _SupportsArray +from numpy._typing import _ArrayLikeAnyString_co as UST_co +from numpy._typing import _ArrayLikeBool_co as b_co +from numpy._typing import _ArrayLikeBytes_co as S_co +from numpy._typing import _ArrayLikeInt_co as i_co +from numpy._typing import _ArrayLikeStr_co as U_co +from numpy._typing import _ArrayLikeString_co as T_co __all__ = [ "equal", @@ -89,14 +78,15 @@ __all__ = [ "chararray", ] -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) _CharacterT = TypeVar("_CharacterT", bound=np.character) -_CharDTypeT_co = TypeVar("_CharDTypeT_co", bound=dtype[np.character], covariant=True) -_CharArray: TypeAlias = chararray[tuple[int, ...], dtype[_CharacterT]] +_CharDTypeT_co = TypeVar("_CharDTypeT_co", bound=dtype[np.character], default=dtype, covariant=True) -_StringDTypeArray: TypeAlias = np.ndarray[_Shape, np.dtypes.StringDType] +_CharArray: TypeAlias = chararray[_AnyShape, dtype[_CharacterT]] + +_StringDTypeArray: TypeAlias = 
np.ndarray[_AnyShape, np.dtypes.StringDType] +_StringDTypeOrUnicodeArray: TypeAlias = _StringDTypeArray | NDArray[np.str_] _StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_Shape, np.dtype[np.str_]] | np.ndarray[_Shape, np.dtypes.StringDType] class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): @overload @@ -109,7 +99,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): offset: SupportsIndex = ..., strides: _ShapeLike = ..., order: _OrderKACF = ..., - ) -> chararray[_Shape, dtype[bytes_]]: ... + ) -> _CharArray[bytes_]: ... @overload def __new__( subtype, @@ -120,12 +110,12 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): offset: SupportsIndex = ..., strides: _ShapeLike = ..., order: _OrderKACF = ..., - ) -> chararray[_Shape, dtype[str_]]: ... + ) -> _CharArray[str_]: ... def __array_finalize__(self, obj: object) -> None: ... - def __mul__(self, other: i_co) -> chararray[_Shape, _CharDTypeT_co]: ... - def __rmul__(self, other: i_co) -> chararray[_Shape, _CharDTypeT_co]: ... - def __mod__(self, i: Any) -> chararray[_Shape, _CharDTypeT_co]: ... + def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... + def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... + def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... @overload def __eq__( @@ -273,7 +263,7 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): def expandtabs( self, tabsize: i_co = ..., - ) -> chararray[_Shape, _CharDTypeT_co]: ... + ) -> Self: ... @overload def find( @@ -498,12 +488,12 @@ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]): deletechars: S_co | None = ..., ) -> _CharArray[bytes_]: ... - def zfill(self, width: i_co) -> chararray[_Shape, _CharDTypeT_co]: ... - def capitalize(self) -> chararray[_ShapeT_co, _CharDTypeT_co]: ... - def title(self) -> chararray[_ShapeT_co, _CharDTypeT_co]: ... 
- def swapcase(self) -> chararray[_ShapeT_co, _CharDTypeT_co]: ... - def lower(self) -> chararray[_ShapeT_co, _CharDTypeT_co]: ... - def upper(self) -> chararray[_ShapeT_co, _CharDTypeT_co]: ... + def zfill(self, width: i_co) -> Self: ... + def capitalize(self) -> Self: ... + def title(self) -> Self: ... + def swapcase(self) -> Self: ... + def lower(self) -> Self: ... + def upper(self) -> Self: ... def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... @@ -564,7 +554,7 @@ def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... @overload def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def add(x1: T_co, T_co) -> _StringDTypeOrUnicodeArray: ... +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... diff --git a/numpy/_core/einsumfunc.pyi b/numpy/_core/einsumfunc.pyi index a9c11b0b21a9..9653a26dcd78 100644 --- a/numpy/_core/einsumfunc.pyi +++ b/numpy/_core/einsumfunc.pyi @@ -1,23 +1,23 @@ from collections.abc import Sequence -from typing import TypeAlias, TypeVar, Any, overload, Literal +from typing import Any, Literal, TypeAlias, TypeVar, overload import numpy as np -from numpy import number, _OrderKACF +from numpy import _OrderKACF, number from numpy._typing import ( NDArray, _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _ArrayLikeObject_co, + _ArrayLikeUInt_co, _DTypeLikeBool, - _DTypeLikeUInt, - _DTypeLikeInt, - _DTypeLikeFloat, _DTypeLikeComplex, _DTypeLikeComplex_co, + _DTypeLikeFloat, + _DTypeLikeInt, _DTypeLikeObject, + _DTypeLikeUInt, ) __all__ = ["einsum", "einsum_path"] diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index bc00877612d6..73dcd1ddc11d 100644 --- 
a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -7,13 +7,13 @@ import numpy as np from numpy._utils import set_module + +from . import _methods, overrides from . import multiarray as mu -from . import overrides -from . import umath as um from . import numerictypes as nt -from .multiarray import asarray, array, asanyarray, concatenate +from . import umath as um from ._multiarray_umath import _array_converter -from . import _methods +from .multiarray import asanyarray, asarray, concatenate _dt_ = nt.sctype2char diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index f974dc33a027..2aedc727e6dc 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,4 +1,5 @@ # ruff: noqa: ANN401 +from _typeshed import Incomplete from collections.abc import Sequence from typing import ( Any, @@ -11,52 +12,51 @@ from typing import ( overload, type_check_only, ) - -from _typeshed import Incomplete from typing_extensions import deprecated import numpy as np from numpy import ( - uint64, - int_, - int64, - intp, - float16, - floating, - complexfloating, - timedelta64, - object_, - generic, _AnyShapeT, - _OrderKACF, - _OrderACF, + _CastingKind, _ModeKind, + _OrderACF, + _OrderKACF, _PartitionKind, _SortKind, _SortSide, - _CastingKind, + complexfloating, + float16, + floating, + generic, + int64, + int_, + intp, + object_, + timedelta64, + uint64, ) from numpy._globals import _NoValueType from numpy._typing import ( - DTypeLike, - _DTypeLike, ArrayLike, - _ArrayLike, + DTypeLike, NDArray, - _NestedSequence, - _ShapeLike, + _AnyShape, + _ArrayLike, _ArrayLikeBool_co, - _ArrayLikeUInt_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, _ArrayLikeInt, _ArrayLikeInt_co, - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, _ArrayLikeObject_co, - _IntLike_co, + _ArrayLikeUInt_co, _BoolLike_co, _ComplexLike_co, + _DTypeLike, + _IntLike_co, + _NestedSequence, _NumberLike_co, _ScalarLike_co, + _ShapeLike, ) __all__ = [ @@ -111,6 +111,7 @@ 
_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) _ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) _ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) @type_check_only class _SupportsShape(Protocol[_ShapeT_co]): @@ -418,18 +419,18 @@ def argmax( def argmax( a: ArrayLike, axis: SupportsIndex | None, - out: _ArrayT, + out: _BoolOrIntArrayT, *, keepdims: bool = ..., -) -> _ArrayT: ... +) -> _BoolOrIntArrayT: ... @overload def argmax( a: ArrayLike, axis: SupportsIndex | None = ..., *, - out: _ArrayT, + out: _BoolOrIntArrayT, keepdims: bool = ..., -) -> _ArrayT: ... +) -> _BoolOrIntArrayT: ... @overload def argmin( @@ -451,18 +452,18 @@ def argmin( def argmin( a: ArrayLike, axis: SupportsIndex | None, - out: _ArrayT, + out: _BoolOrIntArrayT, *, keepdims: bool = ..., -) -> _ArrayT: ... +) -> _BoolOrIntArrayT: ... @overload def argmin( a: ArrayLike, axis: SupportsIndex | None = ..., *, - out: _ArrayT, + out: _BoolOrIntArrayT, keepdims: bool = ..., -) -> _ArrayT: ... +) -> _BoolOrIntArrayT: ... @overload def searchsorted( @@ -579,7 +580,7 @@ def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ... # this prevents `Any` from being returned with Pyright @overload -def shape(a: _SupportsShape[Never]) -> tuple[int, ...]: ... +def shape(a: _SupportsShape[Never]) -> _AnyShape: ... @overload def shape(a: _SupportsShape[_ShapeT]) -> _ShapeT: ... @overload @@ -594,7 +595,7 @@ def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ... @overload def shape(a: memoryview | bytearray) -> tuple[int]: ... @overload -def shape(a: ArrayLike) -> tuple[int, ...]: ... +def shape(a: ArrayLike) -> _AnyShape: ... @overload def compress( @@ -817,9 +818,10 @@ def sum( where: _ArrayLikeBool_co = ..., ) -> _ArrayT: ... 
+# keep in sync with `any` @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -828,7 +830,7 @@ def all( ) -> np.bool: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -837,7 +839,7 @@ def all( ) -> Incomplete: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None, out: _ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., @@ -846,7 +848,7 @@ def all( ) -> _ArrayT: ... @overload def all( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, out: _ArrayT, @@ -854,9 +856,10 @@ def all( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... +# keep in sync with `all` @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: None = None, out: None = None, keepdims: Literal[False, 0] | _NoValueType = ..., @@ -865,7 +868,7 @@ def any( ) -> np.bool: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, out: None = None, keepdims: _BoolLike_co | _NoValueType = ..., @@ -874,7 +877,7 @@ def any( ) -> Incomplete: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None, out: _ArrayT, keepdims: _BoolLike_co | _NoValueType = ..., @@ -883,7 +886,7 @@ def any( ) -> _ArrayT: ... @overload def any( - a: ArrayLike, + a: ArrayLike | None, axis: int | tuple[int, ...] | None = None, *, out: _ArrayT, @@ -891,6 +894,7 @@ def any( where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _ArrayT: ... 
+# @overload def cumsum( a: _ArrayLike[_ScalarT], diff --git a/numpy/_core/function_base.py b/numpy/_core/function_base.py index 3a436a00a815..12ab2a7ef546 100644 --- a/numpy/_core/function_base.py +++ b/numpy/_core/function_base.py @@ -1,14 +1,15 @@ import functools -import warnings import operator import types +import warnings import numpy as np -from . import numeric as _nx -from .numeric import result_type, nan, asanyarray, ndim -from numpy._core.multiarray import add_docstring -from numpy._core._multiarray_umath import _array_converter from numpy._core import overrides +from numpy._core._multiarray_umath import _array_converter +from numpy._core.multiarray import add_docstring + +from . import numeric as _nx +from .numeric import asanyarray, nan, ndim, result_type __all__ = ['logspace', 'linspace', 'geomspace'] @@ -157,11 +158,10 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, y *= delta else: y = y * delta + elif _mult_inplace: + y *= step else: - if _mult_inplace: - y *= step - else: - y = y * step + y = y * step else: # sequences with 0 items or 1 item with endpoint=True (i.e. 
div <= 0) # have an undefined step diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 5348ebfb40c3..600265b1fd0a 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,10 +1,15 @@ +from _typeshed import Incomplete from typing import Literal as L from typing import SupportsIndex, TypeAlias, TypeVar, overload -from _typeshed import Incomplete - import numpy as np -from numpy._typing import DTypeLike, NDArray, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _DTypeLike +from numpy._typing import ( + DTypeLike, + NDArray, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _DTypeLike, +) from numpy._typing._array_like import _DualArrayLike __all__ = ["geomspace", "linspace", "logspace"] diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index 2dc6d1e7fad2..afa2ccebcfd2 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -7,11 +7,12 @@ import warnings from numpy._utils import set_module -from ._machar import MachAr + from . import numeric from . 
import numerictypes as ntypes +from ._machar import MachAr from .numeric import array, inf, nan -from .umath import log10, exp2, nextafter, isnan +from .umath import exp2, isnan, log10, nextafter def _fr0(a): diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index e2556a07a3ef..5eaa29035428 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -98,11 +98,23 @@ #endif #ifdef _MSC_VER - #define NPY_FINLINE static __forceinline + #ifdef __cplusplus + #define NPY_FINLINE __forceinline + #else + #define NPY_FINLINE static __forceinline + #endif #elif defined(__GNUC__) - #define NPY_FINLINE static inline __attribute__((always_inline)) + #ifdef __cplusplus + #define NPY_FINLINE inline __attribute__((always_inline)) + #else + #define NPY_FINLINE static inline __attribute__((always_inline)) + #endif #else - #define NPY_FINLINE static + #ifdef __cplusplus + #define NPY_FINLINE inline + #else + #define NPY_FINLINE static NPY_INLINE + #endif #endif #if defined(_MSC_VER) diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index 4fb3fb406869..52e9d5996bd1 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -111,24 +111,13 @@ #endif #elif defined(__loongarch_lp64) #define NPY_CPU_LOONGARCH64 -#elif defined(__EMSCRIPTEN__) +#elif defined(__EMSCRIPTEN__) || defined(__wasm__) /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */ + /* __wasm__ is defined by clang when targeting wasm */ #define NPY_CPU_WASM #else #error Unknown CPU, please report this to numpy maintainers with \ information about your platform (OS, CPU and compiler) #endif -/* - * Except for the following architectures, memory access is limited to the natural - * alignment of data types otherwise it may lead to bus error or performance regression. 
- * For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt. -*/ -#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__) - #define NPY_ALIGNMENT_REQUIRED 0 -#endif -#ifndef NPY_ALIGNMENT_REQUIRED - #define NPY_ALIGNMENT_REQUIRED 1 -#endif - #endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */ diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 6a9d11b72808..52d7e2b5d7d7 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -123,8 +123,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.10 support) */ - #define NPY_FEATURE_VERSION NPY_1_21_API_VERSION + /* Use the default (increase when dropping Python 3.11 support) */ + #define NPY_FEATURE_VERSION NPY_1_23_API_VERSION #endif /* Sanity check the (requested) feature version */ @@ -172,7 +172,7 @@ #define NPY_FEATURE_VERSION_STRING "2.0" #elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.1" -#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION +#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION /* also 2.4 */ #define NPY_FEATURE_VERSION_STRING "2.3" #else #error "Missing version string define for new NumPy version." 
diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index 561ac38a4d58..8cfa7f94a8da 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -1,8 +1,10 @@ -from contextlib import nullcontext import operator +from contextlib import nullcontext + import numpy as np from numpy._utils import set_module -from .numeric import uint8, ndarray, dtype + +from .numeric import dtype, ndarray, uint8 __all__ = ['memmap'] @@ -250,13 +252,13 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, size = bytes // _dbytes shape = (size,) else: - if type(shape) not in (tuple, list): + if not isinstance(shape, (tuple, list)): try: shape = [operator.index(shape)] except TypeError: pass shape = tuple(shape) - size = np.intp(1) # avoid default choice of np.int_, which might overflow + size = np.intp(1) # avoid overflows for k in shape: size *= k diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index b9207415ee08..18eb7d788495 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -128,6 +128,21 @@ if use_intel_sort and not fs.exists('src/npysort/x86-simd-sort/README.md') error('Missing the `x86-simd-sort` git submodule! Run `git submodule update --init` to fix this.') endif +# openMP related settings: +if get_option('disable-threading') and get_option('enable-openmp') + error('Build options `disable-threading` and `enable-openmp` are conflicting. Please set at most one to true.') +endif + +use_openmp = get_option('enable-openmp') and not get_option('disable-threading') + +# Setup openmp flags for x86-simd-sort: +omp = [] +omp_dep = [] +if use_intel_sort and use_openmp + omp = dependency('openmp', required : true) + omp_dep = declare_dependency(dependencies: omp, compile_args: ['-DXSS_USE_OPENMP']) +endif + if not fs.exists('src/common/pythoncapi-compat') error('Missing the `pythoncapi-compat` git submodule! 
' + 'Run `git submodule update --init` to fix this.') @@ -867,12 +882,15 @@ foreach gen_mtargets : [ ] : [] ], ] + + + mtargets = mod_features.multi_targets( gen_mtargets[0], multiarray_gen_headers + gen_mtargets[1], dispatch: gen_mtargets[2], # baseline: CPU_BASELINE, it doesn't provide baseline fallback prefix: 'NPY_', - dependencies: [py_dep, np_core_dep], + dependencies: [py_dep, np_core_dep, omp_dep], c_args: c_args_common + max_opt, cpp_args: cpp_args_common + max_opt, include_directories: [ @@ -914,13 +932,14 @@ foreach gen_mtargets : [ ], [ 'loops_arithmetic.dispatch.h', - src_file.process('src/umath/loops_arithmetic.dispatch.c.src'), + src_file.process('src/umath/loops_arithmetic.dispatch.cpp.src'), [ AVX512_SKX, AVX512F, AVX2, SSE41, SSE2, NEON, VSX4, VSX2, VX, LSX, + RVV, ] ], [ @@ -1291,7 +1310,7 @@ py.extension_module('_multiarray_umath', 'src/umath', 'src/highway' ], - dependencies: [blas_dep], + dependencies: [blas_dep, omp], link_with: [ npymath_lib, unique_hash_so, diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 8348522e8420..5599494720b6 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -7,17 +7,25 @@ """ import functools -from . import overrides -from . import _multiarray_umath + +from . import _multiarray_umath, overrides from ._multiarray_umath import * # noqa: F403 + # These imports are needed for backward compatibility, # do not change them. 
issue gh-15518 # _get_ndarray_c_version is semi-public, on purpose not added to __all__ -from ._multiarray_umath import ( - _flagdict, from_dlpack, _place, _reconstruct, - _vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version, - _get_madvise_hugepage, _set_madvise_hugepage, - ) +from ._multiarray_umath import ( # noqa: F401 + _ARRAY_API, + _flagdict, + _get_madvise_hugepage, + _get_ndarray_c_version, + _monotonicity, + _place, + _reconstruct, + _set_madvise_hugepage, + _vec_string, + from_dlpack, +) __all__ = [ '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', @@ -1715,7 +1723,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): Examples -------- >>> import numpy as np - >>> import pytz + >>> from zoneinfo import ZoneInfo >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') >>> d array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', @@ -1728,9 +1736,9 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): '2002-10-27T07:30Z'], dtype='>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern')) + >>> np.datetime_as_string(d, timezone=ZoneInfo('US/Eastern')) array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400', '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype=' NDArray[Any]: ... +# @overload -def unravel_index( # type: ignore[misc] - indices: _IntLike_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[intp, ...]: ... -@overload -def unravel_index( - indices: _ArrayLikeInt_co, - shape: _ShapeLike, - order: _OrderCF = ..., -) -> tuple[NDArray[intp], ...]: ... - -@overload -def ravel_multi_index( # type: ignore[misc] - multi_index: Sequence[_IntLike_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., +def ravel_multi_index( + multi_index: SupportsLenAndGetItem[_IntLike_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> intp: ... 
@overload def ravel_multi_index( - multi_index: Sequence[_ArrayLikeInt_co], - dims: Sequence[SupportsIndex], - mode: _ModeKind | tuple[_ModeKind, ...] = ..., - order: _OrderCF = ..., + multi_index: SupportsLenAndGetItem[_ArrayLikeInt_co], + dims: _ShapeLike, + mode: _ModeKind | tuple[_ModeKind, ...] = "raise", + order: _OrderCF = "C", ) -> NDArray[intp]: ... +# +@overload +def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[intp, ...]: ... +@overload +def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ... + # NOTE: Allow any sequence of array-like objects @overload def concatenate( # type: ignore[misc] diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 7adeaeddda54..964447fa0d8a 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -1,33 +1,67 @@ +import builtins import functools import itertools +import math +import numbers import operator import sys import warnings -import numbers -import builtins -import math import numpy as np -from . import multiarray +from numpy.exceptions import AxisError + +from . import multiarray, numerictypes, overrides, shape_base, umath from . 
import numerictypes as nt -from .multiarray import ( - ALLOW_THREADS, BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, - RAISE, WRAP, arange, array, asarray, asanyarray, ascontiguousarray, - asfortranarray, broadcast, can_cast, concatenate, copyto, dot, dtype, - empty, empty_like, flatiter, frombuffer, from_dlpack, fromfile, fromiter, - fromstring, inner, lexsort, matmul, may_share_memory, min_scalar_type, - ndarray, nditer, nested_iters, promote_types, putmask, result_type, - shares_memory, vdot, where, zeros, normalize_axis_index, vecdot +from ._ufunc_config import errstate +from .multiarray import ( # noqa: F401 + ALLOW_THREADS, + BUFSIZE, + CLIP, + MAXDIMS, + MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT, + RAISE, + WRAP, + arange, + array, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + broadcast, + can_cast, + concatenate, + copyto, + dot, + dtype, + empty, + empty_like, + flatiter, + from_dlpack, + frombuffer, + fromfile, + fromiter, + fromstring, + inner, + lexsort, + matmul, + may_share_memory, + min_scalar_type, + ndarray, + nditer, + nested_iters, + normalize_axis_index, + promote_types, + putmask, + result_type, + shares_memory, + vdot, + vecdot, + where, + zeros, ) - -from . import overrides -from . import umath -from . import shape_base from .overrides import finalize_array_function_like, set_module -from .umath import (multiply, invert, sin, PINF, NAN) -from . import numerictypes -from numpy.exceptions import AxisError -from ._ufunc_config import errstate +from .umath import NAN, PINF, invert, multiply, sin bitwise_not = invert ufunc = type(sin) @@ -1426,7 +1460,7 @@ def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): normalize_axis_index : normalizing a single scalar axis """ # Optimization to speed-up the most common cases. 
- if type(axis) not in (tuple, list): + if not isinstance(axis, (tuple, list)): try: axis = [operator.index(axis)] except TypeError: @@ -1920,8 +1954,11 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): _fromfunction_with_like = array_function_dispatch()(fromfunction) -def _frombuffer(buf, dtype, shape, order): - return frombuffer(buf, dtype=dtype).reshape(shape, order=order) +def _frombuffer(buf, dtype, shape, order, axis_order=None): + array = frombuffer(buf, dtype=dtype) + if order == 'K' and axis_order is not None: + return array.reshape(shape, order='C').transpose(axis_order) + return array.reshape(shape, order=order) @set_module('numpy') @@ -2098,25 +2135,24 @@ def err_if_insufficient(width, binwidth): err_if_insufficient(width, binwidth) return binary.zfill(outwidth) - else: - if width is None: - return f'-{-num:b}' + elif width is None: + return f'-{-num:b}' - else: - poswidth = len(f'{-num:b}') + else: + poswidth = len(f'{-num:b}') - # See gh-8679: remove extra digit - # for numbers at boundaries. - if 2**(poswidth - 1) == -num: - poswidth -= 1 + # See gh-8679: remove extra digit + # for numbers at boundaries. 
+ if 2**(poswidth - 1) == -num: + poswidth -= 1 - twocomp = 2**(poswidth + 1) + num - binary = f'{twocomp:b}' - binwidth = len(binary) + twocomp = 2**(poswidth + 1) + num + binary = f'{twocomp:b}' + binwidth = len(binary) - outwidth = builtins.max(binwidth, width) - err_if_insufficient(width, binwidth) - return '1' * (outwidth - binwidth) + binary + outwidth = builtins.max(binwidth, width) + err_if_insufficient(width, binwidth) + return '1' * (outwidth - binwidth) + binary @set_module('numpy') @@ -2443,7 +2479,20 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): elif isinstance(y, int): y = float(y) + # atol and rtol can be arrays + if not (np.all(np.isfinite(atol)) and np.all(np.isfinite(rtol))): + err_s = np.geterr()["invalid"] + err_msg = f"One of rtol or atol is not valid, atol: {atol}, rtol: {rtol}" + + if err_s == "warn": + warnings.warn(err_msg, RuntimeWarning, stacklevel=2) + elif err_s == "raise": + raise FloatingPointError(err_msg) + elif err_s == "print": + print(err_msg) + with errstate(invalid='ignore'): + result = (less_equal(abs(x - y), atol + rtol * abs(y)) & isfinite(y) | (x == y)) @@ -2695,16 +2744,14 @@ def extend_all(module): __all__.append(a) -from .umath import * -from .numerictypes import * -from . import fromnumeric -from .fromnumeric import * -from . import arrayprint -from .arrayprint import * -from . import _asarray +from . import _asarray, _ufunc_config, arrayprint, fromnumeric from ._asarray import * -from . 
import _ufunc_config from ._ufunc_config import * +from .arrayprint import * +from .fromnumeric import * +from .numerictypes import * +from .umath import * + extend_all(fromnumeric) extend_all(umath) extend_all(numerictypes) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 416c4eec8785..b54fa856b007 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,271 +1,734 @@ +from builtins import bool as py_bool from collections.abc import Callable, Sequence -from typing import Any, Final, Never, NoReturn, SupportsAbs, SupportsIndex, TypeAlias, TypeGuard, TypeVar, Unpack, overload +from typing import ( + Any, + Final, + Never, + NoReturn, + SupportsAbs, + SupportsIndex, + TypeAlias, + TypeGuard, + TypeVar, + Unpack, + overload, +) from typing import Literal as L import numpy as np from numpy import ( - # re-exports - bitwise_not, False_, True_, - broadcast, - dtype, - flatiter, - from_dlpack, + _OrderCF, + _OrderKACF, + bitwise_not, inf, little_endian, - matmul, - vecdot, nan, - ndarray, - nditer, newaxis, ufunc, +) +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _DTypeLike, + _NestedSequence, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, +) +from numpy.lib._array_utils_impl import normalize_axis_tuple as normalize_axis_tuple - # other - generic, - unsignedinteger, - signedinteger, - floating, - complexfloating, - int_, - intp, - float64, - timedelta64, - object_, - _AnyShapeT, - _OrderKACF, - _OrderCF, +from ._asarray import require +from ._ufunc_config import ( + errstate, + getbufsize, + geterr, + geterrcall, + setbufsize, + seterr, + seterrcall, +) +from .arrayprint import ( + array2string, + array_repr, + array_str, + format_float_positional, + format_float_scientific, + get_printoptions, + printoptions, + 
set_printoptions, ) from .fromnumeric import ( - all as all, - any as any, - argpartition as argpartition, - matrix_transpose as matrix_transpose, - mean as mean, + all, + amax, + amin, + any, + argmax, + argmin, + argpartition, + argsort, + around, + choose, + clip, + compress, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + diagonal, + matrix_transpose, + max, + mean, + min, + ndim, + nonzero, + partition, + prod, + ptp, + put, + ravel, + repeat, + reshape, + resize, + round, + searchsorted, + shape, + size, + sort, + squeeze, + std, + sum, + swapaxes, + take, + trace, + transpose, + var, ) +from .multiarray import ALLOW_THREADS as ALLOW_THREADS +from .multiarray import BUFSIZE as BUFSIZE +from .multiarray import CLIP as CLIP +from .multiarray import MAXDIMS as MAXDIMS +from .multiarray import MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS +from .multiarray import MAY_SHARE_EXACT as MAY_SHARE_EXACT +from .multiarray import RAISE as RAISE +from .multiarray import WRAP as WRAP from .multiarray import ( - # re-exports + _Array, + _ConstructorEmpty, + _KwargsEmpty, arange, array, - asarray, asanyarray, + asarray, ascontiguousarray, asfortranarray, + broadcast, can_cast, concatenate, copyto, dot, + dtype, empty, empty_like, + flatiter, + from_dlpack, frombuffer, fromfile, fromiter, fromstring, inner, lexsort, + matmul, may_share_memory, min_scalar_type, + ndarray, + nditer, nested_iters, - putmask, promote_types, + putmask, result_type, shares_memory, vdot, where, zeros, - - # other - _Array, - _ConstructorEmpty, - _KwargsEmpty, ) - -from numpy._typing import ( - ArrayLike, - NDArray, - DTypeLike, - _SupportsDType, - _ShapeLike, - _DTypeLike, - _ArrayLike, - _SupportsArrayFunc, - _ScalarLike_co, - _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, - _ArrayLikeTD64_co, - _ArrayLikeObject_co, - _NestedSequence, +from .multiarray import normalize_axis_index as normalize_axis_index +from .numerictypes import ( + 
ScalarType, + bool, + bool_, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + datetime_as_string, + datetime_data, + double, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + is_busday, + isdtype, + issubdtype, + long, + longdouble, + longlong, + number, + object_, + short, + signedinteger, + single, + str_, + timedelta64, + typecodes, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from .umath import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecdot, + vecmat, ) __all__ = [ - "newaxis", - "ndarray", - "flatiter", - "nditer", - "nested_iters", - "ufunc", + "False_", + "ScalarType", + "True_", + "absolute", + "add", + "all", + "allclose", + "amax", + 
"amin", + "any", "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argpartition", + "argsort", + "argwhere", + "around", "array", - "asarray", + "array2string", + "array_equal", + "array_equiv", + "array_repr", + "array_str", "asanyarray", + "asarray", "ascontiguousarray", "asfortranarray", - "zeros", - "count_nonzero", - "empty", - "broadcast", - "dtype", - "fromstring", - "fromfile", - "frombuffer", - "from_dlpack", - "where", - "argwhere", - "copyto", - "concatenate", - "lexsort", "astype", + "base_repr", + "binary_repr", + "bitwise_and", + "bitwise_count", + "bitwise_not", + "bitwise_or", + "bitwise_xor", + "bool", + "bool_", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "byte", + "bytes_", "can_cast", - "promote_types", - "min_scalar_type", - "result_type", - "isfortran", - "empty_like", - "zeros_like", - "ones_like", - "correlate", + "cbrt", + "cdouble", + "ceil", + "character", + "choose", + "clip", + "clongdouble", + "complex64", + "complex128", + "complex192", + "complex256", + "complexfloating", + "compress", + "concatenate", + "conj", + "conjugate", "convolve", - "inner", - "dot", - "outer", - "vdot", - "roll", - "rollaxis", - "moveaxis", + "copysign", + "copyto", + "correlate", + "cos", + "cosh", + "count_nonzero", "cross", - "tensordot", - "little_endian", + "csingle", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "datetime64", + "datetime_as_string", + "datetime_data", + "deg2rad", + "degrees", + "diagonal", + "divide", + "divmod", + "dot", + "double", + "dtype", + "e", + "empty", + "empty_like", + "equal", + "errstate", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "flatiter", + "flatnonzero", + "flexible", + "float16", + "float32", + "float64", + "float96", + "float128", + "float_power", + "floating", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "format_float_positional", + 
"format_float_scientific", + "frexp", + "from_dlpack", + "frombuffer", + "fromfile", + "fromfunction", "fromiter", - "array_equal", - "array_equiv", + "frompyfunc", + "fromstring", + "full", + "full_like", + "gcd", + "generic", + "get_printoptions", + "getbufsize", + "geterr", + "geterrcall", + "greater", + "greater_equal", + "half", + "heaviside", + "hypot", + "identity", "indices", - "fromfunction", + "inexact", + "inf", + "inner", + "int8", + "int16", + "int32", + "int64", + "int_", + "intc", + "integer", + "intp", + "invert", + "is_busday", "isclose", + "isdtype", + "isfinite", + "isfortran", + "isinf", + "isnan", + "isnat", "isscalar", - "binary_repr", - "base_repr", + "issubdtype", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "lexsort", + "little_endian", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "long", + "longdouble", + "longlong", + "matmul", + "matrix_transpose", + "matvec", + "max", + "maximum", + "may_share_memory", + "mean", + "min", + "min_scalar_type", + "minimum", + "mod", + "modf", + "moveaxis", + "multiply", + "nan", + "ndarray", + "ndim", + "nditer", + "negative", + "nested_iters", + "newaxis", + "nextafter", + "nonzero", + "not_equal", + "number", + "object_", "ones", - "identity", - "allclose", + "ones_like", + "outer", + "partition", + "pi", + "positive", + "power", + "printoptions", + "prod", + "promote_types", + "ptp", + "put", "putmask", - "flatnonzero", - "inf", - "nan", - "False_", - "True_", - "bitwise_not", - "full", - "full_like", - "matmul", - "vecdot", + "rad2deg", + "radians", + "ravel", + "reciprocal", + "remainder", + "repeat", + "require", + "reshape", + "resize", + "result_type", + "right_shift", + "rint", + "roll", + "rollaxis", + "round", + "searchsorted", + "set_printoptions", + "setbufsize", + "seterr", + "seterrcall", + "shape", "shares_memory", - "may_share_memory", + "short", + "sign", + "signbit", + 
"signedinteger", + "sin", + "single", + "sinh", + "size", + "sort", + "spacing", + "sqrt", + "square", + "squeeze", + "std", + "str_", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "tensordot", + "timedelta64", + "trace", + "transpose", + "true_divide", + "trunc", + "typecodes", + "ubyte", + "ufunc", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintc", + "uintp", + "ulong", + "ulonglong", + "unsignedinteger", + "ushort", + "var", + "vdot", + "vecdot", + "vecmat", + "void", + "where", + "zeros", + "zeros_like", ] _T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=generic) -_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT = TypeVar("_DTypeT", bound=dtype) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_AnyShapeT = TypeVar( + "_AnyShapeT", + tuple[()], + tuple[int], + tuple[int, int], + tuple[int, int, int], + tuple[int, int, int, int], + tuple[int, ...], +) _CorrelateMode: TypeAlias = L["valid", "same", "full"] +# keep in sync with `ones_like` @overload def zeros_like( a: _ArrayT, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... @overload def zeros_like( a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def zeros_like( - a: Any, + a: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def zeros_like( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... ones: Final[_ConstructorEmpty] +# keep in sync with `zeros_like` @overload def ones_like( a: _ArrayT, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... @overload def ones_like( a: _ArrayLike[_ScalarT], - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def ones_like( - a: Any, + a: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def ones_like( - a: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview @@ -372,46 +835,46 @@ def full( @overload def full_like( a: _ArrayT, - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: L[True] = ..., - shape: None = ..., + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: L[True] = True, + shape: None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> _ArrayT: ... @overload def full_like( a: _ArrayLike[_ScalarT], - fill_value: Any, - dtype: None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + fill_value: object, + dtype: None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def full_like( - a: Any, - fill_value: Any, + a: object, + fill_value: object, dtype: _DTypeLike[_ScalarT], - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... 
@overload def full_like( - a: Any, - fill_value: Any, - dtype: DTypeLike | None = ..., - order: _OrderKACF = ..., - subok: bool = ..., - shape: _ShapeLike | None = ..., + a: object, + fill_value: object, + dtype: DTypeLike | None = None, + order: _OrderKACF = "K", + subok: py_bool = True, + shape: _ShapeLike | None = None, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... # @@ -424,10 +887,10 @@ def count_nonzero( a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] ) -> NDArray[np.intp]: ... @overload -def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ... +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: py_bool = False) -> Any: ... # -def isfortran(a: NDArray[Any] | generic) -> bool: ... +def isfortran(a: NDArray[Any] | generic) -> py_bool: ... def argwhere(a: ArrayLike) -> NDArray[intp]: ... @@ -819,31 +1282,31 @@ def identity( def allclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., -) -> bool: ... + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, +) -> py_bool: ... @overload def isclose( a: _ScalarLike_co, b: _ScalarLike_co, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, ) -> np.bool: ... @overload def isclose( a: ArrayLike, b: ArrayLike, - rtol: ArrayLike = ..., - atol: ArrayLike = ..., - equal_nan: bool = ..., + rtol: ArrayLike = 1e-5, + atol: ArrayLike = 1e-8, + equal_nan: py_bool = False, ) -> NDArray[np.bool]: ... -def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: py_bool = False) -> py_bool: ... -def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... 
+def array_equiv(a1: ArrayLike, a2: ArrayLike) -> py_bool: ... @overload def astype( @@ -851,8 +1314,8 @@ def astype( dtype: _DTypeLike[_ScalarT], /, *, - copy: bool = ..., - device: L["cpu"] | None = ..., + copy: py_bool = True, + device: L["cpu"] | None = None, ) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... @overload def astype( @@ -860,6 +1323,6 @@ def astype( dtype: DTypeLike, /, *, - copy: bool = ..., - device: L["cpu"] | None = ..., + copy: py_bool = True, + device: L["cpu"] | None = None, ) -> ndarray[_ShapeT, dtype]: ... diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index ef95137a628d..135dc1b51d97 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -79,12 +79,19 @@ import numbers import warnings +from numpy._utils import set_module + from . import multiarray as ma from .multiarray import ( - ndarray, dtype, datetime_data, datetime_as_string, - busday_offset, busday_count, is_busday, busdaycalendar - ) -from numpy._utils import set_module + busday_count, + busday_offset, + busdaycalendar, + datetime_as_string, + datetime_data, + dtype, + is_busday, + ndarray, +) # we add more at the bottom __all__ = [ @@ -95,19 +102,19 @@ # we don't need all these imports, but we need to keep them for compatibility # for users using np._core.numerictypes.UPPER_TABLE -from ._string_helpers import ( # noqa: F401 - english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE -) - -from ._type_aliases import ( - sctypeDict, allTypes, sctypes -) -from ._dtype import _kind_name - # we don't export these for import *, but we do want them accessible # as numerictypes.bool, etc. 
-from builtins import bool, int, float, complex, object, str, bytes # noqa: F401, UP029 +from builtins import bool, bytes, complex, float, int, object, str # noqa: F401, UP029 +from ._dtype import _kind_name +from ._string_helpers import ( # noqa: F401 + LOWER_TABLE, + UPPER_TABLE, + english_capitalize, + english_lower, + english_upper, +) +from ._type_aliases import allTypes, sctypeDict, sctypes # We use this later generic = allTypes['generic'] diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 3b6b0c63713a..b649b8f91cd1 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,63 +1,70 @@ -import builtins -from typing import Any, Literal as L, TypedDict, type_check_only +from builtins import bool as py_bool +from typing import Final, TypedDict, type_check_only +from typing import Literal as L import numpy as np from numpy import ( - dtype, - generic, bool, bool_, - uint8, - uint16, - uint32, - uint64, - ubyte, - ushort, - uintc, - ulong, - ulonglong, - uintp, - uint, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + double, + dtype, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, int8, int16, int32, int64, - byte, - short, + int_, intc, - long, - longlong, + integer, intp, - int_, - float16, - float32, - float64, - half, - single, - double, + long, longdouble, - complex64, - complex128, - csingle, - cdouble, - clongdouble, - datetime64, - timedelta64, + longlong, + number, object_, + short, + signedinteger, + single, str_, - bytes_, - void, + timedelta64, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, unsignedinteger, - character, - inexact, - number, - integer, - flexible, - complexfloating, - signedinteger, - floating, + ushort, + void, ) -from ._type_aliases import sctypeDict # noqa: F401 
+from numpy._typing import DTypeLike + +from ._type_aliases import sctypeDict as sctypeDict from .multiarray import ( busday_count, busday_offset, @@ -67,9 +74,6 @@ from .multiarray import ( is_busday, ) -from numpy._typing import DTypeLike -from numpy._typing._extended_precision import float96, float128, complex192, complex256 - __all__ = [ "ScalarType", "typecodes", @@ -151,41 +155,43 @@ class _TypeCodes(TypedDict): Datetime: L['Mm'] All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] -def isdtype(dtype: dtype | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ... +def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... +def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> py_bool: ... -def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ... - -typecodes: _TypeCodes -ScalarType: tuple[ - type[int], - type[float], - type[complex], - type[builtins.bool], - type[bytes], - type[str], - type[memoryview], - type[np.bool], - type[csingle], - type[cdouble], - type[clongdouble], - type[half], - type[single], - type[double], - type[longdouble], - type[byte], - type[short], - type[intc], - type[long], - type[longlong], - type[timedelta64], - type[datetime64], - type[object_], - type[bytes_], - type[str_], - type[ubyte], - type[ushort], - type[uintc], - type[ulong], - type[ulonglong], - type[void], -] +typecodes: Final[_TypeCodes] = ... +ScalarType: Final[ + tuple[ + type[int], + type[float], + type[complex], + type[py_bool], + type[bytes], + type[str], + type[memoryview], + type[np.bool], + type[csingle], + type[cdouble], + type[clongdouble], + type[half], + type[single], + type[double], + type[longdouble], + type[byte], + type[short], + type[intc], + type[long], + type[longlong], + type[timedelta64], + type[datetime64], + type[object_], + type[bytes_], + type[str_], + type[ubyte], + type[ushort], + type[uintc], + type[ulong], + type[ulonglong], + type[void], + ] +] = ... 
+typeDict: Final = sctypeDict diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index aed83d17b836..6414710ae900 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -2,11 +2,13 @@ import collections import functools -from numpy._utils import set_module -from numpy._utils._inspect import getargspec from numpy._core._multiarray_umath import ( - add_docstring, _get_implementing_args, _ArrayFunctionDispatcher) - + _ArrayFunctionDispatcher, + _get_implementing_args, + add_docstring, +) +from numpy._utils import set_module # noqa: F401 +from numpy._utils._inspect import getargspec ARRAY_FUNCTIONS = set() diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi index 05453190efd4..91d624203e81 100644 --- a/numpy/_core/overrides.pyi +++ b/numpy/_core/overrides.pyi @@ -1,11 +1,11 @@ from collections.abc import Callable, Iterable -from typing import Any, Final, NamedTuple, ParamSpec, TypeVar - -from numpy._typing import _SupportsArrayFunc +from typing import Any, Final, NamedTuple, ParamSpec, TypeAlias, TypeVar _T = TypeVar("_T") _Tss = ParamSpec("_Tss") -_FuncT = TypeVar("_FuncT", bound=Callable[..., object]) +_FuncLikeT = TypeVar("_FuncLikeT", bound=type | Callable[..., object]) + +_Dispatcher: TypeAlias = Callable[_Tss, Iterable[object]] ### @@ -18,14 +18,11 @@ class ArgSpec(NamedTuple): keywords: str | None defaults: tuple[Any, ...] -def get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ... -def finalize_array_function_like(public_api: _FuncT) -> _FuncT: ... +def get_array_function_like_doc(public_api: Callable[..., object], docstring_template: str = "") -> str: ... +def finalize_array_function_like(public_api: _FuncLikeT) -> _FuncLikeT: ... # -def verify_matching_signatures( - implementation: Callable[_Tss, object], - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]], -) -> None: ... 
+def verify_matching_signatures(implementation: Callable[_Tss, object], dispatcher: _Dispatcher[_Tss]) -> None: ... # NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with # the original wrapped callable stored in the `._implementation` attribute. It checks @@ -33,11 +30,11 @@ def verify_matching_signatures( # specifies. Since the dispatcher only returns an iterable of passed array-like args, # this overridable behaviour is impossible to annotate. def array_function_dispatch( - dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None, + dispatcher: _Dispatcher[_Tss] | None = None, module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = False, -) -> Callable[[_FuncT], _FuncT]: ... +) -> Callable[[_FuncLikeT], _FuncLikeT]: ... # def array_function_from_dispatcher( @@ -45,4 +42,4 @@ def array_function_from_dispatcher( module: str | None = None, verify: bool = True, docs_from_dispatcher: bool = True, -) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ... +) -> Callable[[_Dispatcher[_Tss]], Callable[_Tss, _T]]: ... diff --git a/numpy/_core/printoptions.py b/numpy/_core/printoptions.py index 7ac93c2290e0..5d6f9635cd3c 100644 --- a/numpy/_core/printoptions.py +++ b/numpy/_core/printoptions.py @@ -29,4 +29,4 @@ } format_options = ContextVar( - "format_options", default=default_format_options_dict.copy()) + "format_options", default=default_format_options_dict) diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 6d0331984bc7..39bcf4ba6294 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -7,6 +7,7 @@ from contextlib import nullcontext from numpy._utils import set_module + from . import numeric as sb from . 
import numerictypes as nt from .arrayprint import _get_legacy_print_mode @@ -243,11 +244,10 @@ def __setattr__(self, attr, val): res = fielddict.get(attr, None) if res: return self.setfield(val, *res[:2]) + elif getattr(self, attr, None): + return nt.void.__setattr__(self, attr, val) else: - if getattr(self, attr, None): - return nt.void.__setattr__(self, attr, val) - else: - raise AttributeError(f"'record' object has no attribute '{attr}'") + raise AttributeError(f"'record' object has no attribute '{attr}'") def __getitem__(self, indx): obj = nt.void.__getitem__(self, indx) diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index b1725bcdf237..ead165918478 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,13 +1,31 @@ # ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false -from collections.abc import Iterable, Sequence -from typing import Any, ClassVar, Literal, Protocol, SupportsIndex, TypeAlias, TypeVar, overload, type_check_only - from _typeshed import StrOrBytesPath +from collections.abc import Iterable, Sequence +from typing import ( + Any, + ClassVar, + Literal, + Protocol, + SupportsIndex, + TypeAlias, + overload, + type_check_only, +) +from typing_extensions import TypeVar import numpy as np from numpy import _ByteOrder, _OrderKACF, _SupportsBuffer -from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLikeVoid_co, _NestedSequence, _ShapeLike +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLikeVoid_co, + _NestedSequence, + _Shape, + _ShapeLike, +) __all__ = [ "array", @@ -23,10 +41,10 @@ __all__ = [ _T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=np.generic) -_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True) -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, 
covariant=True) -_RecArray: TypeAlias = recarray[Any, np.dtype[_ScalarT]] +_RecArray: TypeAlias = recarray[_AnyShape, np.dtype[_ScalarT]] @type_check_only class _SupportsReadInto(Protocol): diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index 62334695eb7b..c2a0f0dae789 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -5,10 +5,10 @@ import itertools import operator +from . import fromnumeric as _from_nx from . import numeric as _nx from . import overrides from .multiarray import array, asanyarray, normalize_axis_index -from . import fromnumeric as _from_nx array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -587,7 +587,7 @@ def _block_check_depths_match(arrays, parent_index=[]): the choice of algorithm used using benchmarking wisdom. """ - if type(arrays) is tuple: + if isinstance(arrays, tuple): # not strictly necessary, but saves us from: # - more than one way to do things - no point treating tuples like # lists @@ -598,7 +598,7 @@ def _block_check_depths_match(arrays, parent_index=[]): 'Only lists can be used to arrange blocks, and np.block does ' 'not allow implicit conversion from tuple to ndarray.' ) - elif type(arrays) is list and len(arrays) > 0: + elif isinstance(arrays, list) and len(arrays) > 0: idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) for i, arr in enumerate(arrays)) @@ -618,7 +618,7 @@ def _block_check_depths_match(arrays, parent_index=[]): first_index = index return first_index, max_arr_ndim, final_size - elif type(arrays) is list and len(arrays) == 0: + elif isinstance(arrays, list) and len(arrays) == 0: # We've 'bottomed out' on an empty list return parent_index + [None], 0, 0 else: @@ -770,7 +770,7 @@ def _block_dispatcher(arrays): # Use type(...) is list to match the behavior of np.block(), which special # cases list specifically rather than allowing for generic iterables or # tuple. 
Also, we know that list.__array_function__ will never exist. - if type(arrays) is list: + if isinstance(arrays, list): for subarrays in arrays: yield from _block_dispatcher(subarrays) else: diff --git a/numpy/_core/src/common/half.hpp b/numpy/_core/src/common/half.hpp index 484750ad84cd..14dabbe79d7f 100644 --- a/numpy/_core/src/common/half.hpp +++ b/numpy/_core/src/common/half.hpp @@ -9,8 +9,6 @@ // TODO(@seiko2plus): // - covers half-precision operations that being supported by numpy/halffloat.h // - add support for arithmetic operations -// - enables __fp16 causes massive FP exceptions on aarch64, -// needs a deep investigation namespace np { @@ -19,42 +17,19 @@ namespace np { /// Provides a type that implements 16-bit floating point (half-precision). /// This type is ensured to be 16-bit size. -#if 1 // ndef __ARM_FP16_FORMAT_IEEE class Half final { public: - /// Whether `Half` has a full native HW support. - static constexpr bool kNative = false; - /// Whether `Half` has a native HW support for single/double conversion. - template - static constexpr bool kNativeConversion = ( - ( - std::is_same_v && - #if defined(NPY_HAVE_FP16) || defined(NPY_HAVE_VSX3) - true - #else - false - #endif - ) || ( - std::is_same_v && - #if defined(NPY_HAVE_AVX512FP16) || (defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE)) - true - #else - false - #endif - ) - ); - /// Default constructor. initialize nothing. Half() = default; /// Construct from float /// If there are no hardware optimization available, rounding will always /// be set to ties to even. 
- explicit Half(float f) + NPY_FINLINE explicit Half(float f) { #if defined(NPY_HAVE_FP16) __m128 mf = _mm_load_ss(&f); - bits_ = static_cast(_mm_cvtsi128_si32(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT))); + bits_ = _mm_extract_epi16(_mm_cvtps_ph(mf, _MM_FROUND_TO_NEAREST_INT), 0); #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX_ASM) __vector float vf32 = vec_splats(f); __vector unsigned short vf16; @@ -64,6 +39,9 @@ class Half final { #else bits_ = vec_extract(vf16, 0); #endif + #elif defined(__ARM_FP16_FORMAT_IEEE) + __fp16 f16 = __fp16(f); + bits_ = BitCast(f16); #else bits_ = half_private::FromFloatBits(BitCast(f)); #endif @@ -72,20 +50,23 @@ class Half final { /// Construct from double. /// If there are no hardware optimization available, rounding will always /// be set to ties to even. - explicit Half(double f) + NPY_FINLINE explicit Half(double f) { #if defined(NPY_HAVE_AVX512FP16) __m128d md = _mm_load_sd(&f); - bits_ = static_cast(_mm_cvtsi128_si32(_mm_castph_si128(_mm_cvtpd_ph(md)))); + bits_ = _mm_extract_epi16(_mm_castph_si128(_mm_cvtpd_ph(md)), 0); #elif defined(NPY_HAVE_VSX3) && defined(NPY_HAVE_VSX3_HALF_DOUBLE) __asm__ __volatile__ ("xscvdphp %x0,%x1" : "=wa" (bits_) : "wa" (f)); + #elif defined(__ARM_FP16_FORMAT_IEEE) + __fp16 f16 = __fp16(f); + bits_ = BitCast(f16); #else bits_ = half_private::FromDoubleBits(BitCast(f)); #endif } /// Cast to float - explicit operator float() const + NPY_FINLINE explicit operator float() const { #if defined(NPY_HAVE_FP16) float ret; @@ -99,13 +80,15 @@ class Half final { : "=wa"(vf32) : "wa"(vec_splats(bits_))); return vec_extract(vf32, 0); + #elif defined(__ARM_FP16_FORMAT_IEEE) + return float(BitCast<__fp16>(bits_)); #else return BitCast(half_private::ToFloatBits(bits_)); #endif } /// Cast to double - explicit operator double() const + NPY_FINLINE explicit operator double() const { #if defined(NPY_HAVE_AVX512FP16) double ret; @@ -117,6 +100,8 @@ class Half final { : "=wa"(f64) : "wa"(bits_)); return f64; + 
#elif defined(__ARM_FP16_FORMAT_IEEE) + return double(BitCast<__fp16>(bits_)); #else return BitCast(half_private::ToDoubleBits(bits_)); #endif @@ -223,40 +208,6 @@ class Half final { private: uint16_t bits_; }; -#else // __ARM_FP16_FORMAT_IEEE -class Half final { - public: - static constexpr bool kNative = true; - template - static constexpr bool kNativeConversion = ( - std::is_same_v || std::is_same_v - ); - Half() = default; - constexpr Half(__fp16 h) : half_(h) - {} - constexpr operator __fp16() const - { return half_; } - static Half FromBits(uint16_t bits) - { - Half h; - h.half_ = BitCast<__fp16>(bits); - return h; - } - uint16_t Bits() const - { return BitCast(half_); } - constexpr bool Less(Half r) const - { return half_ < r.half_; } - constexpr bool LessEqual(Half r) const - { return half_ <= r.half_; } - constexpr bool Equal(Half r) const - { return half_ == r.half_; } - constexpr bool IsNaN() const - { return half_ != half_; } - - private: - __fp16 half_; -}; -#endif // __ARM_FP16_FORMAT_IEEE /// @} cpp_core_types diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 8810182812e5..f15f636cdb1e 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -246,7 +246,7 @@ npy__cpu_validate_baseline(void) static int npy__cpu_check_env(int disable, const char *env) { - static const char *names[] = { + static const char *const names[] = { "enable", "disable", "NPY_ENABLE_CPU_FEATURES", "NPY_DISABLE_CPU_FEATURES", "During parsing environment variable: 'NPY_ENABLE_CPU_FEATURES':\n", @@ -277,7 +277,7 @@ npy__cpu_check_env(int disable, const char *env) { char *notsupp_cur = ¬supp[0]; //comma and space including (htab, vtab, CR, LF, FF) - const char *delim = ", \t\v\r\n\f"; + const char delim[] = ", \t\v\r\n\f"; char *feature = strtok(features, delim); while (feature) { if (npy__cpu_baseline_fid(feature) > 0){ diff --git a/numpy/_core/src/common/simd/README.md 
b/numpy/_core/src/common/simd/README.md index 9a68d1aa1bfc..a13a0f75b6fc 100644 --- a/numpy/_core/src/common/simd/README.md +++ b/numpy/_core/src/common/simd/README.md @@ -45,19 +45,19 @@ StoreU(v128, data); #include "simd/simd.hpp" // Check if SIMD is enabled -#if NPY_SIMDX +#if NPY_HWY // SIMD code #else // Scalar fallback code #endif // Check for float64 support -#if NPY_SIMDX_F64 +#if NPY_HWY_F64 // Use float64 SIMD operations #endif // Check for FMA support -#if NPY_SIMDX_FMA +#if NPY_HWY_FMA // Use FMA operations #endif ``` @@ -70,9 +70,9 @@ The wrapper provides type constraints to help with SFINAE (Substitution Failure ```cpp // Base template - always defined, even when SIMD is not enabled (for SFINAE) template - constexpr bool kSupportLane = NPY_SIMDX != 0; + constexpr bool kSupportLane = NPY_HWY != 0; template <> - constexpr bool kSupportLane = NPY_SIMDX_F64 != 0; + constexpr bool kSupportLane = NPY_HWY_F64 != 0; ``` - `kMaxLanes`: Maximum number of lanes supported by the SIMD extension for the specified lane type. @@ -165,6 +165,7 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d - When `NPY_DISABLE_OPTIMIZATION` is defined, SIMD operations are disabled - SIMD is enabled only when the Highway target is not scalar (`HWY_TARGET != HWY_SCALAR`) + and not EMU128 (`HWY_TARGET != HWY_EMU128`) ## Design Notes @@ -172,18 +173,20 @@ The SIMD wrapper automatically disables SIMD operations when optimizations are d - NumPy already provides kernels for scalar operations - Compilers can better optimize standard library implementations - Not all Highway intrinsics are fully supported in scalar mode + - For strict IEEE 754 floating-point compliance requirements, direct scalar + implementations offer more predictable behavior than EMU128 2. 
**Legacy Universal Intrinsics** - The older universal intrinsics C interface (in `simd.h` and accessible via `NPY_SIMD` macros) is deprecated - - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_SIMDX` macros) + - All new SIMD code should use this Highway-based wrapper (accessible via `NPY_HWY` macros) - The legacy code is maintained for compatibility but will eventually be removed 3. **Feature Detection Constants vs. Highway Constants** - - NumPy-specific constants (`NPY_SIMDX_F16`, `NPY_SIMDX_F64`, `NPY_SIMDX_FMA`) provide additional safety beyond raw Highway constants + - NumPy-specific constants (`NPY_HWY_F16`, `NPY_HWY_F64`, `NPY_HWY_FMA`) provide additional safety beyond raw Highway constants - Highway constants (e.g., `HWY_HAVE_FLOAT16`) only check platform capabilities but don't consider NumPy's build configuration - Our constants combine both checks: ```cpp - #define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) + #define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) ``` - This ensures SIMD features won't be used when: - Platform supports it but NumPy optimization is disabled via meson option: diff --git a/numpy/_core/src/common/simd/simd.hpp b/numpy/_core/src/common/simd/simd.hpp index 698da4adf865..40556a68c59d 100644 --- a/numpy/_core/src/common/simd/simd.hpp +++ b/numpy/_core/src/common/simd/simd.hpp @@ -10,7 +10,7 @@ */ /** * Since `NPY_SIMD` is only limited to NumPy C universal intrinsics, - * `NPY_SIMDX` is defined to indicate the SIMD availability for Google's Highway + * `NPY_HWY` is defined to indicate the SIMD availability for Google's Highway * C++ code. * * Highway SIMD is only available when optimization is enabled. @@ -22,38 +22,44 @@ /** * We avoid using Highway scalar operations for the following reasons: - * 1. We already provide kernels for scalar operations, so falling back to - * the NumPy implementation is more appropriate. Compilers can often - * optimize these better since they rely on standard libraries. 
- * 2. Not all Highway intrinsics are fully supported in scalar mode. * - * Therefore, we only enable SIMD when the Highway target is not scalar. + * 1. NumPy already provides optimized kernels for scalar operations. Using these + * existing implementations is more consistent with NumPy's architecture and + * allows for compiler optimizations specific to standard library calls. + * + * 2. Not all Highway intrinsics are fully supported in scalar mode, which could + * lead to compilation errors or unexpected behavior for certain operations. + * + * 3. For NumPy's strict IEEE 754 floating-point compliance requirements, direct scalar + * implementations offer more predictable behavior than EMU128. + * + * Therefore, we only enable Highway SIMD when targeting actual SIMD instruction sets. */ -#define NPY_SIMDX (HWY_TARGET != HWY_SCALAR) +#define NPY_HWY ((HWY_TARGET != HWY_SCALAR) && (HWY_TARGET != HWY_EMU128)) // Indicates if the SIMD operations are available for float16. -#define NPY_SIMDX_F16 (NPY_SIMDX && HWY_HAVE_FLOAT16) +#define NPY_HWY_F16 (NPY_HWY && HWY_HAVE_FLOAT16) // Note: Highway requires SIMD extentions with native float32 support, so we don't need // to check for it. // Indicates if the SIMD operations are available for float64. -#define NPY_SIMDX_F64 (NPY_SIMDX && HWY_HAVE_FLOAT64) +#define NPY_HWY_F64 (NPY_HWY && HWY_HAVE_FLOAT64) // Indicates if the SIMD floating operations are natively supports fma. -#define NPY_SIMDX_FMA (NPY_SIMDX && HWY_NATIVE_FMA) +#define NPY_HWY_FMA (NPY_HWY && HWY_NATIVE_FMA) #else -#define NPY_SIMDX 0 -#define NPY_SIMDX_F16 0 -#define NPY_SIMDX_F64 0 -#define NPY_SIMDX_FMA 0 +#define NPY_HWY 0 +#define NPY_HWY_F16 0 +#define NPY_HWY_F64 0 +#define NPY_HWY_FMA 0 #endif namespace np { /// Represents the max SIMD width supported by the platform. namespace simd { -#if NPY_SIMDX +#if NPY_HWY /// The highway namespace alias. 
/// We can not import all the symbols from the HWY_NAMESPACE because it will /// conflict with the existing symbols in the numpy namespace. @@ -67,7 +73,7 @@ using _Tag = hn::ScalableTag; /// Represents the 128-bit SIMD width. namespace simd128 { -#if NPY_SIMDX +#if NPY_HWY namespace hn = hwy::HWY_NAMESPACE; template using _Tag = hn::Full128; diff --git a/numpy/_core/src/common/simd/simd.inc.hpp b/numpy/_core/src/common/simd/simd.inc.hpp index 64d28bc47118..f4a2540927dd 100644 --- a/numpy/_core/src/common/simd/simd.inc.hpp +++ b/numpy/_core/src/common/simd/simd.inc.hpp @@ -1,6 +1,6 @@ -#ifndef NPY_SIMDX +#ifndef NPY_HWY #error "This is not a standalone header. Include simd.hpp instead." -#define NPY_SIMDX 1 // Prevent editors from graying out the happy branch +#define NPY_HWY 1 // Prevent editors from graying out the happy branch #endif // Using anonymous namespace instead of inline to ensure each translation unit @@ -17,9 +17,9 @@ namespace { * @tparam TLane The lane type to check for support. 
*/ template -constexpr bool kSupportLane = NPY_SIMDX != 0; +constexpr bool kSupportLane = NPY_HWY != 0; -#if NPY_SIMDX +#if NPY_HWY // Define lane type support based on Highway capabilities template <> constexpr bool kSupportLane = HWY_HAVE_FLOAT16 != 0; @@ -127,6 +127,6 @@ using hn::Sqrt; using hn::Sub; using hn::Xor; -#endif // NPY_SIMDX +#endif // NPY_HWY -} // namespace anonymous +} // namespace diff --git a/numpy/_core/src/dummymodule.c b/numpy/_core/src/dummymodule.c index 2f293d6c4cd6..e1ef80ab3af3 100644 --- a/numpy/_core/src/dummymodule.c +++ b/numpy/_core/src/dummymodule.c @@ -14,25 +14,27 @@ static struct PyMethodDef methods[] = { {NULL, NULL, 0, NULL} }; +static struct PyModuleDef_Slot dummy_slots[] = { +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + // signal that this module can be imported in isolated subinterpreters + {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "dummy", - NULL, - -1, - methods, - NULL, - NULL, - NULL, - NULL + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "dummy", + .m_size = 0, + .m_methods = methods, + .m_slots = dummy_slots, }; /* Initialization function for the module */ PyMODINIT_FUNC PyInit__dummy(void) { - PyObject *m; - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; - } - return m; + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index dd25e1ffd6cc..112c57433094 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -174,8 +174,8 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple, npy_bool from_pickle); /* - * Gets a tzoffset in minutes by calling the fromutc() function on - * the 
Python datetime.tzinfo object. + * Gets a tzoffset in minutes by calling the astimezone() function on + * the Python datetime.datetime object. */ NPY_NO_EXPORT int get_tzoffset_from_pytzinfo(PyObject *timezone, npy_datetimestruct *dts); diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index fc73a64b19a0..8012a32b070e 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -2413,41 +2413,56 @@ static PyMethodDef Multiarray_TestsMethods[] = { }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_tests", - NULL, - -1, - Multiarray_TestsMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__multiarray_tests(void) +static int +_multiarray_tests_exec(PyObject *m) { - PyObject *m; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return m; + if (PyArray_ImportNumPyAPI() < 0) { + return -1; } - import_array(); if (init_argparse_mutex() < 0) { - return NULL; + return -1; } if (PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "cannot load _multiarray_tests module."); } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _multiarray_tests_slots[] = { + {Py_mod_exec, _multiarray_tests_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, 
Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "_multiarray_tests", + .m_size = 0, + .m_methods = Multiarray_TestsMethods, + .m_slots = _multiarray_tests_slots, +}; - return m; +PyMODINIT_FUNC PyInit__multiarray_tests(void) +{ + return PyModuleDef_Init(&moduledef); } NPY_NO_EXPORT int diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index fb2e09d341f7..cc9c5762a196 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -27,9 +27,21 @@ #endif #endif -#define NBUCKETS 1024 /* number of buckets for data*/ -#define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ -#define NCACHE 7 /* number of cache entries per bucket */ +/* Do not enable the alloc cache if the GIL is disabled, or if ASAN or MSAN + * instrumentation is enabled. The cache makes ASAN use-after-free or MSAN + * use-of-uninitialized-memory warnings less useful. */ +#define USE_ALLOC_CACHE 1 +#ifdef Py_GIL_DISABLED +# define USE_ALLOC_CACHE 0 +#elif defined(__has_feature) +# if __has_feature(address_sanitizer) || __has_feature(memory_sanitizer) +# define USE_ALLOC_CACHE 0 +# endif +#endif + +# define NBUCKETS 1024 /* number of buckets for data*/ +# define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */ +# define NCACHE 7 /* number of cache entries per bucket */ /* this structure fits neatly into a cacheline */ typedef struct { npy_uintp available; /* number of cached pointers */ @@ -38,7 +50,6 @@ typedef struct { static cache_bucket datacache[NBUCKETS]; static cache_bucket dimcache[NBUCKETS_DIM]; - /* * This function tells whether NumPy attempts to call `madvise` with * `MADV_HUGEPAGE`. 
`madvise` is only ever used on linux, so the value @@ -115,7 +126,7 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, assert((esz == 1 && cache == datacache) || (esz == sizeof(npy_intp) && cache == dimcache)); assert(PyGILState_Check()); -#ifndef Py_GIL_DISABLED +#if USE_ALLOC_CACHE if (nelem < msz) { if (cache[nelem].available > 0) { return cache[nelem].ptrs[--(cache[nelem].available)]; @@ -141,7 +152,7 @@ _npy_free_cache(void * p, npy_uintp nelem, npy_uint msz, cache_bucket * cache, void (*dealloc)(void *)) { assert(PyGILState_Check()); -#ifndef Py_GIL_DISABLED +#if USE_ALLOC_CACHE if (p != NULL && nelem < msz) { if (cache[nelem].available < NCACHE) { cache[nelem].ptrs[cache[nelem].available++] = p; diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index e356b8251931..a18f74bda71a 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -11,6 +11,7 @@ #include "npy_static_data.h" #include "npy_import.h" #include +#include #ifdef __cplusplus extern "C" { @@ -230,15 +231,6 @@ npy_uint_alignment(int itemsize) * compared to memchr it returns one stride past end instead of NULL if needle * is not found. */ -#ifdef __clang__ - /* - * The code below currently makes use of !NPY_ALIGNMENT_REQUIRED, which - * should be OK but causes the clang sanitizer to warn. It may make - * sense to modify the code to avoid this "unaligned" access but - * it would be good to carefully check the performance changes. 
- */ - __attribute__((no_sanitize("alignment"))) -#endif static inline char * npy_memchr(char * haystack, char needle, npy_intp stride, npy_intp size, npy_intp * psubloopsize, int invert) @@ -259,11 +251,12 @@ npy_memchr(char * haystack, char needle, } else { /* usually find elements to skip path */ - if (!NPY_ALIGNMENT_REQUIRED && needle == 0 && stride == 1) { + if (needle == 0 && stride == 1) { /* iterate until last multiple of 4 */ char * block_end = haystack + size - (size % sizeof(unsigned int)); while (p < block_end) { - unsigned int v = *(unsigned int*)p; + unsigned int v; + memcpy(&v, p, sizeof(v)); if (v != 0) { break; } diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 26b898fa1479..fee0d4a61a78 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -920,11 +920,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *const *args, Py_ssize_t return NULL; } -static const char *EMPTY_SEQUENCE_ERR_MSG = "indices must be integral: the provided " \ +static const char EMPTY_SEQUENCE_ERR_MSG[] = "indices must be integral: the provided " \ "empty sequence was inferred as float. 
Wrap it with " \ "'np.array(indices, dtype=np.intp)'"; -static const char *NON_INTEGRAL_ERROR_MSG = "only int indices permitted"; +static const char NON_INTEGRAL_ERROR_MSG[] = "only int indices permitted"; /* Convert obj to an ndarray with integer dtype or fail */ static PyArrayObject * @@ -1465,7 +1465,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t PyObject *obj; PyObject *str; const char *docstr; - static char *msg = "already has a different docstring"; + static const char msg[] = "already has a different docstring"; /* Don't add docstrings */ #if PY_VERSION_HEX > 0x030b0000 @@ -1620,19 +1620,15 @@ pack_inner(const char *inptr, bb[1] = npyv_tobits_b8(npyv_cmpneq_u8(v1, v_zero)); bb[2] = npyv_tobits_b8(npyv_cmpneq_u8(v2, v_zero)); bb[3] = npyv_tobits_b8(npyv_cmpneq_u8(v3, v_zero)); - if(out_stride == 1 && - (!NPY_ALIGNMENT_REQUIRED || isAligned)) { - npy_uint64 *ptr64 = (npy_uint64*)outptr; + if(out_stride == 1 && isAligned) { #if NPY_SIMD_WIDTH == 16 - npy_uint64 bcomp = bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48); - ptr64[0] = bcomp; + npy_uint64 arr[1] = {bb[0] | (bb[1] << 16) | (bb[2] << 32) | (bb[3] << 48)}; #elif NPY_SIMD_WIDTH == 32 - ptr64[0] = bb[0] | (bb[1] << 32); - ptr64[1] = bb[2] | (bb[3] << 32); + npy_uint64 arr[2] = {bb[0] | (bb[1] << 32), bb[2] | (bb[3] << 32)}; #else - ptr64[0] = bb[0]; ptr64[1] = bb[1]; - ptr64[2] = bb[2]; ptr64[3] = bb[3]; + npy_uint64 arr[4] = {bb[0], bb[1], bb[2], bb[3]}; #endif + memcpy(outptr, arr, sizeof(arr)); outptr += vstepx4; } else { for(int i = 0; i < 4; i++) { diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index 777a71c20423..5ada3e6e4faf 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -318,7 +318,7 @@ PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf) buf->len = (npy_intp) view.len; /* - * In Python 3 both of the deprecated functions 
PyObject_AsWriteBuffer and + * Both of the deprecated functions PyObject_AsWriteBuffer and * PyObject_AsReadBuffer that this code replaces release the buffer. It is * up to the object that supplies the buffer to guarantee that the buffer * sticks around after the release. diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index dbaea718f3f3..f7efe5041ab3 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2327,7 +2327,7 @@ PyArray_FromInterface(PyObject *origin) } data = (char *)view.buf; /* - * In Python 3 both of the deprecated functions PyObject_AsWriteBuffer and + * Both of the deprecated functions PyObject_AsWriteBuffer and * PyObject_AsReadBuffer that this code replaces release the buffer. It is * up to the object that supplies the buffer to guarantee that the buffer * sticks around after the release. diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 9c024dbcd91c..d820474532ca 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2245,8 +2245,8 @@ NpyDatetime_ConvertPyDateTimeToDatetimeStruct( } /* - * Gets a tzoffset in minutes by calling the fromutc() function on - * the Python datetime.tzinfo object. + * Gets a tzoffset in minutes by calling the astimezone() function on + * the Python datetime.datetime object. 
*/ NPY_NO_EXPORT int get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) @@ -2255,14 +2255,14 @@ get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) npy_datetimestruct loc_dts; /* Create a Python datetime to give to the timezone object */ - dt = PyDateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, - dts->hour, dts->min, 0, 0); + dt = PyDateTimeAPI->DateTime_FromDateAndTime((int)dts->year, dts->month, dts->day, + dts->hour, dts->min, 0, 0, PyDateTime_TimeZone_UTC, PyDateTimeAPI->DateTimeType); if (dt == NULL) { return -1; } /* Convert the datetime from UTC to local time */ - loc_dt = PyObject_CallMethod(timezone_obj, "fromutc", "O", dt); + loc_dt = PyObject_CallMethod(dt, "astimezone", "O", timezone_obj); Py_DECREF(dt); if (loc_dt == NULL) { return -1; diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index feea67c79bb9..5c036b704774 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -4,6 +4,7 @@ #define PY_SSIZE_T_CLEAN #include #include +#include #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" @@ -1484,7 +1485,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, if (argpart == NULL) { ret = argsort(valptr, idxptr, N, op); - /* Object comparisons may raise an exception in Python 3 */ + /* Object comparisons may raise an exception */ if (needs_api && PyErr_Occurred()) { ret = -1; } @@ -1498,7 +1499,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort, for (i = 0; i < nkth; ++i) { ret = argpart(valptr, idxptr, N, kth[i], pivots, &npiv, nkth, op); - /* Object comparisons may raise an exception in Python 3 */ + /* Object comparisons may raise an exception */ if (needs_api && PyErr_Occurred()) { ret = -1; } @@ -2525,11 +2526,13 @@ count_nonzero_u8(const char *data, npy_intp bstride, npy_uintp len) len -= len_m; count = len_m - 
zcount; #else - if (!NPY_ALIGNMENT_REQUIRED || npy_is_aligned(data, sizeof(npy_uint64))) { + if (npy_is_aligned(data, sizeof(npy_uint64))) { int step = 6 * sizeof(npy_uint64); int left_bytes = len % step; for (const char *end = data + len; data < end - left_bytes; data += step) { - count += count_nonzero_bytes_384((const npy_uint64 *)data); + npy_uint64 arr[6]; + memcpy(arr, data, step); + count += count_nonzero_bytes_384(arr); } len = left_bytes; } diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 01ffd225274f..0c4eb3dd9a8d 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -33,11 +33,7 @@ * instructions (16 byte). * So this flag can only be enabled if autovectorization is disabled. */ -#if NPY_ALIGNMENT_REQUIRED -# define NPY_USE_UNALIGNED_ACCESS 0 -#else -# define NPY_USE_UNALIGNED_ACCESS 0 -#endif +#define NPY_USE_UNALIGNED_ACCESS 0 #define _NPY_NOP1(x) (x) #define _NPY_NOP2(x) (x) diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index fedeb7d04cd3..7953e32fcbf0 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1969,6 +1969,15 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) if (tmp_arr && solve_may_share_memory(self, tmp_arr, 1) != 0) { Py_SETREF(tmp_arr, (PyArrayObject *)PyArray_NewCopy(tmp_arr, NPY_ANYORDER)); } + for (i = 0; i < index_num; ++i) { + if (indices[i].object != NULL && PyArray_Check(indices[i].object) && + solve_may_share_memory(self, (PyArrayObject *)indices[i].object, 1) != 0) { + Py_SETREF(indices[i].object, PyArray_Copy((PyArrayObject*)indices[i].object)); + if (indices[i].object == NULL) { + goto fail; + } + } + } /* * Special case for very simple 1-d fancy indexing, which however diff --git a/numpy/_core/src/multiarray/methods.c 
b/numpy/_core/src/multiarray/methods.c index baa680f3a74a..58a554dc40be 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1845,77 +1845,115 @@ array_reduce_ex_regular(PyArrayObject *self, int NPY_UNUSED(protocol)) static PyObject * array_reduce_ex_picklebuffer(PyArrayObject *self, int protocol) { - PyObject *numeric_mod = NULL, *from_buffer_func = NULL; - PyObject *pickle_module = NULL, *picklebuf_class = NULL; - PyObject *picklebuf_args = NULL; + PyObject *from_buffer_func = NULL; + PyObject *picklebuf_class = NULL; PyObject *buffer = NULL, *transposed_array = NULL; PyArray_Descr *descr = NULL; + PyObject *rev_perm = NULL; // only used in 'K' order char order; descr = PyArray_DESCR(self); - /* we expect protocol 5 to be available in Python 3.8 */ - pickle_module = PyImport_ImportModule("pickle"); - if (pickle_module == NULL){ - return NULL; - } - picklebuf_class = PyObject_GetAttrString(pickle_module, "PickleBuffer"); - Py_DECREF(pickle_module); - if (picklebuf_class == NULL) { + if (npy_cache_import_runtime("pickle", "PickleBuffer", &picklebuf_class) == -1) { return NULL; } /* Construct a PickleBuffer of the array */ - - if (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*) self) && - PyArray_IS_F_CONTIGUOUS((PyArrayObject*) self)) { + if (PyArray_IS_C_CONTIGUOUS((PyArrayObject *)self)) { + order = 'C'; + } + else if (PyArray_IS_F_CONTIGUOUS((PyArrayObject *)self)) { /* if the array if Fortran-contiguous and not C-contiguous, * the PickleBuffer instance will hold a view on the transpose * of the initial array, that is C-contiguous. 
*/ order = 'F'; - transposed_array = PyArray_Transpose((PyArrayObject*)self, NULL); - picklebuf_args = Py_BuildValue("(N)", transposed_array); + transposed_array = PyArray_Transpose((PyArrayObject *)self, NULL); + if (transposed_array == NULL) { + return NULL; + } } else { - order = 'C'; - picklebuf_args = Py_BuildValue("(O)", self); - } - if (picklebuf_args == NULL) { - Py_DECREF(picklebuf_class); - return NULL; + order = 'K'; + const int n = PyArray_NDIM(self); + npy_stride_sort_item items[NPY_MAXDIMS]; + // sort (strde, perm) as descending = transpose to C + PyArray_CreateSortedStridePerm(n, PyArray_STRIDES(self), items); + rev_perm = PyTuple_New(n); + if (rev_perm == NULL) { + return NULL; + } + PyArray_Dims perm; + npy_intp dims[NPY_MAXDIMS]; + for (int i = 0; i < n; i++) { + dims[i] = items[i].perm; + PyObject *idx = PyLong_FromLong(i); + if (idx == NULL) { + Py_DECREF(rev_perm); + return NULL; + } + PyTuple_SET_ITEM(rev_perm, items[i].perm, idx); + } + perm.ptr = dims; + perm.len = n; + transposed_array = PyArray_Transpose((PyArrayObject *)self, &perm); + if (transposed_array == NULL) { + Py_DECREF(rev_perm); + return NULL; + } + if (!PyArray_IS_C_CONTIGUOUS((PyArrayObject *)transposed_array)) { + // self is non-contiguous + Py_DECREF(rev_perm); + Py_DECREF(transposed_array); + return array_reduce_ex_regular(self, protocol); + } } - - buffer = PyObject_CallObject(picklebuf_class, picklebuf_args); - Py_DECREF(picklebuf_class); - Py_DECREF(picklebuf_args); + buffer = PyObject_CallOneArg(picklebuf_class, transposed_array == NULL ? (PyObject*) self: transposed_array); if (buffer == NULL) { /* Some arrays may refuse to export a buffer, in which case * just fall back on regular __reduce_ex__ implementation * (gh-12745). 
*/ + Py_XDECREF(rev_perm); + Py_XDECREF(transposed_array); PyErr_Clear(); return array_reduce_ex_regular(self, protocol); } /* Get the _frombuffer() function for reconstruction */ - - numeric_mod = PyImport_ImportModule("numpy._core.numeric"); - if (numeric_mod == NULL) { + if (npy_cache_import_runtime("numpy._core.numeric", "_frombuffer", + &from_buffer_func) == -1) { + Py_XDECREF(rev_perm); + Py_XDECREF(transposed_array); Py_DECREF(buffer); return NULL; } - from_buffer_func = PyObject_GetAttrString(numeric_mod, - "_frombuffer"); - Py_DECREF(numeric_mod); - if (from_buffer_func == NULL) { + + PyObject *shape = NULL; + if (order == 'K') { + shape = PyArray_IntTupleFromIntp( + PyArray_NDIM((PyArrayObject *)transposed_array), + PyArray_SHAPE((PyArrayObject *)transposed_array)); + } + else { + shape = PyArray_IntTupleFromIntp(PyArray_NDIM(self), + PyArray_SHAPE(self)); + } + Py_XDECREF(transposed_array); + if (shape == NULL) { + Py_XDECREF(rev_perm); Py_DECREF(buffer); return NULL; } - - return Py_BuildValue("N(NONN)", - from_buffer_func, buffer, (PyObject *)descr, - PyObject_GetAttrString((PyObject *)self, "shape"), - PyUnicode_FromStringAndSize(&order, 1)); + if (order == 'K') { + return Py_BuildValue("N(NONNN)", from_buffer_func, buffer, + (PyObject *)descr, shape, + PyUnicode_FromStringAndSize(&order, 1), rev_perm); + } + else { + return Py_BuildValue("N(NONN)", from_buffer_func, buffer, + (PyObject *)descr, shape, + PyUnicode_FromStringAndSize(&order, 1)); + } } static PyObject * @@ -1930,8 +1968,6 @@ array_reduce_ex(PyArrayObject *self, PyObject *args) descr = PyArray_DESCR(self); if ((protocol < 5) || - (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)self) && - !PyArray_IS_F_CONTIGUOUS((PyArrayObject*)self)) || PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || (PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) && ((PyObject*)self)->ob_type != &PyArray_Type) || @@ -1943,6 +1979,11 @@ array_reduce_ex(PyArrayObject *self, PyObject *args) return 
array_reduce_ex_regular(self, protocol); } else { + /* The func will check internally + * if the array isn't backed by a contiguous data buffer or + * if the array refuses to export a buffer + * In either case, fall back to `array_reduce_ex_regular` + */ return array_reduce_ex_picklebuffer(self, protocol); } } diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 8ba38b555edb..7724756ba351 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -530,8 +530,7 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, NPY_NO_EXPORT PyArrayObject * PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, NPY_ORDER order, PyArrayObject *ret, - PyArray_Descr *dtype, NPY_CASTING casting, - npy_bool casting_not_passed) + PyArray_Descr *dtype, NPY_CASTING casting) { int iarrays; npy_intp shape = 0; @@ -647,12 +646,11 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays, * @param ret output array to fill * @param dtype Forced output array dtype (cannot be combined with ret) * @param casting Casting mode used - * @param casting_not_passed Deprecation helper */ NPY_NO_EXPORT PyObject * PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret, PyArray_Descr *dtype, - NPY_CASTING casting, npy_bool casting_not_passed) + NPY_CASTING casting) { int iarrays, narrays; PyArrayObject **arrays; @@ -698,7 +696,7 @@ PyArray_ConcatenateInto(PyObject *op, if (axis == NPY_RAVEL_AXIS) { ret = PyArray_ConcatenateFlattenedArrays( narrays, arrays, NPY_CORDER, ret, dtype, - casting, casting_not_passed); + casting); } else { ret = PyArray_ConcatenateArrays( @@ -743,7 +741,7 @@ PyArray_Concatenate(PyObject *op, int axis) casting = NPY_SAME_KIND_CASTING; } return PyArray_ConcatenateInto( - op, axis, NULL, NULL, casting, 0); + op, axis, NULL, NULL, casting); } static int @@ -2489,7 +2487,6 @@ array_concatenate(PyObject 
*NPY_UNUSED(dummy), PyObject *out = NULL; PyArray_Descr *dtype = NULL; NPY_CASTING casting = NPY_SAME_KIND_CASTING; - PyObject *casting_obj = NULL; PyObject *res; int axis = 0; @@ -2499,22 +2496,10 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), "|axis", &PyArray_AxisConverter, &axis, "|out", NULL, &out, "$dtype", &PyArray_DescrConverter2, &dtype, - "$casting", NULL, &casting_obj, + "$casting", &PyArray_CastingConverter, &casting, NULL, NULL, NULL) < 0) { return NULL; } - int casting_not_passed = 0; - if (casting_obj == NULL) { - /* - * Casting was not passed in, needed for deprecation only. - * This should be simplified once the deprecation is finished. - */ - casting_not_passed = 1; - } - else if (!PyArray_CastingConverter(casting_obj, &casting)) { - Py_XDECREF(dtype); - return NULL; - } if (out != NULL) { if (out == Py_None) { out = NULL; @@ -2526,7 +2511,7 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), } } res = PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out, dtype, - casting, casting_not_passed); + casting); Py_XDECREF(dtype); return res; } @@ -4773,36 +4758,27 @@ initialize_thread_unsafe_state(void) { return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - array_module_methods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__multiarray_umath(void) { - PyObject *m, *d, *s; - PyObject *c_api; +static int +_multiarray_umath_exec(PyObject *m) { + PyObject *d, *s, *c_api; - /* Create the module and add the functions */ - m = PyModule_Create(&moduledef); - if (!m) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; /* Initialize CPU features */ if (npy_cpu_init() < 0) { - goto err; + 
return -1; } /* Initialize CPU dispatch tracer */ if (npy_cpu_dispatch_tracer_init(m) < 0) { - goto err; + return -1; } #if defined(MS_WIN64) && defined(__GNUC__) @@ -4818,62 +4794,62 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { numpy_pydatetime_import(); if (PyErr_Occurred()) { - goto err; + return -1; } /* Add some symbolic constants to the module */ d = PyModule_GetDict(m); if (!d) { - goto err; + return -1; } if (intern_strings() < 0) { - goto err; + return -1; } if (initialize_static_globals() < 0) { - goto err; + return -1; } if (initialize_thread_unsafe_state() < 0) { - goto err; + return -1; } if (init_import_mutex() < 0) { - goto err; + return -1; } if (init_extobj() < 0) { - goto err; + return -1; } if (PyType_Ready(&PyUFunc_Type) < 0) { - goto err; + return -1; } PyArrayDTypeMeta_Type.tp_base = &PyType_Type; if (PyType_Ready(&PyArrayDTypeMeta_Type) < 0) { - goto err; + return -1; } PyArrayDescr_Type.tp_hash = PyArray_DescrHash; Py_SET_TYPE(&PyArrayDescr_Type, &PyArrayDTypeMeta_Type); if (PyType_Ready(&PyArrayDescr_Type) < 0) { - goto err; + return -1; } initialize_casting_tables(); initialize_numeric_types(); if (initscalarmath(m) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArray_Type) < 0) { - goto err; + return -1; } if (setup_scalartypes(d) < 0) { - goto err; + return -1; } PyArrayIter_Type.tp_iter = PyObject_SelfIter; @@ -4881,28 +4857,28 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter; PyArrayMultiIter_Type.tp_free = PyArray_free; if (PyType_Ready(&PyArrayIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMapIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayMultiIter_Type) < 0) { - goto err; + return -1; } PyArrayNeighborhoodIter_Type.tp_new = PyType_GenericNew; if (PyType_Ready(&PyArrayNeighborhoodIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&NpyIter_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFlags_Type) 
< 0) { - goto err; + return -1; } NpyBusDayCalendar_Type.tp_new = PyType_GenericNew; if (PyType_Ready(&NpyBusDayCalendar_Type) < 0) { - goto err; + return -1; } /* @@ -4923,43 +4899,43 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { s = npy_cpu_features_dict(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_features__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_baseline_list(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_baseline__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = npy_cpu_dispatch_list(); if (s == NULL) { - goto err; + return -1; } if (PyDict_SetItemString(d, "__cpu_dispatch__", s) < 0) { Py_DECREF(s); - goto err; + return -1; } Py_DECREF(s); s = PyCapsule_New((void *)_datetime_strings, NULL, NULL); if (s == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "DATETIMEUNITS", s); Py_DECREF(s); #define ADDCONST(NAME) \ - s = PyLong_FromLong(NPY_##NAME); \ + s = PyLong_FromLong(NPY_##NAME); \ PyDict_SetItemString(d, #NAME, s); \ Py_DECREF(s) @@ -4999,39 +4975,39 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* Finalize scalar types and expose them via namespace or typeinfo dict */ if (set_typeinfo(d) != 0) { - goto err; + return -1; } if (PyType_Ready(&PyArrayFunctionDispatcher_Type) < 0) { - goto err; + return -1; } PyDict_SetItemString( d, "_ArrayFunctionDispatcher", (PyObject *)&PyArrayFunctionDispatcher_Type); if (PyType_Ready(&PyArrayArrayConverter_Type) < 0) { - goto err; + return -1; } PyDict_SetItemString( d, "_array_converter", (PyObject *)&PyArrayArrayConverter_Type); if (PyType_Ready(&PyArrayMethod_Type) < 0) { - goto err; + return -1; } if (PyType_Ready(&PyBoundArrayMethod_Type) < 0) { - goto err; + return -1; } if (initialize_and_map_pytypes_to_dtypes() < 0) { - goto err; + return -1; } if (PyArray_InitializeCasts() < 0) { - goto err; + return -1; } if (init_string_dtype() < 0) { - goto err; + return 
-1; } /* @@ -5040,7 +5016,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { PyDataMem_DefaultHandler = PyCapsule_New( &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); if (PyDataMem_DefaultHandler == NULL) { - goto err; + return -1; } /* @@ -5049,32 +5025,32 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { */ current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); if (current_handler == NULL) { - goto err; + return -1; } if (initumath(m) != 0) { - goto err; + return -1; } if (set_matmul_flags(d) < 0) { - goto err; + return -1; } // initialize static references to ndarray.__array_*__ special methods npy_static_pydata.ndarray_array_finalize = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_finalize__"); if (npy_static_pydata.ndarray_array_finalize == NULL) { - goto err; + return -1; } npy_static_pydata.ndarray_array_ufunc = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_ufunc__"); if (npy_static_pydata.ndarray_array_ufunc == NULL) { - goto err; + return -1; } npy_static_pydata.ndarray_array_function = PyObject_GetAttrString( (PyObject *)&PyArray_Type, "__array_function__"); if (npy_static_pydata.ndarray_array_function == NULL) { - goto err; + return -1; } /* @@ -5090,13 +5066,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { if (npy_cache_import_runtime( "numpy.dtypes", "_add_dtype_helper", &npy_runtime_imports._add_dtype_helper) == -1) { - goto err; + return -1; } if (PyObject_CallFunction( npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "StringDType", (PyObject *)&PyArray_StringDType); @@ -5104,13 +5080,13 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { npy_static_pydata.zero_pyint_like_arr = PyArray_ZEROS( 0, NULL, NPY_DEFAULT_INT, NPY_FALSE); if (npy_static_pydata.zero_pyint_like_arr == NULL) { - goto err; + return -1; } ((PyArrayObject_fields *)npy_static_pydata.zero_pyint_like_arr)->flags |= 
(NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); if (verify_static_structs_initialized() < 0) { - goto err; + return -1; } /* @@ -5120,33 +5096,44 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* The dtype API is not auto-filled/generated via Python scripts: */ _fill_dtype_api(PyArray_API); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_ARRAY_API", c_api); Py_DECREF(c_api); c_api = PyCapsule_New((void *)PyUFunc_API, NULL, NULL); if (c_api == NULL) { - goto err; + return -1; } PyDict_SetItemString(d, "_UFUNC_API", c_api); Py_DECREF(c_api); if (PyErr_Occurred()) { - goto err; + return -1; } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _multiarray_umath_slots[] = { + {Py_mod_exec, _multiarray_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, #endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; - return m; +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "_multiarray_umath", + .m_size = 0, + .m_methods = array_module_methods, + .m_slots = _multiarray_umath_slots, +}; - err: - if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_RuntimeError, - "cannot load multiarray module."); - } - Py_DECREF(m); - return NULL; +PyMODINIT_FUNC PyInit__multiarray_umath(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 1f683851f585..03165b10337e 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -2310,7 +2310,7 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args)) buffer = 
view.buf; buflen = view.len; /* - * In Python 3 both of the deprecated functions PyObject_AsWriteBuffer and + * Both of the deprecated functions PyObject_AsWriteBuffer and * PyObject_AsReadBuffer that this code replaces release the buffer. It is * up to the object that supplies the buffer to guarantee that the buffer * sticks around after the release. diff --git a/numpy/_core/src/multiarray/stringdtype/casts.cpp b/numpy/_core/src/multiarray/stringdtype/casts.cpp index f66727501f97..3632e359c9a9 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.cpp +++ b/numpy/_core/src/multiarray/stringdtype/casts.cpp @@ -605,7 +605,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul const npy_packed_static_string *ps = (npy_packed_static_string *)in; int isnull = NpyString_load(allocator, ps, string_to_load); if (isnull == -1) { - const char *msg = "Failed to load string for conversion to a non-nullable type"; + const char msg[] = "Failed to load string for conversion to a non-nullable type"; if (has_gil) { PyErr_SetString(PyExc_MemoryError, msg); @@ -617,7 +617,7 @@ load_non_nullable_string(char *in, int has_null, const npy_static_string *defaul } else if (isnull) { if (has_null) { - const char *msg = "Arrays with missing data cannot be converted to a non-nullable type"; + const char msg[] = "Arrays with missing data cannot be converted to a non-nullable type"; if (has_gil) { PyErr_SetString(PyExc_ValueError, msg); @@ -821,8 +821,8 @@ static PyType_Slot s2int_slots[] = { static const char * make_s2type_name(NPY_TYPES typenum) { - const char *prefix = "cast_StringDType_to_"; - size_t plen = strlen(prefix); + const char prefix[] = "cast_StringDType_to_"; + size_t plen = sizeof(prefix)/sizeof(char) - 1; const char *type_name = typenum_to_cstr(typenum); size_t nlen = strlen(type_name); @@ -833,31 +833,36 @@ make_s2type_name(NPY_TYPES typenum) { return NULL; } - // memcpy instead of strcpy to avoid stringop-truncation warning, since - // we 
are not including the trailing null character - memcpy(buf, prefix, plen); - strncat(buf, type_name, nlen); + // memcpy instead of strcpy/strncat to avoid stringop-truncation warning, + // since we are not including the trailing null character + char *p = buf; + memcpy(p, prefix, plen); + p += plen; + memcpy(p, type_name, nlen); return buf; } static const char * make_type2s_name(NPY_TYPES typenum) { - const char *prefix = "cast_"; - size_t plen = strlen(prefix); + const char prefix[] = "cast_"; + size_t plen = sizeof(prefix)/sizeof(char) - 1; const char *type_name = typenum_to_cstr(typenum); size_t nlen = strlen(type_name); - const char *suffix = "_to_StringDType"; - size_t slen = strlen(suffix); + const char suffix[] = "_to_StringDType"; + size_t slen = sizeof(prefix)/sizeof(char) - 1; char *buf = (char *)PyMem_RawCalloc(sizeof(char), plen + nlen + slen + 1); - // memcpy instead of strcpy to avoid stringop-truncation warning, since - // we are not including the trailing null character - memcpy(buf, prefix, plen); - strncat(buf, type_name, nlen); - strncat(buf, suffix, slen); + // memcpy instead of strcpy/strncat to avoid stringop-truncation warning, + // since we are not including the trailing null character + char *p = buf; + memcpy(p, prefix, plen); + p += plen; + memcpy(p, type_name, nlen); + p += nlen; + memcpy(p, suffix, slen); return buf; } diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index 02ab7d246a7a..1c29bbb67f7e 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -404,7 +404,7 @@ NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) } } -static const char * const EMPTY_STRING = ""; +static const char EMPTY_STRING[] = ""; /*NUMPY_API * Extract the packed contents of *packed_string* into *unpacked_string*. 
diff --git a/numpy/_core/src/npymath/halffloat.cpp b/numpy/_core/src/npymath/halffloat.cpp index aa582c1b9517..9289a659f5f5 100644 --- a/numpy/_core/src/npymath/halffloat.cpp +++ b/numpy/_core/src/npymath/halffloat.cpp @@ -198,41 +198,21 @@ npy_half npy_half_divmod(npy_half h1, npy_half h2, npy_half *modulus) npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f) { - if constexpr (Half::kNativeConversion) { - return BitCast(Half(BitCast(f))); - } - else { - return half_private::FromFloatBits(f); - } + return BitCast(Half(BitCast(f))); } npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d) { - if constexpr (Half::kNativeConversion) { - return BitCast(Half(BitCast(d))); - } - else { - return half_private::FromDoubleBits(d); - } + return BitCast(Half(BitCast(d))); } npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h) { - if constexpr (Half::kNativeConversion) { - return BitCast(static_cast(Half::FromBits(h))); - } - else { - return half_private::ToFloatBits(h); - } + return BitCast(static_cast(Half::FromBits(h))); } npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h) { - if constexpr (Half::kNativeConversion) { - return BitCast(static_cast(Half::FromBits(h))); - } - else { - return half_private::ToDoubleBits(h); - } + return BitCast(static_cast(Half::FromBits(h))); } diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index 9a1b616d5cd4..c306ac581a59 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit 9a1b616d5cd4eaf49f7664fb86ccc1d18bad2b8d +Subproject commit c306ac581a59f89585d778254c4ed7197e64ba2d diff --git a/numpy/_core/src/umath/loops_arithmetic.dispatch.c.src b/numpy/_core/src/umath/loops_arithmetic.dispatch.c.src deleted file mode 100644 index c9efe5579e71..000000000000 --- a/numpy/_core/src/umath/loops_arithmetic.dispatch.c.src +++ /dev/null @@ -1,521 +0,0 @@ -#define _UMATHMODULE -#define _MULTIARRAYMODULE -#define NPY_NO_DEPRECATED_API NPY_API_VERSION 
- -#include "simd/simd.h" -#include "loops_utils.h" -#include "loops.h" -#include "lowlevel_strided_loops.h" -// Provides the various *_LOOP macros -#include "fast_loop_macros.h" - -//############################################################################### -//## Division -//############################################################################### -/******************************************************************************** - ** Defining the SIMD kernels - * - * Floor division of signed is based on T. Granlund and P. L. Montgomery - * "Division by invariant integers using multiplication(see [Figure 6.1] - * https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.1.2556)" - * For details on TRUNC division see simd/intdiv.h for more clarification - *********************************************************************************** - ** Figure 6.1: Signed division by run-time invariant divisor, rounded towards -INF - *********************************************************************************** - * For q = FLOOR(a/d), all sword: - * sword -dsign = SRL(d, N - 1); - * uword -nsign = (n < -dsign); - * uword -qsign = EOR(-nsign, -dsign); - * q = TRUNC((n - (-dsign ) + (-nsign))/d) - (-qsign); - ********************************************************************************/ - -#if (defined(NPY_HAVE_VSX) && !defined(NPY_HAVE_VSX4)) || defined(NPY_HAVE_NEON) || defined(NPY_HAVE_LSX) - // Due to integer 128-bit multiplication emulation, SIMD 64-bit division - // may not perform well on both neon and up to VSX3 compared to scalar - // division. 
- #define SIMD_DISABLE_DIV64_OPT -#endif - -#if NPY_SIMD -/**begin repeat - * Signed types - * #sfx = s8, s16, s32, s64# - * #len = 8, 16, 32, 64# - */ -#if @len@ < 64 || (@len@ == 64 && !defined(SIMD_DISABLE_DIV64_OPT)) -static inline void -simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) -{ - npyv_lanetype_@sfx@ *src = (npyv_lanetype_@sfx@ *) args[0]; - npyv_lanetype_@sfx@ scalar = *(npyv_lanetype_@sfx@ *) args[1]; - npyv_lanetype_@sfx@ *dst = (npyv_lanetype_@sfx@ *) args[2]; - const int vstep = npyv_nlanes_@sfx@; - const npyv_@sfx@x3 divisor = npyv_divisor_@sfx@(scalar); - - if (scalar == -1) { - npyv_b@len@ noverflow = npyv_cvt_b@len@_@sfx@(npyv_setall_@sfx@(-1)); - const npyv_@sfx@ vzero = npyv_zero_@sfx@(); - const npyv_@sfx@ vmin = npyv_setall_@sfx@(NPY_MIN_INT@len@); - for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { - npyv_@sfx@ a = npyv_load_@sfx@(src); - npyv_b@len@ gt_min = npyv_cmpgt_@sfx@(a, npyv_setall_@sfx@(NPY_MIN_INT@len@)); - noverflow = npyv_and_b@len@(noverflow, gt_min); - npyv_@sfx@ neg = npyv_ifsub_@sfx@(gt_min, vzero, a, vmin); - npyv_store_@sfx@(dst, neg); - } - - int raise_err = npyv_tobits_b@len@(npyv_not_b@len@(noverflow)) != 0; - for (; len > 0; --len, ++src, ++dst) { - npyv_lanetype_@sfx@ a = *src; - if (a == NPY_MIN_INT@len@) { - raise_err = 1; - *dst = NPY_MIN_INT@len@; - } else { - *dst = -a; - } - } - if (raise_err) { - npy_set_floatstatus_overflow(); - } - } else { - for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { - npyv_@sfx@ nsign_d = npyv_setall_@sfx@(scalar < 0); - npyv_@sfx@ a = npyv_load_@sfx@(src); - npyv_@sfx@ nsign_a = npyv_cvt_@sfx@_b@len@(npyv_cmplt_@sfx@(a, nsign_d)); - nsign_a = npyv_and_@sfx@(nsign_a, npyv_setall_@sfx@(1)); - npyv_@sfx@ diff_sign = npyv_sub_@sfx@(nsign_a, nsign_d); - npyv_@sfx@ to_ninf = npyv_xor_@sfx@(nsign_a, nsign_d); - npyv_@sfx@ trunc = npyv_divc_@sfx@(npyv_add_@sfx@(a, diff_sign), divisor); - npyv_@sfx@ floor = npyv_sub_@sfx@(trunc, to_ninf); - 
npyv_store_@sfx@(dst, floor); - } - - for (; len > 0; --len, ++src, ++dst) { - const npyv_lanetype_@sfx@ a = *src; - npyv_lanetype_@sfx@ r = a / scalar; - // Negative quotients needs to be rounded down - if (((a > 0) != (scalar > 0)) && ((r * scalar) != a)) { - r--; - } - *dst = r; - } - } - npyv_cleanup(); -} -#endif -/**end repeat**/ - -/**begin repeat - * Unsigned types - * #sfx = u8, u16, u32, u64# - * #len = 8, 16, 32, 64# - */ -#if @len@ < 64 || (@len@ == 64 && !defined(SIMD_DISABLE_DIV64_OPT)) -static inline void -simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) -{ - npyv_lanetype_@sfx@ *src = (npyv_lanetype_@sfx@ *) args[0]; - npyv_lanetype_@sfx@ scalar = *(npyv_lanetype_@sfx@ *) args[1]; - npyv_lanetype_@sfx@ *dst = (npyv_lanetype_@sfx@ *) args[2]; - const int vstep = npyv_nlanes_@sfx@; - const npyv_@sfx@x3 divisor = npyv_divisor_@sfx@(scalar); - - for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { - npyv_@sfx@ a = npyv_load_@sfx@(src); - npyv_@sfx@ c = npyv_divc_@sfx@(a, divisor); - npyv_store_@sfx@(dst, c); - } - - for (; len > 0; --len, ++src, ++dst) { - const npyv_lanetype_@sfx@ a = *src; - *dst = a / scalar; - } - npyv_cleanup(); -} -#endif -/**end repeat**/ - -#if defined(NPY_HAVE_VSX4) - -/**begin repeat - * #t = u, s# - * #signed = 0, 1# - */ -/* - * Computes division of 2 8-bit signed/unsigned integer vectors - * - * As Power10 only supports integer vector division for data of 32 bits or - * greater, we have to convert npyv_u8 into 4x npyv_u32, execute the integer - * vector division instruction, and then, convert the result back to npyv_u8. 
- */ -NPY_FINLINE npyv_@t@8 -vsx4_div_@t@8(npyv_@t@8 a, npyv_@t@8 b) -{ -#if @signed@ - npyv_s16x2 ta, tb; - npyv_s32x2 ahi, alo, bhi, blo; - ta.val[0] = vec_unpackh(a); - ta.val[1] = vec_unpackl(a); - tb.val[0] = vec_unpackh(b); - tb.val[1] = vec_unpackl(b); - ahi.val[0] = vec_unpackh(ta.val[0]); - ahi.val[1] = vec_unpackl(ta.val[0]); - alo.val[0] = vec_unpackh(ta.val[1]); - alo.val[1] = vec_unpackl(ta.val[1]); - bhi.val[0] = vec_unpackh(tb.val[0]); - bhi.val[1] = vec_unpackl(tb.val[0]); - blo.val[0] = vec_unpackh(tb.val[1]); - blo.val[1] = vec_unpackl(tb.val[1]); -#else - npyv_u16x2 a_expand = npyv_expand_u16_u8(a); - npyv_u16x2 b_expand = npyv_expand_u16_u8(b); - npyv_u32x2 ahi = npyv_expand_u32_u16(a_expand.val[0]); - npyv_u32x2 alo = npyv_expand_u32_u16(a_expand.val[1]); - npyv_u32x2 bhi = npyv_expand_u32_u16(b_expand.val[0]); - npyv_u32x2 blo = npyv_expand_u32_u16(b_expand.val[1]); -#endif - npyv_@t@32 v1 = vec_div(ahi.val[0], bhi.val[0]); - npyv_@t@32 v2 = vec_div(ahi.val[1], bhi.val[1]); - npyv_@t@32 v3 = vec_div(alo.val[0], blo.val[0]); - npyv_@t@32 v4 = vec_div(alo.val[1], blo.val[1]); - npyv_@t@16 hi = vec_pack(v1, v2); - npyv_@t@16 lo = vec_pack(v3, v4); - return vec_pack(hi, lo); -} - -NPY_FINLINE npyv_@t@16 -vsx4_div_@t@16(npyv_@t@16 a, npyv_@t@16 b) -{ -#if @signed@ - npyv_s32x2 a_expand; - npyv_s32x2 b_expand; - a_expand.val[0] = vec_unpackh(a); - a_expand.val[1] = vec_unpackl(a); - b_expand.val[0] = vec_unpackh(b); - b_expand.val[1] = vec_unpackl(b); -#else - npyv_u32x2 a_expand = npyv_expand_@t@32_@t@16(a); - npyv_u32x2 b_expand = npyv_expand_@t@32_@t@16(b); -#endif - npyv_@t@32 v1 = vec_div(a_expand.val[0], b_expand.val[0]); - npyv_@t@32 v2 = vec_div(a_expand.val[1], b_expand.val[1]); - return vec_pack(v1, v2); -} - -#define vsx4_div_@t@32 vec_div -#define vsx4_div_@t@64 vec_div -/**end repeat**/ - -/**begin repeat - * Unsigned types - * #sfx = u8, u16, u32, u64# - * #len = 8, 16, 32, 64# - */ -static inline void 
-vsx4_simd_divide_contig_@sfx@(char **args, npy_intp len) -{ - npyv_lanetype_@sfx@ *src1 = (npyv_lanetype_@sfx@ *) args[0]; - npyv_lanetype_@sfx@ *src2 = (npyv_lanetype_@sfx@ *) args[1]; - npyv_lanetype_@sfx@ *dst1 = (npyv_lanetype_@sfx@ *) args[2]; - const npyv_@sfx@ vzero = npyv_zero_@sfx@(); - const int vstep = npyv_nlanes_@sfx@; - - for (; len >= vstep; len -= vstep, src1 += vstep, src2 += vstep, - dst1 += vstep) { - npyv_@sfx@ a = npyv_load_@sfx@(src1); - npyv_@sfx@ b = npyv_load_@sfx@(src2); - npyv_@sfx@ c = vsx4_div_@sfx@(a, b); - npyv_store_@sfx@(dst1, c); - if (NPY_UNLIKELY(vec_any_eq(b, vzero))) { - npy_set_floatstatus_divbyzero(); - } - } - - for (; len > 0; --len, ++src1, ++src2, ++dst1) { - const npyv_lanetype_@sfx@ a = *src1; - const npyv_lanetype_@sfx@ b = *src2; - if (NPY_UNLIKELY(b == 0)) { - npy_set_floatstatus_divbyzero(); - *dst1 = 0; - } else{ - *dst1 = a / b; - } - } - npyv_cleanup(); -} -/**end repeat**/ - -/**begin repeat - * Signed types - * #sfx = s8, s16, s32, s64# - * #len = 8, 16, 32, 64# - */ -static inline void -vsx4_simd_divide_contig_@sfx@(char **args, npy_intp len) -{ - npyv_lanetype_@sfx@ *src1 = (npyv_lanetype_@sfx@ *) args[0]; - npyv_lanetype_@sfx@ *src2 = (npyv_lanetype_@sfx@ *) args[1]; - npyv_lanetype_@sfx@ *dst1 = (npyv_lanetype_@sfx@ *) args[2]; - const npyv_@sfx@ vneg_one = npyv_setall_@sfx@(-1); - const npyv_@sfx@ vzero = npyv_zero_@sfx@(); - const npyv_@sfx@ vmin = npyv_setall_@sfx@(NPY_MIN_INT@len@); - npyv_b@len@ warn_zero = npyv_cvt_b@len@_@sfx@(npyv_zero_@sfx@()); - npyv_b@len@ warn_overflow = npyv_cvt_b@len@_@sfx@(npyv_zero_@sfx@()); - const int vstep = npyv_nlanes_@sfx@; - - for (; len >= vstep; len -= vstep, src1 += vstep, src2 += vstep, - dst1 += vstep) { - npyv_@sfx@ a = npyv_load_@sfx@(src1); - npyv_@sfx@ b = npyv_load_@sfx@(src2); - npyv_@sfx@ quo = vsx4_div_@sfx@(a, b); - npyv_@sfx@ rem = npyv_sub_@sfx@(a, vec_mul(b, quo)); - // (b == 0 || (a == NPY_MIN_INT@len@ && b == -1)) - npyv_b@len@ bzero = 
npyv_cmpeq_@sfx@(b, vzero); - npyv_b@len@ amin = npyv_cmpeq_@sfx@(a, vmin); - npyv_b@len@ bneg_one = npyv_cmpeq_@sfx@(b, vneg_one); - npyv_b@len@ overflow = npyv_and_@sfx@(bneg_one, amin); - warn_zero = npyv_or_@sfx@(bzero, warn_zero); - warn_overflow = npyv_or_@sfx@(overflow, warn_overflow); - // handle mixed case the way Python does - // ((a > 0) == (b > 0) || rem == 0) - npyv_b@len@ a_gt_zero = npyv_cmpgt_@sfx@(a, vzero); - npyv_b@len@ b_gt_zero = npyv_cmpgt_@sfx@(b, vzero); - npyv_b@len@ ab_eq_cond = npyv_cmpeq_@sfx@(a_gt_zero, b_gt_zero); - npyv_b@len@ rem_zero = npyv_cmpeq_@sfx@(rem, vzero); - npyv_b@len@ or = npyv_or_@sfx@(ab_eq_cond, rem_zero); - npyv_@sfx@ to_sub = npyv_select_@sfx@(or, vzero, vneg_one); - quo = npyv_add_@sfx@(quo, to_sub); - // Divide by zero - quo = npyv_select_@sfx@(bzero, vzero, quo); - // Overflow - quo = npyv_select_@sfx@(overflow, vmin, quo); - npyv_store_@sfx@(dst1, quo); - } - - if (!vec_all_eq(warn_zero, vzero)) { - npy_set_floatstatus_divbyzero(); - } - if (!vec_all_eq(warn_overflow, vzero)) { - npy_set_floatstatus_overflow(); - } - - for (; len > 0; --len, ++src1, ++src2, ++dst1) { - const npyv_lanetype_@sfx@ a = *src1; - const npyv_lanetype_@sfx@ b = *src2; - if (NPY_UNLIKELY(b == 0)) { - npy_set_floatstatus_divbyzero(); - *dst1 = 0; - } else if (NPY_UNLIKELY((a == NPY_MIN_INT@len@) && (b == -1))) { - npy_set_floatstatus_overflow(); - *dst1 = NPY_MIN_INT@len@; - } else { - *dst1 = a / b; - if (((a > 0) != (b > 0)) && ((*dst1 * b) != a)) { - *dst1 -= 1; - } - } - } - npyv_cleanup(); -} -/**end repeat**/ -#endif // NPY_HAVE_VSX4 -#endif // NPY_SIMD - -/******************************************************************************** - ** Defining ufunc inner functions - ********************************************************************************/ - -/**begin repeat - * Signed types - * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong# - * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# - */ -#undef TO_SIMD_SFX -#if 0 
-/**begin repeat1 - * #len = 8, 16, 32, 64# - */ -#elif NPY_BITSOF_@TYPE@ == @len@ - #define TO_SIMD_SFX(X) X##_s@len@ -/**end repeat1**/ -#endif -#if NPY_BITSOF_@TYPE@ == 64 && defined(SIMD_DISABLE_DIV64_OPT) - #undef TO_SIMD_SFX -#endif - -NPY_FINLINE @type@ floor_div_@TYPE@(const @type@ n, const @type@ d) -{ - /* - * FIXME: On x86 at least, dividing the smallest representable integer - * by -1 causes a SIFGPE (division overflow). We treat this case here - * (to avoid a SIGFPE crash at python level), but a good solution would - * be to treat integer division problems separately from FPU exceptions - * (i.e. a different approach than npy_set_floatstatus_divbyzero()). - */ - if (NPY_UNLIKELY(d == 0 || (n == NPY_MIN_@TYPE@ && d == -1))) { - if (d == 0) { - npy_set_floatstatus_divbyzero(); - return 0; - } - else { - npy_set_floatstatus_overflow(); - return NPY_MIN_@TYPE@; - } - } - @type@ r = n / d; - // Negative quotients needs to be rounded down - if (((n > 0) != (d > 0)) && ((r * d) != n)) { - r--; - } - return r; -} - -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) -{ - if (IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(@type@) { - io1 = floor_div_@TYPE@(io1, *(@type@*)ip2); - } - *((@type@ *)iop1) = io1; - } -#if NPY_SIMD && defined(TO_SIMD_SFX) -#if defined(NPY_HAVE_VSX4) - // both arguments are arrays of the same size - else if (IS_BLOCKABLE_BINARY(sizeof(@type@), NPY_SIMD_WIDTH)) { - TO_SIMD_SFX(vsx4_simd_divide_contig)(args, dimensions[0]); - } -#endif - // for contiguous block of memory, divisor is a scalar and not 0 - else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), NPY_SIMD_WIDTH) && - (*(@type@ *)args[1]) != 0) { - TO_SIMD_SFX(simd_divide_by_scalar_contig)(args, dimensions[0]); - } -#endif - else { - BINARY_LOOP { - *((@type@ *)op1) = floor_div_@TYPE@(*(@type@*)ip1, *(@type@*)ip2); - } - } -} - -NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(@TYPE@_divide_indexed) 
-(PyArrayMethod_Context *NPY_UNUSED(context), char * const*args, npy_intp const *dimensions, npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) -{ - char *ip1 = args[0]; - char *indxp = args[1]; - char *value = args[2]; - npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; - npy_intp shape = steps[3]; - npy_intp n = dimensions[0]; - npy_intp i; - @type@ *indexed; - for(i = 0; i < n; i++, indxp += isindex, value += isb) { - npy_intp indx = *(npy_intp *)indxp; - if (indx < 0) { - indx += shape; - } - indexed = (@type@ *)(ip1 + is1 * indx); - *indexed = floor_div_@TYPE@(*indexed, *(@type@ *)value); - } - return 0; -} - -/**end repeat**/ - -/**begin repeat - * Unsigned types - * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong# - * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# - * #STYPE = BYTE, SHORT, INT, LONG, LONGLONG# - */ -#undef TO_SIMD_SFX -#if 0 -/**begin repeat1 - * #len = 8, 16, 32, 64# - */ -#elif NPY_BITSOF_@STYPE@ == @len@ - #define TO_SIMD_SFX(X) X##_u@len@ -/**end repeat1**/ -#endif -/* - * For 64-bit division on Armv7, Aarch64, and IBM/Power, NPYV fall-backs to the scalar division - * because emulating multiply-high on these architectures is going to be expensive comparing - * to the native scalar dividers. - * Therefore it's better to disable NPYV in this special case to avoid any unnecessary shuffles. - * Power10(VSX4) is an exception here since it has native support for integer vector division. 
- */ -#if NPY_BITSOF_@STYPE@ == 64 && !defined(NPY_HAVE_VSX4) && (defined(NPY_HAVE_VSX) || defined(NPY_HAVE_NEON) || defined(NPY_HAVE_LSX)) - #undef TO_SIMD_SFX -#endif -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) -{ - if (IS_BINARY_REDUCE) { - BINARY_REDUCE_LOOP(@type@) { - const @type@ d = *(@type@ *)ip2; - if (NPY_UNLIKELY(d == 0)) { - npy_set_floatstatus_divbyzero(); - io1 = 0; - } else { - io1 /= d; - } - } - *((@type@ *)iop1) = io1; - } -#if NPY_SIMD && defined(TO_SIMD_SFX) -#if defined(NPY_HAVE_VSX4) - // both arguments are arrays of the same size - else if (IS_BLOCKABLE_BINARY(sizeof(@type@), NPY_SIMD_WIDTH)) { - TO_SIMD_SFX(vsx4_simd_divide_contig)(args, dimensions[0]); - } -#endif - // for contiguous block of memory, divisor is a scalar and not 0 - else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), NPY_SIMD_WIDTH) && - (*(@type@ *)args[1]) != 0) { - TO_SIMD_SFX(simd_divide_by_scalar_contig)(args, dimensions[0]); - } -#endif - else { - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - if (NPY_UNLIKELY(in2 == 0)) { - npy_set_floatstatus_divbyzero(); - *((@type@ *)op1) = 0; - } else{ - *((@type@ *)op1) = in1 / in2; - } - } - } -} - -NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(@TYPE@_divide_indexed) -(PyArrayMethod_Context *NPY_UNUSED(context), char * const*args, npy_intp const *dimensions, npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) -{ - char *ip1 = args[0]; - char *indxp = args[1]; - char *value = args[2]; - npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; - npy_intp shape = steps[3]; - npy_intp n = dimensions[0]; - npy_intp i; - @type@ *indexed; - for(i = 0; i < n; i++, indxp += isindex, value += isb) { - npy_intp indx = *(npy_intp *)indxp; - if (indx < 0) { - indx += shape; - } - indexed = (@type@ *)(ip1 + is1 * indx); - @type@ in2 = *(@type@ *)value; - if (NPY_UNLIKELY(in2 == 0)) { - 
npy_set_floatstatus_divbyzero(); - *indexed = 0; - } else { - *indexed = *indexed / in2; - } - } - return 0; -} - -/**end repeat**/ diff --git a/numpy/_core/src/umath/loops_arithmetic.dispatch.cpp.src b/numpy/_core/src/umath/loops_arithmetic.dispatch.cpp.src new file mode 100644 index 000000000000..a4ec1e85233b --- /dev/null +++ b/numpy/_core/src/umath/loops_arithmetic.dispatch.cpp.src @@ -0,0 +1,341 @@ +#include "loops_utils.h" +#include "loops.h" +#include "lowlevel_strided_loops.h" +// Provides the various *_LOOP macros +#include "fast_loop_macros.h" + +#include +#include "simd/simd.hpp" +using namespace np::simd; + +//############################################################################### +//## Division +//############################################################################### +/******************************************************************************** + ** Defining the SIMD kernels + * + * Floor division of signed is based on T. Granlund and P. L. Montgomery + * "Division by invariant integers using multiplication(see [Figure 6.1] + * https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.1.2556)" + * For details on TRUNC division see simd/intdiv.h for more clarification + *********************************************************************************** + ** Figure 6.1: Signed division by run-time invariant divisor, rounded towards -INF + *********************************************************************************** + * For q = FLOOR(a/d), all sword: + * sword -dsign = SRL(d, N - 1); + * uword -nsign = (n < -dsign); + * uword -qsign = EOR(-nsign, -dsign); + * q = TRUNC((n - (-dsign ) + (-nsign))/d) - (-qsign); + ********************************************************************************/ + +#if (defined(HWY_ARCH_VSX) && !defined(HWY_ARCH_VSX4)) || defined(HWY_ARCH_NEON) || defined(HWY_ARCH_LSX) + // Due to integer 128-bit multiplication emulation, SIMD 64-bit division + // may not perform well on both neon and up to VSX3 
compared to scalar + // division. + #define SIMD_DISABLE_DIV64_OPT +#endif + +#if NPY_HWY +/**begin repeat + * Signed types + * #sfx = s8, s16, s32, s64# + * #len = 8, 16, 32, 64# + */ +#if @len@ < 64 || (@len@ == 64 && !defined(SIMD_DISABLE_DIV64_OPT)) +static inline void +simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) +{ + using T = int@len@_t; + T *src = (T *) args[0]; + T scalar = *(T *) args[1]; + T *dst = (T *) args[2]; + const int vstep = Lanes(); + + if (scalar == 0) { + std::fill(dst, dst + len, 0); + npy_set_floatstatus_divbyzero(); + } + else if (scalar == 1) { + if (src != dst) { + std::copy(src, src + len, dst); + } + } + else if (scalar == -1) { + bool raise_err = false; + auto vec_min = Set(std::numeric_limits::min()); + for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { + auto vec_src = LoadU(src); + auto is_min = Eq(vec_src, vec_min); + auto vec_res = hn::IfThenElse(is_min, vec_min, hn::Neg(vec_src)); + StoreU(vec_res, dst); + if (!raise_err && !hn::AllFalse(_Tag(), is_min)) { + raise_err = true; + } + } + + for (; len > 0; --len, ++src, ++dst) { + T val = *src; + if (val == std::numeric_limits::min()) { + *dst = std::numeric_limits::min(); + raise_err = true; + } else { + *dst = -val; + } + } + if (raise_err) { + npy_set_floatstatus_overflow(); + } + } + else { + auto vec_scalar = Set(scalar); + for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { + auto vec_src = LoadU(src); + auto vec_div = Div(vec_src, vec_scalar); + auto vec_mul = Mul(vec_div, vec_scalar); + auto eq_mask = Eq(vec_src, vec_mul); + auto diff_signs = hn::IsNegative(Xor(vec_src, vec_scalar)); + auto adjust = AndNot(eq_mask, diff_signs); + + vec_div = hn::MaskedSubOr(vec_div, adjust, vec_div, Set(1)); + StoreU(vec_div, dst); + } + + for (; len > 0; --len, ++src, ++dst) { + const T a = *src; + T r = a / scalar; + // Negative quotients needs to be rounded down + if (((a > 0) != (scalar > 0)) && ((r * scalar) != a)) { + r--; + } + *dst = r; + } 
+ } +} +#endif +/**end repeat**/ +#endif // NPY_SIMD + +/**begin repeat + * Unsigned types + * #sfx = u8, u16, u32, u64# + * #len = 8, 16, 32, 64# + */ +#if @len@ < 64 || (@len@ == 64 && !defined(SIMD_DISABLE_DIV64_OPT)) +static inline void +simd_divide_by_scalar_contig_@sfx@(char **args, npy_intp len) +{ + using T = uint@len@_t; + T *src = (T *) args[0]; + T scalar = *(T *) args[1]; + T *dst = (T *) args[2]; + const int vstep = Lanes(); + + if (scalar == 0) { + std::fill(dst, dst + len, 0); + npy_set_floatstatus_divbyzero(); + } + else if (scalar == 1) { + if (src != dst) { + std::copy(src, src + len, dst); + } + } + else { + for (; len >= vstep; len -= vstep, src += vstep, dst += vstep) { + StoreU(Div(LoadU(src), Set(scalar)), dst); + } + + for (; len > 0; --len, ++src, ++dst) { + const T a = *src; + *dst = a / scalar; + } + } +} +#endif +/**end repeat**/ + +/******************************************************************************** + ** Defining ufunc inner functions + ********************************************************************************/ + +/**begin repeat + * Signed types + * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong# + * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# + */ +#undef TO_SIMD_SFX +#if 0 +/**begin repeat1 + * #len = 8, 16, 32, 64# + */ +#elif NPY_BITSOF_@TYPE@ == @len@ + #define TO_SIMD_SFX(X) X##_s@len@ +/**end repeat1**/ +#endif +#if NPY_BITSOF_@TYPE@ == 64 && defined(SIMD_DISABLE_DIV64_OPT) + #undef TO_SIMD_SFX +#endif + +NPY_FINLINE @type@ floor_div_@TYPE@(const @type@ n, const @type@ d) +{ + /* + * FIXME: On x86 at least, dividing the smallest representable integer + * by -1 causes a SIFGPE (division overflow). We treat this case here + * (to avoid a SIGFPE crash at python level), but a good solution would + * be to treat integer division problems separately from FPU exceptions + * (i.e. a different approach than npy_set_floatstatus_divbyzero()). 
+ */ + if (NPY_UNLIKELY(d == 0 || (n == NPY_MIN_@TYPE@ && d == -1))) { + if (d == 0) { + npy_set_floatstatus_divbyzero(); + return 0; + } + else { + npy_set_floatstatus_overflow(); + return NPY_MIN_@TYPE@; + } + } + @type@ r = n / d; + // Negative quotients needs to be rounded down + if (((n > 0) != (d > 0)) && ((r * d) != n)) { + r--; + } + return r; +} + +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + if (IS_BINARY_REDUCE) { + BINARY_REDUCE_LOOP(@type@) { + io1 = floor_div_@TYPE@(io1, *(@type@*)ip2); + } + *((@type@ *)iop1) = io1; + } +#if NPY_HWY && defined(TO_SIMD_SFX) + // for contiguous block of memory, divisor is a scalar and not 0 + else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), NPY_SIMD_WIDTH) && + (*(@type@ *)args[1]) != 0) { + TO_SIMD_SFX(simd_divide_by_scalar_contig)(args, dimensions[0]); + } +#endif + else { + BINARY_LOOP { + *((@type@ *)op1) = floor_div_@TYPE@(*(@type@*)ip1, *(@type@*)ip2); + } + } +} + +NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(@TYPE@_divide_indexed) +(PyArrayMethod_Context *NPY_UNUSED(context), char * const*args, npy_intp const *dimensions, npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) +{ + char *ip1 = args[0]; + char *indxp = args[1]; + char *value = args[2]; + npy_intp is1 = steps[0], isindex = steps[1], isb = steps[2]; + npy_intp shape = steps[3]; + npy_intp n = dimensions[0]; + npy_intp i; + @type@ *indexed; + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (@type@ *)(ip1 + is1 * indx); + *indexed = floor_div_@TYPE@(*indexed, *(@type@ *)value); + } + return 0; +} + +/**end repeat**/ + +/**begin repeat + * Unsigned types + * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong# + * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG# + * #STYPE = BYTE, SHORT, INT, LONG, LONGLONG# + */ +#undef TO_SIMD_SFX +#if 0 
+/**begin repeat1 + * #len = 8, 16, 32, 64# + */ +#elif NPY_BITSOF_@STYPE@ == @len@ + #define TO_SIMD_SFX(X) X##_u@len@ +/**end repeat1**/ +#endif +/* + * For 64-bit division on Armv7, Aarch64, and IBM/Power, NPYV fall-backs to the scalar division + * because emulating multiply-high on these architectures is going to be expensive comparing + * to the native scalar dividers. + * Therefore it's better to disable NPYV in this special case to avoid any unnecessary shuffles. + * Power10(VSX4) is an exception here since it has native support for integer vector division. + */ +#if NPY_BITSOF_@STYPE@ == 64 && !defined(HWY_ARCH_VSX4) && (defined(HWY_ARCH_VSX) || defined(HWY_ARCH_NEON) || defined(HWY_ARCH_LSX)) + #undef TO_SIMD_SFX +#endif +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_divide) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + if (IS_BINARY_REDUCE) { + BINARY_REDUCE_LOOP(@type@) { + const @type@ d = *(@type@ *)ip2; + if (NPY_UNLIKELY(d == 0)) { + npy_set_floatstatus_divbyzero(); + io1 = 0; + } else { + io1 /= d; + } + } + *((@type@ *)iop1) = io1; + } +#if NPY_HWY && defined(TO_SIMD_SFX) + // for contiguous block of memory, divisor is a scalar and not 0 + else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), NPY_SIMD_WIDTH) && + (*(@type@ *)args[1]) != 0) { + TO_SIMD_SFX(simd_divide_by_scalar_contig)(args, dimensions[0]); + } +#endif + else { + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + if (NPY_UNLIKELY(in2 == 0)) { + npy_set_floatstatus_divbyzero(); + *((@type@ *)op1) = 0; + } else{ + *((@type@ *)op1) = in1 / in2; + } + } + } +} + +NPY_NO_EXPORT int NPY_CPU_DISPATCH_CURFX(@TYPE@_divide_indexed) +(PyArrayMethod_Context *NPY_UNUSED(context), char * const*args, npy_intp const *dimensions, npy_intp const *steps, NpyAuxData *NPY_UNUSED(func)) +{ + char *ip1 = args[0]; + char *indxp = args[1]; + char *value = args[2]; + npy_intp is1 = steps[0], isindex = steps[1], isb = 
steps[2]; + npy_intp shape = steps[3]; + npy_intp n = dimensions[0]; + npy_intp i; + @type@ *indexed; + for(i = 0; i < n; i++, indxp += isindex, value += isb) { + npy_intp indx = *(npy_intp *)indxp; + if (indx < 0) { + indx += shape; + } + indexed = (@type@ *)(ip1 + is1 * indx); + @type@ in2 = *(@type@ *)value; + if (NPY_UNLIKELY(in2 == 0)) { + npy_set_floatstatus_divbyzero(); + *indexed = 0; + } else { + *indexed = *indexed / in2; + } + } + return 0; +} + +/**end repeat**/ diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index d9be7b1d6826..02c4fde56bf2 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -596,7 +596,7 @@ NPY_NO_EXPORT void * Use transpose equivalence: * matmul(a, b, o) == matmul(b.T, a.T, o.T) */ - if (o_f_blasable) { + if (o_transpose) { @TYPE@_matmul_matrixmatrix( ip2_, is2_p_, is2_n_, ip1_, is1_n_, is1_m_, diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 554f9ece5197..dafedcbc03ff 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -297,6 +297,18 @@ struct Buffer { return num_codepoints; } + inline size_t + buffer_width() + { + switch (enc) { + case ENCODING::ASCII: + case ENCODING::UTF8: + return after - buf; + case ENCODING::UTF32: + return (after - buf) / sizeof(npy_ucs4); + } + } + inline Buffer& operator+=(npy_int64 rhs) { diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 5b4b67cda625..95f30ccb109e 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -15,6 +15,7 @@ #include "dtypemeta.h" #include "convert_datatype.h" #include "gil_utils.h" +#include "templ_common.h" /* for npy_mul_size_with_overflow_size_t */ #include "string_ufuncs.h" #include "string_fastsearch.h" @@ -166,26 +167,44 @@ string_add(Buffer buf1, Buffer buf2, Buffer out) template -static inline void +static 
inline int string_multiply(Buffer buf1, npy_int64 reps, Buffer out) { size_t len1 = buf1.num_codepoints(); if (reps < 1 || len1 == 0) { out.buffer_fill_with_zeros_after_index(0); - return; + return 0; } if (len1 == 1) { out.buffer_memset(*buf1, reps); out.buffer_fill_with_zeros_after_index(reps); + return 0; } - else { - for (npy_int64 i = 0; i < reps; i++) { - buf1.buffer_memcpy(out, len1); - out += len1; - } - out.buffer_fill_with_zeros_after_index(0); + + size_t newlen; + if (NPY_UNLIKELY(npy_mul_with_overflow_size_t(&newlen, reps, len1) != 0) || newlen > PY_SSIZE_T_MAX) { + return -1; + } + + size_t pad = 0; + size_t width = out.buffer_width(); + if (width < newlen) { + reps = width / len1; + pad = width % len1; } + + for (npy_int64 i = 0; i < reps; i++) { + buf1.buffer_memcpy(out, len1); + out += len1; + } + + buf1.buffer_memcpy(out, pad); + out += pad; + + out.buffer_fill_with_zeros_after_index(0); + + return 0; } @@ -238,7 +257,9 @@ string_multiply_strint_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in1, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in2, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in2, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -267,7 +288,9 @@ string_multiply_intstr_loop(PyArrayMethod_Context *context, while (N--) { Buffer buf(in2, elsize); Buffer outbuf(out, outsize); - string_multiply(buf, *(npy_int64 *)in1, outbuf); + if (NPY_UNLIKELY(string_multiply(buf, *(npy_int64 *)in1, outbuf) < 0)) { + npy_gil_error(PyExc_OverflowError, "Overflow detected in string multiply"); + } in1 += strides[0]; in2 += strides[1]; @@ -752,10 +775,11 @@ string_multiply_resolve_descriptors( if (given_descrs[2] == NULL) { PyErr_SetString( PyExc_TypeError, - "The 'out' kwarg is necessary. 
Use numpy.strings.multiply without it."); + "The 'out' kwarg is necessary when using the string multiply ufunc " + "directly. Use numpy.strings.multiply to multiply strings without " + "specifying 'out'."); return _NPY_ERROR_OCCURRED_IN_CAST; } - loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; @@ -1521,7 +1545,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = NPY_OBJECT; dtypes[1] = NPY_BOOL; - const char *unary_buffer_method_names[] = { + const char *const unary_buffer_method_names[] = { "isalpha", "isalnum", "isdigit", "isspace", "islower", "isupper", "istitle", "isdecimal", "isnumeric", }; @@ -1635,7 +1659,7 @@ init_string_ufuncs(PyObject *umath) dtypes[2] = dtypes[3] = NPY_INT64; dtypes[4] = NPY_BOOL; - const char *startswith_endswith_names[] = { + const char *const startswith_endswith_names[] = { "startswith", "endswith" }; @@ -1664,7 +1688,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = NPY_OBJECT; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace" }; @@ -1691,7 +1715,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[2] = NPY_OBJECT; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars" }; @@ -1750,7 +1774,7 @@ init_string_ufuncs(PyObject *umath) dtypes[1] = NPY_INT64; - const char *center_ljust_rjust_names[] = { + const char *const center_ljust_rjust_names[] = { "_center", "_ljust", "_rjust" }; @@ -1827,7 +1851,7 @@ init_string_ufuncs(PyObject *umath) dtypes[0] = dtypes[1] = dtypes[3] = dtypes[4] = dtypes[5] = NPY_OBJECT; dtypes[2] = NPY_INT64; - const char *partition_names[] = {"_partition_index", "_rpartition_index"}; + const char *const partition_names[] = {"_partition_index", "_rpartition_index"}; static STARTPOSITION partition_startpositions[] = { 
STARTPOSITION::FRONT, STARTPOSITION::BACK diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 37ae0a39a349..b0181d4186c9 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -137,9 +137,9 @@ static int multiply_loop_core( size_t newsize; int overflowed = npy_mul_with_overflow_size_t( &newsize, cursize, factor); - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in string multiply"); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, + "Overflow encountered in string multiply"); goto fail; } @@ -1748,9 +1748,9 @@ center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, width - num_codepoints); newsize += s1.size; - if (overflowed) { - npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in %s", ufunc_name); + if (overflowed || newsize > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, + "Overflow encountered in %s", ufunc_name); goto fail; } @@ -2605,7 +2605,7 @@ add_object_and_unicode_promoters(PyObject *umath, const char* ufunc_name, NPY_NO_EXPORT int init_stringdtype_ufuncs(PyObject *umath) { - static const char *comparison_ufunc_names[6] = { + static const char *const comparison_ufunc_names[6] = { "equal", "not_equal", "less", "less_equal", "greater_equal", "greater", }; @@ -2654,7 +2654,7 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - const char *unary_loop_names[] = { + const char *const unary_loop_names[] = { "isalpha", "isdecimal", "isdigit", "isnumeric", "isspace", "isalnum", "istitle", "isupper", "islower", }; @@ -2874,7 +2874,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_whitespace_names[] = { + const char *const strip_whitespace_names[] = { "_lstrip_whitespace", "_rstrip_whitespace", "_strip_whitespace", }; @@ -2898,7 +2898,7 @@ init_stringdtype_ufuncs(PyObject *umath) 
&PyArray_StringDType, &PyArray_StringDType, &PyArray_StringDType }; - const char *strip_chars_names[] = { + const char *const strip_chars_names[] = { "_lstrip_chars", "_rstrip_chars", "_strip_chars", }; @@ -3082,7 +3082,7 @@ init_stringdtype_ufuncs(PyObject *umath) &PyArray_StringDType }; - const char *partition_names[] = {"_partition", "_rpartition"}; + const char *const partition_names[] = {"_partition", "_rpartition"}; static STARTPOSITION partition_startpositions[] = { STARTPOSITION::FRONT, STARTPOSITION::BACK diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index cd6d1ec439f1..b4dc1656024f 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -3,51 +3,74 @@ operations. """ -import sys import functools +import sys + import numpy as np from numpy import ( - equal, not_equal, less, less_equal, greater, greater_equal, - add, multiply as _multiply_ufunc, + add, + equal, + greater, + greater_equal, + less, + less_equal, + not_equal, +) +from numpy import ( + multiply as _multiply_ufunc, ) from numpy._core.multiarray import _vec_string -from numpy._core.overrides import set_module, array_function_dispatch +from numpy._core.overrides import array_function_dispatch, set_module from numpy._core.umath import ( + _center, + _expandtabs, + _expandtabs_length, + _ljust, + _lstrip_chars, + _lstrip_whitespace, + _partition, + _partition_index, + _replace, + _rjust, + _rpartition, + _rpartition_index, + _rstrip_chars, + _rstrip_whitespace, + _slice, + _strip_chars, + _strip_whitespace, + _zfill, + isalnum, isalpha, + isdecimal, isdigit, - isspace, - isalnum, islower, - isupper, - istitle, - isdecimal, isnumeric, + isspace, + istitle, + isupper, str_len, +) +from numpy._core.umath import ( + count as _count_ufunc, +) +from numpy._core.umath import ( + endswith as _endswith_ufunc, +) +from numpy._core.umath import ( find as _find_ufunc, - rfind as _rfind_ufunc, +) +from numpy._core.umath import ( index as _index_ufunc, +) +from numpy._core.umath import 
( + rfind as _rfind_ufunc, +) +from numpy._core.umath import ( rindex as _rindex_ufunc, - count as _count_ufunc, +) +from numpy._core.umath import ( startswith as _startswith_ufunc, - endswith as _endswith_ufunc, - _lstrip_whitespace, - _lstrip_chars, - _rstrip_whitespace, - _rstrip_chars, - _strip_whitespace, - _strip_chars, - _replace, - _expandtabs_length, - _expandtabs, - _center, - _ljust, - _rjust, - _zfill, - _partition, - _partition_index, - _rpartition, - _rpartition_index, - _slice, ) @@ -195,7 +218,7 @@ def multiply(a, i): # Ensure we can do a_len * i without overflow. if np.any(a_len > sys.maxsize / np.maximum(i, 1)): - raise MemoryError("repeated string is too long") + raise OverflowError("Overflow encountered in string multiply") buffersizes = a_len * i out_dtype = f"{a.dtype.char}{buffersizes.max()}" diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index a1ed1ff2b9a5..b187ce71d25c 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,16 +1,12 @@ -from typing import Any, overload, TypeAlias +from typing import TypeAlias, overload import numpy as np -from numpy._typing import ( - NDArray, - _ArrayLikeStr_co as U_co, - _ArrayLikeBytes_co as S_co, - _ArrayLikeInt_co as i_co, - _ArrayLikeString_co as T_co, - _ArrayLikeAnyString_co as UST_co, - _Shape, - _SupportsArray, -) +from numpy._typing import NDArray, _AnyShape, _SupportsArray +from numpy._typing import _ArrayLikeAnyString_co as UST_co +from numpy._typing import _ArrayLikeBytes_co as S_co +from numpy._typing import _ArrayLikeInt_co as i_co +from numpy._typing import _ArrayLikeStr_co as U_co +from numpy._typing import _ArrayLikeString_co as T_co __all__ = [ "add", @@ -58,11 +54,12 @@ __all__ = [ "translate", "upper", "zfill", + "slice", ] -_StringDTypeArray: TypeAlias = np.ndarray[_Shape, np.dtypes.StringDType] +_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] _StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] 
-_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_Shape, np.dtype[np.str_]] | _StringDTypeArray +_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_AnyShape, np.dtype[np.str_]] | _StringDTypeArray @overload def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @@ -498,3 +495,17 @@ def translate( table: str, deletechars: str | None = None, ) -> _StringDTypeOrUnicodeArray: ... + +# +@overload +def slice(a: U_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.str_]: ... # type: ignore[overload-overlap] +@overload +def slice(a: S_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.bytes_]: ... +@overload +def slice( + a: _StringDTypeSupportsArray, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / +) -> _StringDTypeArray: ... +@overload +def slice( + a: T_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, / +) -> _StringDTypeOrUnicodeArray: ... diff --git a/numpy/_core/tests/_locales.py b/numpy/_core/tests/_locales.py index 2244e0abda71..debda9639c03 100644 --- a/numpy/_core/tests/_locales.py +++ b/numpy/_core/tests/_locales.py @@ -1,8 +1,8 @@ """Provide class for testing in French locale """ -import sys import locale +import sys import pytest diff --git a/numpy/_core/tests/_natype.py b/numpy/_core/tests/_natype.py index ae241c391c07..1c2175b35933 100644 --- a/numpy/_core/tests/_natype.py +++ b/numpy/_core/tests/_natype.py @@ -8,6 +8,7 @@ import numpy as np + def _create_binary_propagating_op(name, is_divmod=False): is_cmp = name.strip("_") in ["eq", "ne", "le", "lt", "ge", "gt"] diff --git a/numpy/_core/tests/examples/cython/setup.py b/numpy/_core/tests/examples/cython/setup.py index 1bf027700748..eb57477fc2a1 100644 --- a/numpy/_core/tests/examples/cython/setup.py +++ b/numpy/_core/tests/examples/cython/setup.py @@ -3,13 +3,15 @@ for testing. 
""" -import Cython -import numpy as np -from numpy._utils import _pep440 +import os from distutils.core import setup + +import Cython from Cython.Build import cythonize from setuptools.extension import Extension -import os + +import numpy as np +from numpy._utils import _pep440 macros = [ ("NPY_NO_DEPRECATED_API", 0), diff --git a/numpy/_core/tests/examples/limited_api/setup.py b/numpy/_core/tests/examples/limited_api/setup.py index 18747dc80896..16adcd12327d 100644 --- a/numpy/_core/tests/examples/limited_api/setup.py +++ b/numpy/_core/tests/examples/limited_api/setup.py @@ -2,10 +2,12 @@ Build an example package using the limited Python C API. """ -import numpy as np -from setuptools import setup, Extension import os +from setuptools import Extension, setup + +import numpy as np + macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")] limited_api = Extension( diff --git a/numpy/_core/tests/test__exceptions.py b/numpy/_core/tests/test__exceptions.py index 1b191ad382b7..35782e7a5878 100644 --- a/numpy/_core/tests/test__exceptions.py +++ b/numpy/_core/tests/test__exceptions.py @@ -5,6 +5,7 @@ import pickle import pytest + import numpy as np from numpy.exceptions import AxisError diff --git a/numpy/_core/tests/test_abc.py b/numpy/_core/tests/test_abc.py index f7ab6b635881..aee1904f1727 100644 --- a/numpy/_core/tests/test_abc.py +++ b/numpy/_core/tests/test_abc.py @@ -1,9 +1,9 @@ -from numpy.testing import assert_ - import numbers import numpy as np from numpy._core.numerictypes import sctypes +from numpy.testing import assert_ + class TestABC: def test_abstract(self): diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 8d7c617898e6..d427ac0399a2 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -1,13 +1,18 @@ import sys +import pytest + import numpy as np import numpy._core.umath as ncu from numpy._core._rational_tests import rational -import pytest from numpy.testing import ( - assert_, 
assert_equal, assert_array_equal, assert_raises, assert_warns, - HAS_REFCOUNT - ) + HAS_REFCOUNT, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_warns, +) def test_array_array(): diff --git a/numpy/_core/tests/test_argparse.py b/numpy/_core/tests/test_argparse.py index ededced3b9fe..0c49ec00277e 100644 --- a/numpy/_core/tests/test_argparse.py +++ b/numpy/_core/tests/test_argparse.py @@ -18,6 +18,8 @@ def func(arg1, /, arg2, *, arg3): import numpy as np from numpy._core._multiarray_tests import ( argparse_example_function as func, +) +from numpy._core._multiarray_tests import ( threaded_argparse_example_function as thread_func, ) from numpy.testing import IS_WASM diff --git a/numpy/_core/tests/test_array_api_info.py b/numpy/_core/tests/test_array_api_info.py index cccf5d346c8b..4842dbfa9486 100644 --- a/numpy/_core/tests/test_array_api_info.py +++ b/numpy/_core/tests/test_array_api_info.py @@ -1,6 +1,7 @@ -import numpy as np import pytest +import numpy as np + info = np.__array_namespace_info__() diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 9301f3fd92c8..a3939daa8904 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -12,10 +12,7 @@ import numpy as np import numpy._core._multiarray_umath as ncu from numpy._core._rational_tests import rational - -from numpy.testing import ( - assert_array_equal, assert_warns, IS_PYPY, IS_64BIT -) +from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal def arraylikes(): @@ -325,18 +322,18 @@ def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): cast = np.array(scalar).astype(dtype) except (TypeError, ValueError, RuntimeError): # coercion should also raise (error type may change) - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 np.array(scalar, dtype=dtype) if (isinstance(scalar, rational) and np.issubdtype(dtype, np.signedinteger)): return - 
with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 np.array([scalar], dtype=dtype) # assignment should also raise res = np.zeros((), dtype=dtype) - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 res[()] = scalar return @@ -848,7 +845,7 @@ class TestSpecialAttributeLookupFailure: class WeirdArrayLike: @property - def __array__(self, dtype=None, copy=None): + def __array__(self, dtype=None, copy=None): # noqa: PLR0206 raise RuntimeError("oops!") class WeirdArrayInterface: diff --git a/numpy/_core/tests/test_array_interface.py b/numpy/_core/tests/test_array_interface.py index ed56f7e79daf..afb19f4e280f 100644 --- a/numpy/_core/tests/test_array_interface.py +++ b/numpy/_core/tests/test_array_interface.py @@ -1,8 +1,10 @@ import sys +import sysconfig + import pytest + import numpy as np -from numpy.testing import extbuild, IS_WASM, IS_EDITABLE -import sysconfig +from numpy.testing import IS_EDITABLE, IS_WASM, extbuild @pytest.fixture diff --git a/numpy/_core/tests/test_arraymethod.py b/numpy/_core/tests/test_arraymethod.py index e490ebc9c32b..5b3d51585718 100644 --- a/numpy/_core/tests/test_arraymethod.py +++ b/numpy/_core/tests/test_arraymethod.py @@ -3,8 +3,6 @@ this is private API, but when added, public API may be added here. 
""" -from __future__ import annotations - import types from typing import Any diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 09ed71f342a2..1fd4ac2fddb7 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -1,17 +1,24 @@ -import sys import gc +import sys +import textwrap + +import pytest from hypothesis import given from hypothesis.extra import numpy as hynp -import pytest import numpy as np +from numpy._core.arrayprint import _typelessdata from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT, - assert_raises_regex, IS_WASM - ) + HAS_REFCOUNT, + IS_WASM, + assert_, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) from numpy.testing._private.utils import run_threaded -from numpy._core.arrayprint import _typelessdata -import textwrap + class TestArrayRepr: def test_nan_inf(self): @@ -326,7 +333,8 @@ def test_unstructured_void_repr(self): r"np.void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") assert_equal(repr(a), - r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n" + r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," + "\n" r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')") assert_equal(eval(repr(a), vars(np)), a) diff --git a/numpy/_core/tests/test_casting_floatingpoint_errors.py b/numpy/_core/tests/test_casting_floatingpoint_errors.py index 999ab40ead8b..2f9c01f907c4 100644 --- a/numpy/_core/tests/test_casting_floatingpoint_errors.py +++ b/numpy/_core/tests/test_casting_floatingpoint_errors.py @@ -1,7 +1,8 @@ import pytest from pytest import param -from numpy.testing import IS_WASM + import numpy as np +from numpy.testing import IS_WASM def values_and_dtypes(): diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index b3a5452d0d37..91ecc0dc75b0 100644 --- a/numpy/_core/tests/test_casting_unittests.py 
+++ b/numpy/_core/tests/test_casting_unittests.py @@ -6,18 +6,17 @@ than integration tests. """ -import pytest -import textwrap +import ctypes import enum import random -import ctypes +import textwrap + +import pytest import numpy as np +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl from numpy.lib.stride_tricks import as_strided - from numpy.testing import assert_array_equal -from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl - # Simple skips object, parametric and long double (unsupported by struct) simple_dtypes = "?bhilqBHILQefdFD" @@ -459,7 +458,7 @@ def test_time_to_time(self, from_dt, to_dt, orig_arr = values.view(from_dt) orig_out = np.empty_like(expected_out) - if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"): + if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"): # noqa: PLR1714 # Casting from non-generic to generic units is an error and should # probably be reported as an invalid cast earlier. 
with pytest.raises(ValueError): diff --git a/numpy/_core/tests/test_conversion_utils.py b/numpy/_core/tests/test_conversion_utils.py index 5770887f8780..d63ca9e58df5 100644 --- a/numpy/_core/tests/test_conversion_utils.py +++ b/numpy/_core/tests/test_conversion_utils.py @@ -2,13 +2,11 @@ Tests for numpy/_core/src/multiarray/conversion_utils.c """ import re -import sys import pytest -import numpy as np import numpy._core._multiarray_tests as mt -from numpy._core.multiarray import CLIP, WRAP, RAISE +from numpy._core.multiarray import CLIP, RAISE, WRAP from numpy.testing import assert_raises diff --git a/numpy/_core/tests/test_cpu_dispatcher.py b/numpy/_core/tests/test_cpu_dispatcher.py index c52cd418a08b..fc9d5e3147e0 100644 --- a/numpy/_core/tests/test_cpu_dispatcher.py +++ b/numpy/_core/tests/test_cpu_dispatcher.py @@ -1,9 +1,12 @@ +from numpy._core import _umath_tests from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, ) -from numpy._core import _umath_tests from numpy.testing import assert_equal + def test_dispatcher(): """ Testing the utilities of the CPU dispatcher diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index f4bd02ab55e5..ecc806e9c0e5 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -1,16 +1,18 @@ import os -import re -import sys import pathlib import platform +import re import subprocess +import sys + import pytest + from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__, + __cpu_features__, ) -import numpy as np + def assert_features_equal(actual, desired, fname): __tracebackhide__ = True # Hide traceback for py.test @@ -156,7 +158,6 @@ def setup_class(self, tmp_path_factory): file /= "_runtime_detect.py" file.write_text(self.SCRIPT) self.file = file - return def _run(self): return subprocess.run( @@ -189,7 
+190,6 @@ def _expect_error( def setup_method(self): """Ensure that the environment is reset""" self.env = os.environ.copy() - return def test_runtime_feature_selection(self): """ @@ -228,7 +228,6 @@ def test_runtime_feature_selection(self): # Ensure that both features are enabled, and they are exactly the ones # specified by `NPY_ENABLE_CPU_FEATURES` assert set(enabled_features) == set(non_baseline_features) - return @pytest.mark.parametrize("enabled, disabled", [ diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index ee3a02b612bf..3336286d8c98 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -3,10 +3,11 @@ import pytest import numpy as np -from numpy.testing import assert_array_equal from numpy._core._multiarray_umath import ( - _discover_array_parameters as discover_array_params, _get_sfloat_dtype) - + _discover_array_parameters as discover_array_params, +) +from numpy._core._multiarray_umath import _get_sfloat_dtype +from numpy.testing import assert_array_equal SF = _get_sfloat_dtype() diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index fda70b9ac79c..2c7b40c5614c 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -1,12 +1,13 @@ -from datetime import datetime import os import subprocess import sys -import pytest import sysconfig +from datetime import datetime + +import pytest import numpy as np -from numpy.testing import assert_array_equal, IS_WASM, IS_EDITABLE +from numpy.testing import IS_EDITABLE, IS_WASM, assert_array_equal # This import is copied from random.tests.test_extending try: @@ -344,7 +345,8 @@ def test_npystring_allocators_other_dtype(install_temp): assert checks.npystring_allocators_other_types(arr1, arr2) == 0 -@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + 
reason='no checks module on win-arm64') def test_npy_uintp_type_enum(): import checks assert checks.check_npy_uintp_type_enum() diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 8d48e8a6630a..81ac9778971b 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -1,5 +1,6 @@ import datetime import pickle +from zoneinfo import ZoneInfo, ZoneInfoNotFoundError import pytest @@ -7,22 +8,25 @@ import numpy as np from numpy.testing import ( IS_WASM, - assert_, assert_equal, assert_raises, assert_warns, suppress_warnings, - assert_raises_regex, assert_array_equal, - ) - -# Use pytz to test out various time zones if available -try: - from pytz import timezone as tz - _has_pytz = True -except ImportError: - _has_pytz = False + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, + suppress_warnings, +) try: RecursionError except NameError: RecursionError = RuntimeError # python < 3.5 +try: + ZoneInfo("US/Central") + _has_tz = True +except ZoneInfoNotFoundError: + _has_tz = False def _assert_equal_hash(v1, v2): assert v1 == v2 @@ -1881,7 +1885,7 @@ def test_datetime_as_string(self): np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'), '2032-01-01') - @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.") + @pytest.mark.skipif(not _has_tz, reason="The tzdata module is not available.") def test_datetime_as_string_timezone(self): # timezone='local' vs 'UTC' a = np.datetime64('2010-03-15T06:30', 'm') @@ -1896,29 +1900,29 @@ def test_datetime_as_string_timezone(self): b = np.datetime64('2010-02-15T06:30', 'm') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Central')), '2010-03-15T01:30-0500') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Eastern')), 
'2010-03-15T02:30-0400') - assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(a, timezone=ZoneInfo('US/Pacific')), '2010-03-14T23:30-0700') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Central')), '2010-02-15T00:30-0600') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Eastern')), '2010-02-15T01:30-0500') - assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')), + assert_equal(np.datetime_as_string(b, timezone=ZoneInfo('US/Pacific')), '2010-02-14T22:30-0800') # Dates to strings with a timezone attached is disabled by default assert_raises(TypeError, np.datetime_as_string, a, unit='D', - timezone=tz('US/Pacific')) + timezone=ZoneInfo('US/Pacific')) # Check that we can print out the date in the specified time zone assert_equal(np.datetime_as_string(a, unit='D', - timezone=tz('US/Pacific'), casting='unsafe'), + timezone=ZoneInfo('US/Pacific'), casting='unsafe'), '2010-03-14') assert_equal(np.datetime_as_string(b, unit='D', - timezone=tz('US/Central'), casting='unsafe'), + timezone=ZoneInfo('US/Central'), casting='unsafe'), '2010-02-15') def test_datetime_arange(self): diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index f88e1fce1c11..2607953a940a 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -3,9 +3,12 @@ import numpy as np from numpy._core.multiarray import _vec_string from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, - assert_raises_regex - ) + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) kw_unicode_true = {'unicode': True} # make 2to3 work properly kw_unicode_false = {'unicode': False} diff --git a/numpy/_core/tests/test_deprecations.py 
b/numpy/_core/tests/test_deprecations.py index 693e4ca29663..c4acbf9d2d69 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -3,25 +3,15 @@ to document how deprecations should eventually be turned into errors. """ +import contextlib import warnings + import pytest -import tempfile -import re import numpy as np -from numpy.testing import ( - assert_raises, assert_warns, assert_, assert_array_equal, SkipTest, - KnownFailureException, break_cycles, temppath - ) - -from numpy._core._multiarray_tests import fromstring_null_term_c_api import numpy._core._struct_ufunc_tests as struct_ufunc - -try: - import pytz - _has_pytz = True -except ImportError: - _has_pytz = False +from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 +from numpy.testing import assert_raises, temppath class _DeprecationTestCase: @@ -88,10 +78,12 @@ def assert_deprecated(self, function, num=1, ignore_others=False, if exceptions is np._NoValue: exceptions = (self.warning_cls,) - try: + if function_fails: + context_manager = contextlib.suppress(Exception) + else: + context_manager = contextlib.nullcontext() + with context_manager: function(*args, **kwargs) - except (Exception if function_fails else ()): - pass # just in case, clear the registry num_found = 0 @@ -345,13 +337,13 @@ def test_deprecated_np_lib_math(self): class TestLibImports(_DeprecationTestCase): # Deprecated in Numpy 1.26.0, 2023-09 def test_lib_functions_deprecation_call(self): - from numpy.lib._utils_impl import safe_eval - from numpy.lib._npyio_impl import recfromcsv, recfromtxt + from numpy import in1d, row_stack, trapz + from numpy._core.numerictypes import maximum_sctype from numpy.lib._function_base_impl import disp + from numpy.lib._npyio_impl import recfromcsv, recfromtxt from numpy.lib._shape_base_impl import get_array_wrap - from numpy._core.numerictypes import maximum_sctype + from numpy.lib._utils_impl import safe_eval from numpy.lib.tests.test_io 
import TextIO - from numpy import in1d, row_stack, trapz self.assert_deprecated(lambda: safe_eval("None")) diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index d273bd798ebb..89c24032b6c1 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -1,8 +1,9 @@ import sys + import pytest import numpy as np -from numpy.testing import assert_array_equal, IS_PYPY +from numpy.testing import IS_PYPY, assert_array_equal def new_and_old_dlpack(): diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index e248eac1fa53..d9bd17c48434 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -1,24 +1,30 @@ -import sys -import operator -import pytest import ctypes import gc +import operator +import pickle +import random +import sys import types +from itertools import permutations from typing import Any -import pickle + +import hypothesis +import pytest +from hypothesis.extra import numpy as hynp import numpy as np import numpy.dtypes -from numpy._core._rational_tests import rational from numpy._core._multiarray_tests import create_custom_field_dtype +from numpy._core._rational_tests import rational from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT, - IS_PYSTON, IS_WASM) -from itertools import permutations -import random - -import hypothesis -from hypothesis.extra import numpy as hynp + HAS_REFCOUNT, + IS_PYSTON, + IS_WASM, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) def assert_dtype_equal(a, b): @@ -1624,7 +1630,7 @@ def test_basic_dtypes_subclass_properties(self, dtype): assert type(dtype) is not np.dtype if dtype.type.__name__ != "rational": dt_name = type(dtype).__name__.lower().removesuffix("dtype") - if dt_name == "uint" or dt_name == "int": + if dt_name in {"uint", "int"}: # The scalar names has a `c` attached because "int" is Python # int and that is long... 
dt_name += "c" diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index f3fd137b7c5c..0bd180b5e41f 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -4,9 +4,15 @@ import numpy as np from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_raises, suppress_warnings, assert_raises_regex, assert_allclose - ) + assert_, + assert_allclose, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + suppress_warnings, +) # Setup for optimize einsum chars = 'abcdefghij' diff --git a/numpy/_core/tests/test_errstate.py b/numpy/_core/tests/test_errstate.py index 2ffe48df632e..b72fb65a3239 100644 --- a/numpy/_core/tests/test_errstate.py +++ b/numpy/_core/tests/test_errstate.py @@ -1,8 +1,9 @@ -import pytest import sysconfig +import pytest + import numpy as np -from numpy.testing import assert_, assert_raises, IS_WASM +from numpy.testing import IS_WASM, assert_raises # The floating point emulation on ARM EABI systems lacking a hardware FPU is # known to be buggy. This is an attempt to identify these hosts. 
It may not diff --git a/numpy/_core/tests/test_extint128.py b/numpy/_core/tests/test_extint128.py index ba9aeeacdbac..6e4d74b81d39 100644 --- a/numpy/_core/tests/test_extint128.py +++ b/numpy/_core/tests/test_extint128.py @@ -1,13 +1,12 @@ -import itertools import contextlib +import itertools import operator + import pytest import numpy as np import numpy._core._multiarray_tests as mt - -from numpy.testing import assert_raises, assert_equal - +from numpy.testing import assert_equal, assert_raises INT64_MAX = np.iinfo(np.int64).max INT64_MIN = np.iinfo(np.int64).min diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index d7a1124d6c3d..3a8552de2d36 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -1,22 +1,39 @@ -import sys import platform +import sys + import pytest import numpy as np from numpy import ( - logspace, linspace, geomspace, dtype, array, arange, isnan, - ndarray, sqrt, nextafter, stack, errstate - ) + arange, + array, + dtype, + errstate, + geomspace, + isnan, + linspace, + logspace, + ndarray, + nextafter, + sqrt, + stack, +) from numpy._core import sctypes from numpy._core.function_base import add_newdoc from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose, - IS_PYPY - ) + IS_PYPY, + assert_, + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) + def _is_armhf(): # Check if the current platform is ARMHF (32-bit ARM architecture) - return platform.machine().startswith('arm') and platform.architecture()[0] == '32bit' + architecture = platform.architecture() + return platform.machine().startswith('arm') and architecture[0] == '32bit' class PhysicalQuantity(float): def __new__(cls, value): diff --git a/numpy/_core/tests/test_getlimits.py b/numpy/_core/tests/test_getlimits.py index 2afdd894e5b2..721c6ac6cdf9 100644 --- a/numpy/_core/tests/test_getlimits.py +++ 
b/numpy/_core/tests/test_getlimits.py @@ -3,12 +3,14 @@ """ import types import warnings -import numpy as np + import pytest + +import numpy as np +from numpy import double, half, longdouble, single from numpy._core import finfo, iinfo -from numpy import half, single, double, longdouble -from numpy.testing import assert_equal, assert_, assert_raises from numpy._core.getlimits import _discovered_machar, _float_ma +from numpy.testing import assert_, assert_equal, assert_raises ################################################## diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index 98a1dab61bfc..e2d6e6796db4 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -1,9 +1,10 @@ import platform + import pytest import numpy as np -from numpy import uint16, float16, float32, float64 -from numpy.testing import assert_, assert_equal, IS_WASM +from numpy import float16, float32, float64, uint16 +from numpy.testing import IS_WASM, assert_, assert_equal def assert_raises_fpe(strmatch, callable, *args, **kwargs): @@ -530,7 +531,7 @@ def test_half_fpe(self): assert_raises_fpe('overflow', lambda a, b: a - b, float16(-65504), float16(17)) assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) - assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) # noqa: E501 assert_raises_fpe('overflow', np.spacing, float16(65504)) # Invalid value errors diff --git a/numpy/_core/tests/test_hashtable.py b/numpy/_core/tests/test_hashtable.py index 41da06be3f2b..25a7158aaf6f 100644 --- a/numpy/_core/tests/test_hashtable.py +++ b/numpy/_core/tests/test_hashtable.py @@ -1,6 +1,7 @@ +import random + import pytest -import random from numpy._core._multiarray_tests import identityhash_tester diff --git a/numpy/_core/tests/test_indexerrors.py b/numpy/_core/tests/test_indexerrors.py index c1faa9555813..02110c28356a 100644 --- 
a/numpy/_core/tests/test_indexerrors.py +++ b/numpy/_core/tests/test_indexerrors.py @@ -1,7 +1,8 @@ import numpy as np from numpy.testing import ( - assert_raises, assert_raises_regex, - ) + assert_raises, + assert_raises_regex, +) class TestIndexErrors: diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index b65533bbc5ef..81ba85ea4648 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -1,18 +1,23 @@ -import sys -import warnings import functools import operator +import sys +import warnings +from itertools import product import pytest import numpy as np from numpy._core._multiarray_tests import array_indexing -from itertools import product from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_warns, HAS_REFCOUNT, IS_WASM - ) + HAS_REFCOUNT, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) class TestIndexing: @@ -155,6 +160,20 @@ def test_gh_26542_index_overlap(self): actual_vals = arr[10:] assert_equal(actual_vals, expected_vals) + def test_gh_26844(self): + expected = [0, 1, 3, 3, 3] + a = np.arange(5) + a[2:][a[:-2]] = 3 + assert_equal(a, expected) + + def test_gh_26844_segfault(self): + # check for absence of segfault for: + # https://github.com/numpy/numpy/pull/26958/files#r1854589178 + a = np.arange(5) + expected = [0, 1, 3, 3, 3] + a[2:][None, a[:-2]] = 3 + assert_equal(a, expected) + def test_ellipsis_index(self): a = np.array([[1, 2, 3], [4, 5, 6], @@ -945,7 +964,7 @@ def _get_multi_index(self, arr, indices): except ValueError: raise IndexError in_indices[i] = indx - elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i': + elif indx.dtype.kind not in 'bi': raise IndexError('arrays used as indices must be of ' 'integer (or boolean) type') if indx.ndim != 0: @@ -1002,12 +1021,12 @@ def 
_get_multi_index(self, arr, indices): # Note that originally this is could be interpreted as # integer in the full integer special case. raise IndexError - else: - # If the index is a singleton, the bounds check is done - # before the broadcasting. This used to be different in <1.9 - if indx.ndim == 0: - if indx >= arr.shape[ax] or indx < -arr.shape[ax]: - raise IndexError + # If the index is a singleton, the bounds check is done + # before the broadcasting. This used to be different in <1.9 + elif indx.ndim == 0 and not ( + -arr.shape[ax] <= indx < arr.shape[ax] + ): + raise IndexError if indx.ndim == 0: # The index is a scalar. This used to be two fold, but if # fancy indexing was active, the check was done later, @@ -1333,7 +1352,8 @@ def test_boolean_indexing_fast_path(self): "size of axis is 3 but size of corresponding boolean axis is 1", lambda: a[idx1]) - # This used to incorrectly give a ValueError: operands could not be broadcast together + # This used to incorrectly give a ValueError: operands could not be + # broadcast together idx2 = np.array([[False] * 8 + [True]]) assert_raises_regex(IndexError, "boolean index did not match indexed array along axis 0; " diff --git a/numpy/_core/tests/test_item_selection.py b/numpy/_core/tests/test_item_selection.py index c63c11011e6f..79fb82dde591 100644 --- a/numpy/_core/tests/test_item_selection.py +++ b/numpy/_core/tests/test_item_selection.py @@ -3,9 +3,7 @@ import pytest import numpy as np -from numpy.testing import ( - assert_, assert_raises, assert_array_equal, HAS_REFCOUNT - ) +from numpy.testing import HAS_REFCOUNT, assert_, assert_array_equal, assert_raises class TestTake: diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index 58f2b5ce050d..984210e53af7 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -2,9 +2,10 @@ import subprocess import sys import sysconfig + import pytest -from numpy.testing import IS_WASM, 
IS_PYPY, NOGIL_BUILD, IS_EDITABLE +from numpy.testing import IS_EDITABLE, IS_PYPY, IS_WASM, NOGIL_BUILD # This import is copied from random.tests.test_extending try: @@ -96,6 +97,6 @@ def test_limited_api(install_temp): and building a cython extension with the limited API """ - import limited_api1 # Earliest (3.6) - import limited_api_latest # Latest version (current Python) - import limited_api2 # cython + import limited_api1 # Earliest (3.6) # noqa: F401 + import limited_api2 # cython # noqa: F401 + import limited_api_latest # Latest version (current Python) # noqa: F401 diff --git a/numpy/_core/tests/test_longdouble.py b/numpy/_core/tests/test_longdouble.py index c09f2d824a1c..f7edd9774573 100644 --- a/numpy/_core/tests/test_longdouble.py +++ b/numpy/_core/tests/test_longdouble.py @@ -1,14 +1,18 @@ -import warnings import platform +import warnings + import pytest import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_warns, assert_array_equal, - temppath, IS_MUSL - ) from numpy._core.tests._locales import CommaDecimalPointLocale - +from numpy.testing import ( + IS_MUSL, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + temppath, +) LD_INFO = np.finfo(np.longdouble) longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) diff --git a/numpy/_core/tests/test_machar.py b/numpy/_core/tests/test_machar.py index 18d7ed30e062..2d772dd51233 100644 --- a/numpy/_core/tests/test_machar.py +++ b/numpy/_core/tests/test_machar.py @@ -3,9 +3,9 @@ rid of both MachAr and this test at some point. 
""" -from numpy._core._machar import MachAr import numpy._core.numerictypes as ntypes -from numpy import errstate, array +from numpy import array, errstate +from numpy._core._machar import MachAr class TestMachAr: diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index 0d80951a854a..240ea62850ee 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -1,14 +1,12 @@ import itertools + import pytest import numpy as np -from numpy._core._multiarray_tests import solve_diophantine, internal_overlap from numpy._core import _umath_tests +from numpy._core._multiarray_tests import internal_overlap, solve_diophantine from numpy.lib.stride_tricks import as_strided -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_array_equal - ) - +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises ndims = 2 size = 10 @@ -167,8 +165,9 @@ def check_may_share_memory_exact(a, b): err_msg = "" if got != exact: + base_delta = a.__array_interface__['data'][0] - b.__array_interface__['data'][0] err_msg = " " + "\n ".join([ - f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", + f"base_a - base_b = {base_delta!r}", f"shape_a = {a.shape!r}", f"shape_b = {b.shape!r}", f"strides_a = {a.strides!r}", @@ -404,7 +403,9 @@ def check(A, U, exists=None): exists = (X is not None) if X is not None: - assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) + sum_ax = sum(a * x for a, x in zip(A, X)) + sum_au_half = sum(a * u // 2 for a, u in zip(A, U)) + assert_(sum_ax == sum_au_half) assert_(all(0 <= x <= u for x, u in zip(X, U))) assert_(any(x != u // 2 for x, u in zip(X, U))) @@ -643,16 +644,15 @@ def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, sl = [slice(0, 1)] + [0] * (ndim - 1) else: sl = [slice(0, outsize)] + [0] * (ndim - 1) - else: - if outsize is None: - k = b.shape[axis] 
// 2 - if ndim == 1: - sl[axis] = slice(k, k + 1) - else: - sl[axis] = k + elif outsize is None: + k = b.shape[axis] // 2 + if ndim == 1: + sl[axis] = slice(k, k + 1) else: - assert b.shape[axis] >= outsize - sl[axis] = slice(0, outsize) + sl[axis] = k + else: + assert b.shape[axis] >= outsize + sl[axis] = slice(0, outsize) b_out = b[tuple(sl)] if scalarize: diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 8d09a9ded659..b9f971e73249 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -2,14 +2,14 @@ import gc import os import sys -import threading import sysconfig +import threading import pytest import numpy as np -from numpy.testing import extbuild, assert_warns, IS_WASM, IS_EDITABLE from numpy._core.multiarray import get_handler_name +from numpy.testing import IS_EDITABLE, IS_WASM, assert_warns, extbuild @pytest.fixture diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index f76fab90c959..cbd825205844 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -1,18 +1,34 @@ -import sys -import os import mmap -import pytest +import os +import sys from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryFile -from numpy import ( - memmap, sum, average, prod, ndarray, isscalar, add, subtract, multiply) +import pytest -from numpy import arange, allclose, asarray +from numpy import ( + add, + allclose, + arange, + asarray, + average, + isscalar, + memmap, + multiply, + ndarray, + prod, + subtract, + sum, +) from numpy.testing import ( - assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY, - break_cycles - ) + IS_PYPY, + assert_, + assert_array_equal, + assert_equal, + break_cycles, + suppress_warnings, +) + class TestMemmap: def setup_method(self): diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 0e92dfc4140b..b164f1dada3b 100644 --- 
a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -1,44 +1,59 @@ -from __future__ import annotations - +import builtins import collections.abc -import tempfile -import sys -import warnings -import operator +import ctypes +import functools +import gc import io import itertools -import functools -import ctypes +import mmap +import operator import os -import gc +import pathlib +import pickle import re +import sys +import tempfile +import warnings import weakref -import pytest from contextlib import contextmanager -import pickle -import pathlib -import builtins + +# Need to test an object that does not fully implement math interface +from datetime import datetime, timedelta from decimal import Decimal -import mmap + +import pytest import numpy as np import numpy._core._multiarray_tests as _multiarray_tests from numpy._core._rational_tests import rational -from numpy.exceptions import AxisError, ComplexWarning -from numpy.testing import ( - assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal, - assert_array_equal, assert_raises_regex, assert_array_almost_equal, - assert_allclose, IS_PYPY, IS_WASM, IS_PYSTON, HAS_REFCOUNT, - assert_array_less, runstring, temppath, suppress_warnings, break_cycles, - check_support_sve, assert_array_compare, IS_64BIT - ) -from numpy.testing._private.utils import requires_memory, _no_tracing +from numpy._core.multiarray import _get_ndarray_c_version, dot from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.exceptions import AxisError, ComplexWarning from numpy.lib.recfunctions import repack_fields -from numpy._core.multiarray import _get_ndarray_c_version, dot - -# Need to test an object that does not fully implement math interface -from datetime import timedelta, datetime +from numpy.testing import ( + HAS_REFCOUNT, + IS_64BIT, + IS_PYPY, + IS_PYSTON, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + 
assert_array_compare, + assert_array_equal, + assert_array_less, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, + break_cycles, + check_support_sve, + runstring, + suppress_warnings, + temppath, +) +from numpy.testing._private.utils import _no_tracing, requires_memory def assert_arg_sorted(arr, arg): @@ -3948,16 +3963,15 @@ def first_out_arg(result): elif ufunc_override_expected: assert_equal(arr_method(obj)[0], "__array_ufunc__", err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_method(obj)) + assert_(res.__class__ is obj.__class__, err_msg) else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - res = first_out_arg(arr_method(obj)) - assert_(res.__class__ is obj.__class__, err_msg) - else: - assert_raises((TypeError, Coerced), - arr_method, obj, err_msg=err_msg) + assert_raises((TypeError, Coerced), + arr_method, obj, err_msg=err_msg) # obj __op__ arr arr_rmethod = getattr(arr, f"__r{op}__") if ufunc_override_expected: @@ -3965,17 +3979,16 @@ def first_out_arg(result): assert_equal(res[0], "__array_ufunc__", err_msg=err_msg) assert_equal(res[1], ufunc, err_msg=err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_rmethod(obj)) + assert_(res.__class__ is obj.__class__, err_msg) else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - res = first_out_arg(arr_rmethod(obj)) - assert_(res.__class__ is obj.__class__, err_msg) - else: - # __array_ufunc__ = "asdf" creates a TypeError - assert_raises((TypeError, Coerced), - arr_rmethod, obj, err_msg=err_msg) + # __array_ufunc__ = "asdf" creates a TypeError + assert_raises((TypeError, Coerced), + arr_rmethod, obj, 
err_msg=err_msg) # arr __iop__ obj # array scalars don't have in-place operators @@ -3990,16 +4003,15 @@ def first_out_arg(result): assert_equal(res[1], ufunc, err_msg) assert_(type(res[-1]["out"]) is tuple, err_msg) assert_(res[-1]["out"][0] is arr, err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + assert_(arr_imethod(obj) is arr, err_msg) else: - if (isinstance(obj, np.ndarray) and - (type(obj).__array_ufunc__ is - np.ndarray.__array_ufunc__)): - # __array__ gets ignored - assert_(arr_imethod(obj) is arr, err_msg) - else: - assert_raises((TypeError, Coerced), - arr_imethod, obj, - err_msg=err_msg) + assert_raises((TypeError, Coerced), + arr_imethod, obj, + err_msg=err_msg) op_fn = getattr(operator, op, None) if op_fn is None: @@ -4404,6 +4416,41 @@ def test_f_contiguous_array(self): assert_equal(f_contiguous_array, depickled_f_contiguous_array) + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + @pytest.mark.parametrize('transposed_contiguous_array', + [np.random.default_rng(42).random((2, 3, 4)).transpose((1, 0, 2)), + np.random.default_rng(42).random((2, 3, 4, 5)).transpose((1, 3, 0, 2))] + + [np.random.default_rng(42).random(np.arange(2, 7)).transpose(np.random.permutation(5)) for _ in range(3)]) + def test_transposed_contiguous_array(self, transposed_contiguous_array): + buffers = [] + # When using pickle protocol 5, arrays which can be transposed to c_contiguous + # can be serialized using out-of-band buffers + bytes_string = pickle.dumps(transposed_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_transposed_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(transposed_contiguous_array, depickled_transposed_contiguous_array) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + def 
test_load_legacy_pkl_protocol5(self): + # legacy byte strs are dumped in 2.2.1 + c_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01C\x94t\x94R\x94.' # noqa: E501 + f_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01F\x94t\x94R\x94.' # noqa: E501 + transposed_contiguous_dumped = b'\x80\x05\x95\xa5\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x04K\x03K\x02\x87\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x18\x00\x01\x08\t\x10\x11\x02\x03\n\x0b\x12\x13\x04\x05\x0c\r\x14\x15\x06\x07\x0e\x0f\x16\x17\x94t\x94b.' # noqa: E501 + no_contiguous_dumped = b'\x80\x05\x95\x91\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x03K\x02\x86\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x06\x00\x01\x04\x05\x08\t\x94t\x94b.' 
# noqa: E501 + x = np.arange(24, dtype='uint8').reshape(3, 4, 2) + assert_equal(x, pickle.loads(c_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2, order='F') + assert_equal(x, pickle.loads(f_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2).transpose((1, 0, 2)) + assert_equal(x, pickle.loads(transposed_contiguous_dumped)) + x = np.arange(12, dtype='uint8').reshape(3, 4)[:, :2] + assert_equal(x, pickle.loads(no_contiguous_dumped)) + def test_non_contiguous_array(self): non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] assert not non_contiguous_array.flags.c_contiguous @@ -4411,10 +4458,13 @@ def test_non_contiguous_array(self): # make sure non-contiguous arrays can be pickled-depickled # using any protocol + buffers = [] for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): depickled_non_contiguous_array = pickle.loads( - pickle.dumps(non_contiguous_array, protocol=proto)) + pickle.dumps(non_contiguous_array, protocol=proto, + buffer_callback=buffers.append if proto >= 5 else None)) + assert_equal(len(buffers), 0) assert_equal(non_contiguous_array, depickled_non_contiguous_array) def test_roundtrip(self): @@ -5979,7 +6029,6 @@ def test_check_reference(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y = x assert_raises(ValueError, x.resize, (5, 1)) - del y # avoid pyflakes unused variable warning. @_no_tracing def test_int_shape(self): @@ -6058,7 +6107,6 @@ def test_check_weakref(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) xref = weakref.ref(x) assert_raises(ValueError, x.resize, (5, 1)) - del xref # avoid pyflakes unused variable warning. 
class TestRecord: @@ -6098,15 +6146,13 @@ def test_dtype_unicode(): def test_fromarrays_unicode(self): # A single name string provided to fromarrays() is allowed to be unicode - # on both Python 2 and 3: x = np._core.records.fromarrays( [[0], [1]], names='a,b', formats='i4,i4') assert_equal(x['a'][0], 0) assert_equal(x['b'][0], 1) def test_unicode_order(self): - # Test that we can sort with order as a unicode field name in both Python 2 and - # 3: + # Test that we can sort with order as a unicode field name name = 'b' x = np.array([1, 3, 2], dtype=[(name, int)]) x.sort(order=name) @@ -7271,6 +7317,34 @@ def test_dot_equivalent(self, args): r3 = np.matmul(args[0].copy(), args[1].copy()) assert_equal(r1, r3) + # issue 29164 with extra checks + @pytest.mark.parametrize('dtype', ( + np.float32, np.float64, np.complex64, np.complex128 + )) + def test_dot_equivalent_matrix_matrix_blastypes(self, dtype): + modes = list(itertools.product(['C', 'F'], [True, False])) + + def apply_mode(m, mode): + order, is_contiguous = mode + if is_contiguous: + return m.copy() if order == 'C' else m.T.copy().T + + retval = np.zeros( + (m.shape[0] * 2, m.shape[1] * 2), dtype=m.dtype, order=order + )[::2, ::2] + retval[...] = m + return retval + + is_complex = np.issubdtype(dtype, np.complexfloating) + m1 = self.m1.astype(dtype) + (1j if is_complex else 0) + m2 = self.m2.astype(dtype) + (1j if is_complex else 0) + dot_res = np.dot(m1, m2) + mo = np.zeros_like(dot_res) + + for mode in itertools.product(*[modes] * 3): + m1_, m2_, mo_ = [apply_mode(*x) for x in zip([m1, m2, mo], mode)] + assert_equal(np.matmul(m1_, m2_, out=mo_), dot_res) + def test_matmul_object(self): import fractions @@ -8265,7 +8339,6 @@ def test_reference_leak(self): if HAS_REFCOUNT: count_2 = sys.getrefcount(np._core._internal) assert_equal(count_1, count_2) - del c # avoid pyflakes unused variable warning. 
def test_padded_struct_array(self): dt1 = np.dtype( @@ -8287,7 +8360,7 @@ def test_padded_struct_array(self): self._check_roundtrip(x3) @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.") - def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')): + def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')): # noqa: B008 # Note: c defined as parameter so that it is persistent and leak # checks will notice gh-16934 (buffer info cache leak). c.strides = (-1, 80, 8) # strides need to be fixed at export @@ -9552,7 +9625,8 @@ def test_dot_out(self): def test_view_assign(self): from numpy._core._multiarray_tests import ( - npy_create_writebackifcopy, npy_resolve + npy_create_writebackifcopy, + npy_resolve, ) arr = np.arange(9).reshape(3, 3).T @@ -9582,7 +9656,8 @@ def test_dealloc_warning(self): def test_view_discard_refcount(self): from numpy._core._multiarray_tests import ( - npy_create_writebackifcopy, npy_discard + npy_create_writebackifcopy, + npy_discard, ) arr = np.arange(9).reshape(3, 3).T @@ -9991,7 +10066,12 @@ def test_npymath_complex(fun, npfun, x, y, test_dtype): def test_npymath_real(): # Smoketest npymath functions from numpy._core._multiarray_tests import ( - npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh) + npy_cosh, + npy_log10, + npy_sinh, + npy_tan, + npy_tanh, + ) funcs = {npy_log10: np.log10, npy_cosh: np.cosh, @@ -10291,6 +10371,21 @@ def test_argsort_int(N, dtype): arr[N - 1] = maxv assert_arg_sorted(arr, np.argsort(arr, kind='quick')) +# Test large arrays that leverage openMP implementations from x86-simd-sort: +@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64]) +def test_sort_largearrays(dtype): + N = 1000000 + rnd = np.random.RandomState(1100710816) + arr = -0.5 + rnd.random(N).astype(dtype) + assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap')) + +# Test large arrays that leverage openMP implementations from x86-simd-sort: +@pytest.mark.parametrize("dtype", 
[np.float32, np.float64]) +def test_argsort_largearrays(dtype): + N = 1000000 + rnd = np.random.RandomState(1100710816) + arr = -0.5 + rnd.random(N).astype(dtype) + assert_arg_sorted(arr, np.argsort(arr, kind='quick')) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_gh_22683(): diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 0d5094a87978..09f907561ae5 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -1,13 +1,13 @@ import concurrent.futures -import threading import string +import threading -import numpy as np import pytest -from numpy.testing import IS_WASM, IS_64BIT -from numpy.testing._private.utils import run_threaded +import numpy as np from numpy._core import _rational_tests +from numpy.testing import IS_64BIT, IS_WASM +from numpy.testing._private.utils import run_threaded if IS_WASM: pytest.skip(allow_module_level=True, reason="no threading support in wasm") diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 9f60b67ba5b1..a29a49bfb71a 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1,19 +1,25 @@ +import subprocess import sys -import pytest - import textwrap -import subprocess + +import pytest import numpy as np -import numpy._core.umath as ncu import numpy._core._multiarray_tests as _multiarray_tests -from numpy import array, arange, nditer, all +import numpy._core.umath as ncu +from numpy import all, arange, array, nditer from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, - IS_WASM, HAS_REFCOUNT, suppress_warnings, break_cycles, - ) + HAS_REFCOUNT, + IS_WASM, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + suppress_warnings, +) from numpy.testing._private.utils import requires_memory + def iter_multi_index(i): ret = [] while not i.finished: @@ -78,8 +84,6 @@ def test_iter_refcount(): 
assert_equal(sys.getrefcount(a), rc_a) assert_equal(sys.getrefcount(dt), rc_dt) - del it2 # avoid pyflakes unused variable warning - def test_iter_best_order(): # The iterator should always find the iteration order # with increasing memory addresses @@ -1482,7 +1486,7 @@ def test_iter_copy_casts_structured2(): # Array of two structured scalars: for res in res1, res2: # Cast to tuple by getitem, which may be weird and changeable?: - assert type(res["a"][0]) == tuple + assert isinstance(res["a"][0], tuple) assert res["a"][0] == (1, 1) for res in res1, res2: diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index 039a36357393..8d9d9e63ce38 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -6,13 +6,12 @@ import operator -import numpy as np - -import pytest import hypothesis +import pytest from hypothesis import strategies -from numpy.testing import assert_array_equal, IS_WASM +import numpy as np +from numpy.testing import IS_WASM, assert_array_equal @pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for fp errors") @@ -113,7 +112,7 @@ def test_weak_promotion_scalar_path(op): # Integer path: res = op(np.uint8(3), 5) assert res == op(3, 5) - assert res.dtype == np.uint8 or res.dtype == bool + assert res.dtype == np.uint8 or res.dtype == bool # noqa: PLR1714 with pytest.raises(OverflowError): op(np.uint8(3), 1000) @@ -121,7 +120,7 @@ def test_weak_promotion_scalar_path(op): # Float path: res = op(np.float32(3), 5.) assert res == op(3., 5.) 
- assert res.dtype == np.float32 or res.dtype == bool + assert res.dtype == np.float32 or res.dtype == bool # noqa: PLR1714 def test_nep50_complex_promotion(): diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 21dae72168de..8a72e4bfa65d 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -1,26 +1,34 @@ -import sys -import warnings import itertools -import platform -import pytest import math +import platform +import sys +import warnings from decimal import Decimal +import pytest +from hypothesis import given +from hypothesis import strategies as st +from hypothesis.extra import numpy as hynp + import numpy as np -from numpy._core import umath, sctypes +from numpy import ma +from numpy._core import sctypes +from numpy._core._rational_tests import rational from numpy._core.numerictypes import obj2sctype from numpy.exceptions import AxisError from numpy.random import rand, randint, randn from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_almost_equal, assert_array_almost_equal, - assert_warns, assert_array_max_ulp, HAS_REFCOUNT, IS_WASM - ) -from numpy._core._rational_tests import rational -from numpy import ma - -from hypothesis import given, strategies as st -from hypothesis.extra import numpy as hynp + HAS_REFCOUNT, + IS_WASM, + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, + assert_raises_regex, +) class TestResize: @@ -1959,9 +1967,9 @@ def __bool__(self): def test_nonzero_byteorder(self): values = [0., -0., 1, float('nan'), 0, 1, np.float16(0), np.float16(12.3)] - expected = [0, 0, 1, 1, 0, 1, 0, 1] + expected_values = [0, 0, 1, 1, 0, 1, 0, 1] - for value, expected in zip(values, expected): + for value, expected in zip(values, expected_values): A = np.array([value]) A_byteswapped = 
(A.view(A.dtype.newbyteorder()).byteswap()).copy() @@ -3205,6 +3213,24 @@ def test_timedelta(self): assert np.allclose(a, a, atol=0, equal_nan=True) assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True) + def test_tol_warnings(self): + a = np.array([1, 2, 3]) + b = np.array([np.inf, np.nan, 1]) + + for i in b: + for j in b: + # Making sure that i and j are not both numbers, because that won't create a warning + if (i == 1) and (j == 1): + continue + + with warnings.catch_warnings(record=True) as w: + + warnings.simplefilter("always") + c = np.isclose(a, a, atol=i, rtol=j) + assert len(w) == 1 + assert issubclass(w[-1].category, RuntimeWarning) + assert f"One of rtol or atol is not valid, atol: {i}, rtol: {j}" in str(w[-1].message) + class TestStdVar: def setup_method(self): diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index ec35e30bd5cd..c9a2ac06472c 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -1,14 +1,17 @@ -import sys import itertools +import sys import pytest + import numpy as np import numpy._core.numerictypes as nt -from numpy._core.numerictypes import ( - issctype, sctype2char, maximum_sctype, sctypes -) +from numpy._core.numerictypes import issctype, maximum_sctype, sctype2char, sctypes from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, IS_PYPY + IS_PYPY, + assert_, + assert_equal, + assert_raises, + assert_raises_regex, ) # This is the structure of the table used for plain objects: diff --git a/numpy/_core/tests/test_overrides.py b/numpy/_core/tests/test_overrides.py index 5e5c07135297..b0d73375ed10 100644 --- a/numpy/_core/tests/test_overrides.py +++ b/numpy/_core/tests/test_overrides.py @@ -1,20 +1,22 @@ import inspect -import sys import os +import pickle +import sys import tempfile from io import StringIO from unittest import mock -import pickle import pytest import numpy as np -from numpy.testing 
import ( - assert_, assert_equal, assert_raises, assert_raises_regex) -from numpy.testing.overrides import get_overridable_numpy_array_functions from numpy._core.overrides import ( - _get_implementing_args, array_function_dispatch, - verify_matching_signatures) + _get_implementing_args, + array_function_dispatch, + verify_matching_signatures, +) +from numpy.testing import assert_, assert_equal, assert_raises, assert_raises_regex +from numpy.testing.overrides import get_overridable_numpy_array_functions + def _return_not_implemented(self, *args, **kwargs): return NotImplemented @@ -211,14 +213,6 @@ def test_wrong_arguments(self): with pytest.raises(TypeError, match="kwargs must be a dict"): a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1)) - def test_wrong_arguments(self): - # Check our implementation guards against wrong arguments. - a = np.array([1, 2]) - with pytest.raises(TypeError, match="args must be a tuple"): - a.__array_function__(np.reshape, (np.ndarray,), a, (2, 1)) - with pytest.raises(TypeError, match="kwargs must be a dict"): - a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1)) - class TestArrayFunctionDispatch: diff --git a/numpy/_core/tests/test_print.py b/numpy/_core/tests/test_print.py index 856cfa6e0a34..d99b2794d7ca 100644 --- a/numpy/_core/tests/test_print.py +++ b/numpy/_core/tests/test_print.py @@ -1,13 +1,11 @@ import sys +from io import StringIO import pytest import numpy as np -from numpy.testing import assert_, assert_equal, IS_MUSL from numpy._core.tests._locales import CommaDecimalPointLocale - - -from io import StringIO +from numpy.testing import IS_MUSL, assert_, assert_equal _REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} diff --git a/numpy/_core/tests/test_protocols.py b/numpy/_core/tests/test_protocols.py index 50ff1f4c7b2d..96bb600843dc 100644 --- a/numpy/_core/tests/test_protocols.py +++ b/numpy/_core/tests/test_protocols.py @@ -1,5 +1,7 @@ -import pytest import warnings + +import pytest + import 
numpy as np diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index 9e30887942ae..b4b93aee4026 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -1,17 +1,21 @@ import collections.abc +import pickle import textwrap from io import BytesIO from os import path from pathlib import Path -import pickle import pytest import numpy as np from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_array_almost_equal, - assert_raises, temppath, - ) + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + temppath, +) class TestFromrecords: diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 63899899c8e8..fbfa9311a1dc 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -1,24 +1,35 @@ import copy -import sys import gc +import pickle +import sys import tempfile -import pytest -from os import path from io import BytesIO from itertools import chain -import pickle +from os import path + +import pytest import numpy as np +from numpy._utils import asbytes, asunicode from numpy.exceptions import AxisError, ComplexWarning from numpy.testing import ( - assert_, assert_equal, IS_PYPY, assert_almost_equal, - assert_array_equal, assert_array_almost_equal, assert_raises, - assert_raises_regex, assert_warns, suppress_warnings, - _assert_valid_refcount, HAS_REFCOUNT, IS_PYSTON, IS_WASM, - IS_64BIT, - ) + HAS_REFCOUNT, + IS_64BIT, + IS_PYPY, + IS_PYSTON, + IS_WASM, + _assert_valid_refcount, + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, + suppress_warnings, +) from numpy.testing._private.utils import _no_tracing, requires_memory -from numpy._utils import asbytes, asunicode class TestRegression: @@ -998,8 +1009,6 @@ def test_object_array_refcounting(self): assert_(cnt(a) 
== cnt0_a + 5 + 2) assert_(cnt(b) == cnt0_b + 5 + 3) - del tmp # Avoid pyflakes unused variable warning - def test_mem_custom_float_to_array(self): # Ticket 702 class MyFloat: @@ -1583,8 +1592,7 @@ def test_take_refcount(self): assert_equal(c1, c2) def test_fromfile_tofile_seeks(self): - # On Python 3, tofile/fromfile used to get (#1610) the Python - # file handle out of sync + # tofile/fromfile used to get (#1610) the Python file handle out of sync with tempfile.NamedTemporaryFile() as f: f.write(np.arange(255, dtype='u1').tobytes()) diff --git a/numpy/_core/tests/test_scalar_ctors.py b/numpy/_core/tests/test_scalar_ctors.py index f4f36f0c3b82..be3ef0459c82 100644 --- a/numpy/_core/tests/test_scalar_ctors.py +++ b/numpy/_core/tests/test_scalar_ctors.py @@ -5,8 +5,11 @@ import numpy as np from numpy.testing import ( - assert_equal, assert_almost_equal, assert_warns, - ) + assert_almost_equal, + assert_equal, + assert_warns, +) + class TestFromString: def test_floating(self): diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index 66d861b7d593..2d508a08bb4d 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -7,10 +7,10 @@ from typing import Any import pytest -import numpy as np +import numpy as np from numpy._core import sctypes -from numpy.testing import assert_equal, assert_raises, IS_MUSL +from numpy.testing import assert_equal, assert_raises class TestAsIntegerRatio: diff --git a/numpy/_core/tests/test_scalarbuffer.py b/numpy/_core/tests/test_scalarbuffer.py index 28aaab09dac4..4d2744b85e53 100644 --- a/numpy/_core/tests/test_scalarbuffer.py +++ b/numpy/_core/tests/test_scalarbuffer.py @@ -1,11 +1,11 @@ """ Test scalar buffer interface adheres to PEP 3118 """ -import numpy as np -from numpy._core._rational_tests import rational -from numpy._core._multiarray_tests import get_buffer_info import pytest +import numpy as np +from numpy._core._multiarray_tests import 
get_buffer_info +from numpy._core._rational_tests import rational from numpy.testing import assert_, assert_equal, assert_raises # PEP3118 format strings for native (standard alignment and byteorder) types @@ -128,8 +128,8 @@ def test_str_ucs4(self, s): s = np.str_(s) # only our subclass implements the buffer protocol # all the same, characters always encode as ucs4 - expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), 'format': '2w', - 'readonly': True} + expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (), + 'format': '2w', 'readonly': True} v = memoryview(s) assert self._as_dict(v) == expected diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 0b086df21c60..746b410f79d2 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -1,23 +1,30 @@ import contextlib -import sys -import warnings import itertools import operator import platform -from numpy._utils import _pep440 +import sys +import warnings + import pytest from hypothesis import given, settings -from hypothesis.strategies import sampled_from from hypothesis.extra import numpy as hynp +from hypothesis.strategies import sampled_from import numpy as np -from numpy.exceptions import ComplexWarning from numpy._core._rational_tests import rational +from numpy._utils import _pep440 +from numpy.exceptions import ComplexWarning from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_almost_equal, - assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data, - assert_warns, check_support_sve, - ) + IS_PYPY, + _gen_alignment_data, + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + check_support_sve, + suppress_warnings, +) types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, np.int_, np.uint, np.longlong, np.ulonglong, diff --git a/numpy/_core/tests/test_scalarprint.py b/numpy/_core/tests/test_scalarprint.py index 
298eb232eafb..38ed7780f2e6 100644 --- a/numpy/_core/tests/test_scalarprint.py +++ b/numpy/_core/tests/test_scalarprint.py @@ -1,15 +1,13 @@ """ Test printing of scalar types. """ -import code import platform + import pytest -import sys -from tempfile import TemporaryFile import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, IS_MUSL) +from numpy.testing import IS_MUSL, assert_, assert_equal, assert_raises + class TestRealScalars: def test_str(self): diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 9e4ef3a8e6e9..f7b944be08b7 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -1,16 +1,34 @@ import pytest + import numpy as np from numpy._core import ( - array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack, - newaxis, concatenate, stack - ) + arange, + array, + atleast_1d, + atleast_2d, + atleast_3d, + block, + concatenate, + hstack, + newaxis, + stack, + vstack, +) +from numpy._core.shape_base import ( + _block_concatenate, + _block_dispatcher, + _block_setup, + _block_slicing, +) from numpy.exceptions import AxisError -from numpy._core.shape_base import (_block_dispatcher, _block_setup, - _block_concatenate, _block_slicing) from numpy.testing import ( - assert_, assert_raises, assert_array_equal, assert_equal, - assert_raises_regex, assert_warns, IS_PYPY - ) + IS_PYPY, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) class TestAtleast1d: diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index fdcab688963e..acea4315e679 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -4,9 +4,12 @@ import math import operator import re + import pytest -from numpy._core._simd import targets, clear_floatstatus, get_floatstatus + from numpy._core._multiarray_umath import __cpu_baseline__ +from numpy._core._simd import 
clear_floatstatus, get_floatstatus, targets + def check_floatstatus(divbyzero=False, overflow=False, underflow=False, invalid=False, diff --git a/numpy/_core/tests/test_simd_module.py b/numpy/_core/tests/test_simd_module.py index f39d680c47fa..dca83fd427b6 100644 --- a/numpy/_core/tests/test_simd_module.py +++ b/numpy/_core/tests/test_simd_module.py @@ -1,5 +1,7 @@ import pytest + from numpy._core._simd import targets + """ This testing unit only for checking the sanity of common functionality, therefore all we need is just to take one submodule that represents any diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 66b642cbd73a..9bab810d4421 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -5,12 +5,13 @@ import sys import tempfile -import numpy as np import pytest +import numpy as np +from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype +from numpy._core.tests._natype import pd_NA from numpy.dtypes import StringDType -from numpy._core.tests._natype import pd_NA, get_stringdtype_dtype as get_dtype -from numpy.testing import assert_array_equal, IS_PYPY +from numpy.testing import IS_PYPY, assert_array_equal @pytest.fixture @@ -127,8 +128,8 @@ def test_null_roundtripping(): def test_string_too_large_error(): arr = np.array(["a", "b", "c"], dtype=StringDType()) - with pytest.raises(MemoryError): - arr * (2**63 - 2) + with pytest.raises(OverflowError): + arr * (sys.maxsize + 1) @pytest.mark.parametrize( @@ -1305,11 +1306,10 @@ def test_unary(string_array, unicode_array, function_name): # to avoid these errors we'd need to add NA support to _vec_string with pytest.raises((ValueError, TypeError)): func(na_arr) + elif function_name == "splitlines": + assert func(na_arr)[0] == func(dtype.na_object)[()] else: - if function_name == "splitlines": - assert func(na_arr)[0] == func(dtype.na_object)[()] - else: - assert func(na_arr)[0] == func(dtype.na_object) + assert 
func(na_arr)[0] == func(dtype.na_object) return if function_name == "str_len" and not is_str: # str_len always errors for any non-string null, even NA ones because diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 7960142162c5..56e928df4d7b 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -1,10 +1,10 @@ +import operator import sys + import pytest -import operator import numpy as np - -from numpy.testing import assert_array_equal, assert_raises, IS_PYPY +from numpy.testing import IS_PYPY, assert_array_equal, assert_raises from numpy.testing._private.utils import requires_memory COMPARISONS = [ @@ -224,9 +224,20 @@ def test_multiply_raises(self, dt): with pytest.raises(TypeError, match="unsupported type"): np.strings.multiply(np.array("abc", dtype=dt), 3.14) - with pytest.raises(MemoryError): + with pytest.raises(OverflowError): np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize) + def test_inplace_multiply(self, dt): + arr = np.array(['foo ', 'bar'], dtype=dt) + arr *= 2 + if dt != "T": + assert_array_equal(arr, np.array(['foo ', 'barb'], dtype=dt)) + else: + assert_array_equal(arr, ['foo foo ', 'barbar']) + + with pytest.raises(OverflowError): + arr *= sys.maxsize + @pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32, np.int64, np.int_]) def test_multiply_integer_dtypes(self, i_dt, dt): diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 26844fabd437..21ebc02c2625 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1,27 +1,35 @@ -import warnings -import itertools -import sys import ctypes as ct +import itertools import pickle +import sys +import warnings import pytest from pytest import param import numpy as np -import numpy._core.umath as ncu -import numpy._core._umath_tests as umt -import numpy.linalg._umath_linalg as uml import numpy._core._operand_flag_tests as opflag_tests import 
numpy._core._rational_tests as _rational_tests +import numpy._core._umath_tests as umt +import numpy._core.umath as ncu +import numpy.linalg._umath_linalg as uml from numpy.exceptions import AxisError from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, - assert_almost_equal, assert_array_almost_equal, assert_no_warnings, - assert_allclose, HAS_REFCOUNT, suppress_warnings, IS_WASM, IS_PYPY, - ) + HAS_REFCOUNT, + IS_PYPY, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, + suppress_warnings, +) from numpy.testing._private.utils import requires_memory - UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() if isinstance(obj, np.ufunc)] UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] @@ -484,8 +492,8 @@ def test_signature_dtype_type(self): np.add(3, 4, signature=(float_dtype, float_dtype, None)) @pytest.mark.parametrize("get_kwarg", [ - lambda dt: {"dtype": dt}, - lambda dt: {"signature": (dt, None, None)}]) + param(lambda dt: {"dtype": dt}, id="dtype"), + param(lambda dt: {"signature": (dt, None, None)}, id="signature")]) def test_signature_dtype_instances_allowed(self, get_kwarg): # We allow certain dtype instances when there is a clear singleton # and the given one is equivalent; mainly for backcompat. 
@@ -495,13 +503,9 @@ def test_signature_dtype_instances_allowed(self, get_kwarg): assert int64 is not int64_2 assert np.add(1, 2, **get_kwarg(int64_2)).dtype == int64 - td = np.timedelta(2, "s") + td = np.timedelta64(2, "s") assert np.add(td, td, **get_kwarg("m8")).dtype == "m8[s]" - @pytest.mark.parametrize("get_kwarg", [ - param(lambda x: {"dtype": x}, id="dtype"), - param(lambda x: {"signature": (x, None, None)}, id="signature")]) - def test_signature_dtype_instances_allowed(self, get_kwarg): msg = "The `dtype` and `signature` arguments to ufuncs" with pytest.raises(TypeError, match=msg): @@ -1476,7 +1480,7 @@ def slice_n(n): return ret def broadcastable(s1, s2): - return s1 == s2 or s1 == 1 or s2 == 1 + return s1 == s2 or 1 in {s1, s2} permute_3 = permute_n(3) slice_3 = slice_n(3) + ((slice(None, None, -1),) * 3,) diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 22ad1b8ac302..001a7bffbcc8 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1,24 +1,38 @@ -import platform -import warnings import fnmatch import itertools -import pytest -import sys import operator +import platform +import sys +import warnings +from collections import namedtuple from fractions import Fraction from functools import reduce -from collections import namedtuple -import numpy._core.umath as ncu -from numpy._core import _umath_tests as ncu_tests, sctypes +import pytest + import numpy as np +import numpy._core.umath as ncu +from numpy._core import _umath_tests as ncu_tests +from numpy._core import sctypes from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_raises_regex, - assert_array_equal, assert_almost_equal, assert_array_almost_equal, - assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings, - _gen_alignment_data, assert_array_almost_equal_nulp, IS_WASM, IS_MUSL, - IS_PYPY, HAS_REFCOUNT - ) + HAS_REFCOUNT, + IS_MUSL, + IS_PYPY, + IS_WASM, + _gen_alignment_data, + assert_, 
+ assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_no_warnings, + assert_raises, + assert_raises_regex, + suppress_warnings, +) from numpy.testing._private.utils import _glibc_older_than UFUNCS = [obj for obj in np._core.umath.__dict__.values() @@ -1865,8 +1879,15 @@ def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape): # FIXME: NAN raises FP invalid exception: # - ceil/float16 on MSVC:32-bit # - spacing/float16 on almost all platforms + # - spacing/float32,float64 on Windows MSVC with VS2022 if ufunc in (np.spacing, np.ceil) and dtype == 'e': return + # Skip spacing tests with NaN on Windows MSVC (all dtypes) + import platform + if (ufunc == np.spacing and + platform.system() == 'Windows' and + any(np.isnan(d) if isinstance(d, (int, float)) else False for d in data)): + pytest.skip("spacing with NaN generates warnings on Windows/VS2022") array = np.array(data, dtype=dtype) with assert_no_warnings(): ufunc(array) diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index a0e0cbccc596..da9419d63a8a 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -1,12 +1,14 @@ -import numpy as np import os -from os import path import sys +from ctypes import POINTER, c_double, c_float, c_int, c_longlong, cast, pointer +from os import path + import pytest -from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER + +import numpy as np +from numpy._core._multiarray_umath import __cpu_features__ from numpy.testing import assert_array_max_ulp from numpy.testing._private.utils import _glibc_older_than -from numpy._core._multiarray_umath import __cpu_features__ UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() if isinstance(obj, np.ufunc)] diff --git a/numpy/_core/tests/test_umath_complex.py 
b/numpy/_core/tests/test_umath_complex.py index eb221f15f327..8f6f5c682a91 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -1,13 +1,19 @@ -import sys import platform +import sys + import pytest import numpy as np + # import the c-extension module directly since _arg is not exported via umath import numpy._core._multiarray_umath as ncu from numpy.testing import ( - assert_raises, assert_equal, assert_array_equal, assert_almost_equal, assert_array_max_ulp - ) + assert_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, +) # TODO: branch cuts (use Pauli code) # TODO: conj 'symmetry' diff --git a/numpy/_core/tests/test_unicode.py b/numpy/_core/tests/test_unicode.py index 6ca0cd46d781..6a86503a35ae 100644 --- a/numpy/_core/tests/test_unicode.py +++ b/numpy/_core/tests/test_unicode.py @@ -1,6 +1,7 @@ import numpy as np -from numpy.testing import assert_, assert_equal, assert_array_equal +from numpy.testing import assert_, assert_array_equal, assert_equal + def buffer_length(arr): if isinstance(arr, str): @@ -133,7 +134,8 @@ def test_valuesSD(self): def test_valuesMD(self): # Check creation of multi-dimensional objects with values - ua = np.array([[[self.ucs_value * self.ulen] * 2] * 3] * 4, dtype=f'U{self.ulen}') + data = [[[self.ucs_value * self.ulen] * 2] * 3] * 4 + ua = np.array(data, dtype=f'U{self.ulen}') self.content_check(ua, ua[0, 0, 0], 4 * self.ulen * 2 * 3 * 4) self.content_check(ua, ua[-1, -1, -1], 4 * self.ulen * 2 * 3 * 4) diff --git a/numpy/_core/umath.py b/numpy/_core/umath.py index 0ec85c79344e..94f97c059187 100644 --- a/numpy/_core/umath.py +++ b/numpy/_core/umath.py @@ -7,20 +7,40 @@ """ import numpy + from . import _multiarray_umath -from ._multiarray_umath import * # noqa: F403 +from ._multiarray_umath import * + # These imports are needed for backward compatibility, # do not change them. 
issue gh-11862 # _ones_like is semi-public, on purpose not added to __all__ -from ._multiarray_umath import ( - _UFUNC_API, _add_newdoc_ufunc, _ones_like, _get_extobj_dict, _make_extobj, - _extobj_contextvar) # These imports are needed for the strip & replace implementations from ._multiarray_umath import ( - _replace, _strip_whitespace, _lstrip_whitespace, _rstrip_whitespace, - _strip_chars, _lstrip_chars, _rstrip_chars, _expandtabs_length, - _expandtabs, _center, _ljust, _rjust, _zfill, _partition, _partition_index, - _rpartition, _rpartition_index, _slice) + _UFUNC_API, + _add_newdoc_ufunc, + _center, + _expandtabs, + _expandtabs_length, + _extobj_contextvar, + _get_extobj_dict, + _ljust, + _lstrip_chars, + _lstrip_whitespace, + _make_extobj, + _ones_like, + _partition, + _partition_index, + _replace, + _rjust, + _rpartition, + _rpartition_index, + _rstrip_chars, + _rstrip_whitespace, + _slice, + _strip_chars, + _strip_whitespace, + _zfill, +) __all__ = [ 'absolute', 'add', diff --git a/numpy/_distributor_init.py b/numpy/_distributor_init.py index 25b0eed79fca..f608036a2405 100644 --- a/numpy/_distributor_init.py +++ b/numpy/_distributor_init.py @@ -10,6 +10,6 @@ """ try: - from . import _distributor_init_local + from . 
import _distributor_init_local # noqa: F401 except ImportError: pass diff --git a/numpy/_expired_attrs_2_0.py b/numpy/_expired_attrs_2_0.py index f5eb59e5ea17..1397134e3f8c 100644 --- a/numpy/_expired_attrs_2_0.py +++ b/numpy/_expired_attrs_2_0.py @@ -25,7 +25,6 @@ "It's still available as `np.lib.add_docstring`.", "add_newdoc_ufunc": "It's an internal function and doesn't have a replacement.", - "compat": "There's no replacement, as Python 2 is no longer supported.", "safe_eval": "Use `ast.literal_eval` instead.", "float_": "Use `np.float64` instead.", "complex_": "Use `np.complex128` instead.", diff --git a/numpy/_expired_attrs_2_0.pyi b/numpy/_expired_attrs_2_0.pyi index 05c630c9b767..14524689c1c5 100644 --- a/numpy/_expired_attrs_2_0.pyi +++ b/numpy/_expired_attrs_2_0.pyi @@ -18,7 +18,6 @@ class _ExpiredAttributesType(TypedDict): add_newdoc: str add_docstring: str add_newdoc_ufunc: str - compat: str safe_eval: str float_: str complex_: str diff --git a/numpy/_pyinstaller/hook-numpy.py b/numpy/_pyinstaller/hook-numpy.py index 84f3626b43d5..61c224b33810 100644 --- a/numpy/_pyinstaller/hook-numpy.py +++ b/numpy/_pyinstaller/hook-numpy.py @@ -5,8 +5,8 @@ https://pyinstaller.readthedocs.io/en/stable/hooks.html """ -from PyInstaller.compat import is_conda, is_pure_conda -from PyInstaller.utils.hooks import collect_dynamic_libs, is_module_satisfies +from PyInstaller.compat import is_pure_conda +from PyInstaller.utils.hooks import collect_dynamic_libs # Collect all DLLs inside numpy's installation folder, dump them into built # app's root. diff --git a/numpy/_pyinstaller/hook-numpy.pyi b/numpy/_pyinstaller/hook-numpy.pyi index 2642996dad7e..6da4914d7e5a 100644 --- a/numpy/_pyinstaller/hook-numpy.pyi +++ b/numpy/_pyinstaller/hook-numpy.pyi @@ -1,13 +1,6 @@ from typing import Final -# from `PyInstaller.compat` -is_conda: Final[bool] -is_pure_conda: Final[bool] +binaries: Final[list[tuple[str, str]]] = ... 
-# from `PyInstaller.utils.hooks` -def is_module_satisfies(requirements: str, version: None = None, version_attr: None = None) -> bool: ... - -binaries: Final[list[tuple[str, str]]] - -hiddenimports: Final[list[str]] -excludedimports: Final[list[str]] +hiddenimports: Final[list[str]] = ... +excludedimports: Final[list[str]] = ... diff --git a/numpy/_pyinstaller/tests/__init__.py b/numpy/_pyinstaller/tests/__init__.py index f7c033bcf503..4ed8fdd53f8c 100644 --- a/numpy/_pyinstaller/tests/__init__.py +++ b/numpy/_pyinstaller/tests/__init__.py @@ -1,6 +1,6 @@ -from numpy.testing import IS_WASM, IS_EDITABLE import pytest +from numpy.testing import IS_EDITABLE, IS_WASM if IS_WASM: pytest.skip( diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index f41d54f36bec..77342e44aea0 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -28,8 +28,8 @@ simplify circular import issues. For the same reason, it contains no numpy imports at module scope, instead importing numpy within function calls. """ -import sys import os +import sys __all__ = ['PytestTester'] @@ -123,9 +123,10 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, True """ - import pytest import warnings + import pytest + module = sys.modules[self.module_name] module_path = os.path.abspath(module.__path__[0]) @@ -141,7 +142,7 @@ def __call__(self, label='fast', verbose=1, extra_argv=None, # Filter out distutils cpu warnings (could be localized to # distutils tests). ASV has problems with top level import, # so fetch module for suppression here. - from numpy.distutils import cpuinfo + from numpy.distutils import cpuinfo # noqa: F401 # Filter out annoying import messages. Want these in both develop and # release mode. 
diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index a0ed7cd53622..16a7eee66ebd 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -1,150 +1,148 @@ """Private counterpart of ``numpy.typing``.""" -from __future__ import annotations +from ._array_like import ArrayLike as ArrayLike +from ._array_like import NDArray as NDArray +from ._array_like import _ArrayLike as _ArrayLike +from ._array_like import _ArrayLikeAnyString_co as _ArrayLikeAnyString_co +from ._array_like import _ArrayLikeBool_co as _ArrayLikeBool_co +from ._array_like import _ArrayLikeBytes_co as _ArrayLikeBytes_co +from ._array_like import _ArrayLikeComplex128_co as _ArrayLikeComplex128_co +from ._array_like import _ArrayLikeComplex_co as _ArrayLikeComplex_co +from ._array_like import _ArrayLikeDT64_co as _ArrayLikeDT64_co +from ._array_like import _ArrayLikeFloat64_co as _ArrayLikeFloat64_co +from ._array_like import _ArrayLikeFloat_co as _ArrayLikeFloat_co +from ._array_like import _ArrayLikeInt as _ArrayLikeInt +from ._array_like import _ArrayLikeInt_co as _ArrayLikeInt_co +from ._array_like import _ArrayLikeNumber_co as _ArrayLikeNumber_co +from ._array_like import _ArrayLikeObject_co as _ArrayLikeObject_co +from ._array_like import _ArrayLikeStr_co as _ArrayLikeStr_co +from ._array_like import _ArrayLikeString_co as _ArrayLikeString_co +from ._array_like import _ArrayLikeTD64_co as _ArrayLikeTD64_co +from ._array_like import _ArrayLikeUInt_co as _ArrayLikeUInt_co +from ._array_like import _ArrayLikeVoid_co as _ArrayLikeVoid_co +from ._array_like import _FiniteNestedSequence as _FiniteNestedSequence +from ._array_like import _SupportsArray as _SupportsArray +from ._array_like import _SupportsArrayFunc as _SupportsArrayFunc -from ._nested_sequence import ( - _NestedSequence as _NestedSequence, -) +# +from ._char_codes import _BoolCodes as _BoolCodes +from ._char_codes import _ByteCodes as _ByteCodes +from ._char_codes import _BytesCodes as _BytesCodes +from 
._char_codes import _CDoubleCodes as _CDoubleCodes +from ._char_codes import _CharacterCodes as _CharacterCodes +from ._char_codes import _CLongDoubleCodes as _CLongDoubleCodes +from ._char_codes import _Complex64Codes as _Complex64Codes +from ._char_codes import _Complex128Codes as _Complex128Codes +from ._char_codes import _ComplexFloatingCodes as _ComplexFloatingCodes +from ._char_codes import _CSingleCodes as _CSingleCodes +from ._char_codes import _DoubleCodes as _DoubleCodes +from ._char_codes import _DT64Codes as _DT64Codes +from ._char_codes import _FlexibleCodes as _FlexibleCodes +from ._char_codes import _Float16Codes as _Float16Codes +from ._char_codes import _Float32Codes as _Float32Codes +from ._char_codes import _Float64Codes as _Float64Codes +from ._char_codes import _FloatingCodes as _FloatingCodes +from ._char_codes import _GenericCodes as _GenericCodes +from ._char_codes import _HalfCodes as _HalfCodes +from ._char_codes import _InexactCodes as _InexactCodes +from ._char_codes import _Int8Codes as _Int8Codes +from ._char_codes import _Int16Codes as _Int16Codes +from ._char_codes import _Int32Codes as _Int32Codes +from ._char_codes import _Int64Codes as _Int64Codes +from ._char_codes import _IntCCodes as _IntCCodes +from ._char_codes import _IntCodes as _IntCodes +from ._char_codes import _IntegerCodes as _IntegerCodes +from ._char_codes import _IntPCodes as _IntPCodes +from ._char_codes import _LongCodes as _LongCodes +from ._char_codes import _LongDoubleCodes as _LongDoubleCodes +from ._char_codes import _LongLongCodes as _LongLongCodes +from ._char_codes import _NumberCodes as _NumberCodes +from ._char_codes import _ObjectCodes as _ObjectCodes +from ._char_codes import _ShortCodes as _ShortCodes +from ._char_codes import _SignedIntegerCodes as _SignedIntegerCodes +from ._char_codes import _SingleCodes as _SingleCodes +from ._char_codes import _StrCodes as _StrCodes +from ._char_codes import _StringCodes as _StringCodes +from ._char_codes import 
_TD64Codes as _TD64Codes +from ._char_codes import _UByteCodes as _UByteCodes +from ._char_codes import _UInt8Codes as _UInt8Codes +from ._char_codes import _UInt16Codes as _UInt16Codes +from ._char_codes import _UInt32Codes as _UInt32Codes +from ._char_codes import _UInt64Codes as _UInt64Codes +from ._char_codes import _UIntCCodes as _UIntCCodes +from ._char_codes import _UIntCodes as _UIntCodes +from ._char_codes import _UIntPCodes as _UIntPCodes +from ._char_codes import _ULongCodes as _ULongCodes +from ._char_codes import _ULongLongCodes as _ULongLongCodes +from ._char_codes import _UnsignedIntegerCodes as _UnsignedIntegerCodes +from ._char_codes import _UShortCodes as _UShortCodes +from ._char_codes import _VoidCodes as _VoidCodes + +# +from ._dtype_like import DTypeLike as DTypeLike +from ._dtype_like import _DTypeLike as _DTypeLike +from ._dtype_like import _DTypeLikeBool as _DTypeLikeBool +from ._dtype_like import _DTypeLikeBytes as _DTypeLikeBytes +from ._dtype_like import _DTypeLikeComplex as _DTypeLikeComplex +from ._dtype_like import _DTypeLikeComplex_co as _DTypeLikeComplex_co +from ._dtype_like import _DTypeLikeDT64 as _DTypeLikeDT64 +from ._dtype_like import _DTypeLikeFloat as _DTypeLikeFloat +from ._dtype_like import _DTypeLikeInt as _DTypeLikeInt +from ._dtype_like import _DTypeLikeObject as _DTypeLikeObject +from ._dtype_like import _DTypeLikeStr as _DTypeLikeStr +from ._dtype_like import _DTypeLikeTD64 as _DTypeLikeTD64 +from ._dtype_like import _DTypeLikeUInt as _DTypeLikeUInt +from ._dtype_like import _DTypeLikeVoid as _DTypeLikeVoid +from ._dtype_like import _SupportsDType as _SupportsDType +from ._dtype_like import _VoidDTypeLike as _VoidDTypeLike + +# +from ._nbit import _NBitByte as _NBitByte +from ._nbit import _NBitDouble as _NBitDouble +from ._nbit import _NBitHalf as _NBitHalf +from ._nbit import _NBitInt as _NBitInt +from ._nbit import _NBitIntC as _NBitIntC +from ._nbit import _NBitIntP as _NBitIntP +from ._nbit import _NBitLong as 
_NBitLong +from ._nbit import _NBitLongDouble as _NBitLongDouble +from ._nbit import _NBitLongLong as _NBitLongLong +from ._nbit import _NBitShort as _NBitShort +from ._nbit import _NBitSingle as _NBitSingle + +# from ._nbit_base import ( - NBitBase as NBitBase, # pyright: ignore[reportDeprecated] - _8Bit as _8Bit, - _16Bit as _16Bit, - _32Bit as _32Bit, - _64Bit as _64Bit, - _96Bit as _96Bit, - _128Bit as _128Bit, -) -from ._nbit import ( - _NBitByte as _NBitByte, - _NBitShort as _NBitShort, - _NBitIntC as _NBitIntC, - _NBitIntP as _NBitIntP, - _NBitInt as _NBitInt, - _NBitLong as _NBitLong, - _NBitLongLong as _NBitLongLong, - _NBitHalf as _NBitHalf, - _NBitSingle as _NBitSingle, - _NBitDouble as _NBitDouble, - _NBitLongDouble as _NBitLongDouble, -) -from ._char_codes import ( - _BoolCodes as _BoolCodes, - _UInt8Codes as _UInt8Codes, - _UInt16Codes as _UInt16Codes, - _UInt32Codes as _UInt32Codes, - _UInt64Codes as _UInt64Codes, - _Int8Codes as _Int8Codes, - _Int16Codes as _Int16Codes, - _Int32Codes as _Int32Codes, - _Int64Codes as _Int64Codes, - _Float16Codes as _Float16Codes, - _Float32Codes as _Float32Codes, - _Float64Codes as _Float64Codes, - _Complex64Codes as _Complex64Codes, - _Complex128Codes as _Complex128Codes, - _ByteCodes as _ByteCodes, - _ShortCodes as _ShortCodes, - _IntCCodes as _IntCCodes, - _IntPCodes as _IntPCodes, - _IntCodes as _IntCodes, - _LongCodes as _LongCodes, - _LongLongCodes as _LongLongCodes, - _UByteCodes as _UByteCodes, - _UShortCodes as _UShortCodes, - _UIntCCodes as _UIntCCodes, - _UIntPCodes as _UIntPCodes, - _UIntCodes as _UIntCodes, - _ULongCodes as _ULongCodes, - _ULongLongCodes as _ULongLongCodes, - _HalfCodes as _HalfCodes, - _SingleCodes as _SingleCodes, - _DoubleCodes as _DoubleCodes, - _LongDoubleCodes as _LongDoubleCodes, - _CSingleCodes as _CSingleCodes, - _CDoubleCodes as _CDoubleCodes, - _CLongDoubleCodes as _CLongDoubleCodes, - _DT64Codes as _DT64Codes, - _TD64Codes as _TD64Codes, - _StrCodes as _StrCodes, - 
_BytesCodes as _BytesCodes, - _VoidCodes as _VoidCodes, - _ObjectCodes as _ObjectCodes, - _StringCodes as _StringCodes, - _UnsignedIntegerCodes as _UnsignedIntegerCodes, - _SignedIntegerCodes as _SignedIntegerCodes, - _IntegerCodes as _IntegerCodes, - _FloatingCodes as _FloatingCodes, - _ComplexFloatingCodes as _ComplexFloatingCodes, - _InexactCodes as _InexactCodes, - _NumberCodes as _NumberCodes, - _CharacterCodes as _CharacterCodes, - _FlexibleCodes as _FlexibleCodes, - _GenericCodes as _GenericCodes, -) -from ._scalars import ( - _CharLike_co as _CharLike_co, - _BoolLike_co as _BoolLike_co, - _UIntLike_co as _UIntLike_co, - _IntLike_co as _IntLike_co, - _FloatLike_co as _FloatLike_co, - _ComplexLike_co as _ComplexLike_co, - _TD64Like_co as _TD64Like_co, - _NumberLike_co as _NumberLike_co, - _ScalarLike_co as _ScalarLike_co, - _VoidLike_co as _VoidLike_co, -) -from ._shape import ( - _Shape as _Shape, - _ShapeLike as _ShapeLike, -) -from ._dtype_like import ( - DTypeLike as DTypeLike, - _DTypeLike as _DTypeLike, - _SupportsDType as _SupportsDType, - _VoidDTypeLike as _VoidDTypeLike, - _DTypeLikeBool as _DTypeLikeBool, - _DTypeLikeUInt as _DTypeLikeUInt, - _DTypeLikeInt as _DTypeLikeInt, - _DTypeLikeFloat as _DTypeLikeFloat, - _DTypeLikeComplex as _DTypeLikeComplex, - _DTypeLikeTD64 as _DTypeLikeTD64, - _DTypeLikeDT64 as _DTypeLikeDT64, - _DTypeLikeObject as _DTypeLikeObject, - _DTypeLikeVoid as _DTypeLikeVoid, - _DTypeLikeStr as _DTypeLikeStr, - _DTypeLikeBytes as _DTypeLikeBytes, - _DTypeLikeComplex_co as _DTypeLikeComplex_co, -) -from ._array_like import ( - NDArray as NDArray, - ArrayLike as ArrayLike, - _ArrayLike as _ArrayLike, - _ArrayLikeInt as _ArrayLikeInt, - _ArrayLikeBool_co as _ArrayLikeBool_co, - _ArrayLikeUInt_co as _ArrayLikeUInt_co, - _ArrayLikeInt_co as _ArrayLikeInt_co, - _ArrayLikeFloat_co as _ArrayLikeFloat_co, - _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, - _ArrayLikeComplex_co as _ArrayLikeComplex_co, - _ArrayLikeComplex128_co as 
_ArrayLikeComplex128_co, - _ArrayLikeNumber_co as _ArrayLikeNumber_co, - _ArrayLikeTD64_co as _ArrayLikeTD64_co, - _ArrayLikeDT64_co as _ArrayLikeDT64_co, - _ArrayLikeObject_co as _ArrayLikeObject_co, - _ArrayLikeVoid_co as _ArrayLikeVoid_co, - _ArrayLikeStr_co as _ArrayLikeStr_co, - _ArrayLikeBytes_co as _ArrayLikeBytes_co, - _ArrayLikeString_co as _ArrayLikeString_co, - _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, - _FiniteNestedSequence as _FiniteNestedSequence, - _SupportsArray as _SupportsArray, - _SupportsArrayFunc as _SupportsArrayFunc, + NBitBase as NBitBase, # type: ignore[deprecated] # pyright: ignore[reportDeprecated] ) +from ._nbit_base import _8Bit as _8Bit +from ._nbit_base import _16Bit as _16Bit +from ._nbit_base import _32Bit as _32Bit +from ._nbit_base import _64Bit as _64Bit +from ._nbit_base import _96Bit as _96Bit +from ._nbit_base import _128Bit as _128Bit -from ._ufunc import ( - _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, - _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, - _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, - _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, - _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, -) +# +from ._nested_sequence import _NestedSequence as _NestedSequence + +# +from ._scalars import _BoolLike_co as _BoolLike_co +from ._scalars import _CharLike_co as _CharLike_co +from ._scalars import _ComplexLike_co as _ComplexLike_co +from ._scalars import _FloatLike_co as _FloatLike_co +from ._scalars import _IntLike_co as _IntLike_co +from ._scalars import _NumberLike_co as _NumberLike_co +from ._scalars import _ScalarLike_co as _ScalarLike_co +from ._scalars import _TD64Like_co as _TD64Like_co +from ._scalars import _UIntLike_co as _UIntLike_co +from ._scalars import _VoidLike_co as _VoidLike_co + +# +from ._shape import _AnyShape as _AnyShape +from ._shape import _Shape as _Shape +from ._shape import _ShapeLike as _ShapeLike + +# +from ._ufunc import _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1 +from ._ufunc import _UFunc_Nin1_Nout1 as 
_UFunc_Nin1_Nout1 +from ._ufunc import _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2 +from ._ufunc import _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1 +from ._ufunc import _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2 diff --git a/numpy/_typing/_add_docstring.py b/numpy/_typing/_add_docstring.py index da415f1b94c6..5330a6b3b715 100644 --- a/numpy/_typing/_add_docstring.py +++ b/numpy/_typing/_add_docstring.py @@ -120,7 +120,7 @@ def _parse_docstrings() -> str: add_newdoc('NDArray', repr(NDArray), """ - A `np.ndarray[tuple[int, ...], np.dtype[+ScalarType]] ` + A `np.ndarray[tuple[Any, ...], np.dtype[ScalarT]] ` type alias :term:`generic ` w.r.t. its `dtype.type `. @@ -137,10 +137,10 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[tuple[int, ...], numpy.dtype[~_ScalarT]] + numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[~_ScalarT]] >>> print(npt.NDArray[np.float64]) - numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.float64]] + numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[numpy.float64]] >>> NDArrayInt = npt.NDArray[np.int_] >>> a: NDArrayInt = np.arange(10) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index b4c291639d6a..6b071f4a0319 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -1,14 +1,13 @@ -from __future__ import annotations - import sys -from collections.abc import Collection, Callable, Sequence -from typing import Any, Protocol, TypeAlias, TypeVar, runtime_checkable, TYPE_CHECKING +from collections.abc import Callable, Collection, Sequence +from typing import TYPE_CHECKING, Any, Protocol, TypeAlias, TypeVar, runtime_checkable import numpy as np from numpy import dtype + from ._nbit_base import _32Bit, _64Bit from ._nested_sequence import _NestedSequence -from ._shape import _Shape +from ._shape import _AnyShape if TYPE_CHECKING: StringDType = np.dtypes.StringDType @@ -19,11 +18,10 @@ _T = TypeVar("_T") _ScalarT = TypeVar("_ScalarT", bound=np.generic) 
-_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) _DTypeT = TypeVar("_DTypeT", bound=dtype[Any]) _DTypeT_co = TypeVar("_DTypeT_co", covariant=True, bound=dtype[Any]) -NDArray: TypeAlias = np.ndarray[_Shape, dtype[_ScalarT]] +NDArray: TypeAlias = np.ndarray[_AnyShape, dtype[_ScalarT]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index dd7ae6fb1131..21df1d983fe6 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -9,39 +9,40 @@ See the `Mypy documentation`_ on protocols for more details. """ from typing import ( + Any, + NoReturn, + Protocol, TypeAlias, TypeVar, final, overload, - Any, - NoReturn, - Protocol, type_check_only, ) import numpy as np from numpy import ( + complex128, + complexfloating, + float64, + floating, generic, - number, - integer, - unsignedinteger, - signedinteger, int8, int_, - floating, - float64, - complexfloating, - complex128, + integer, + number, + signedinteger, + unsignedinteger, ) + +from . import NBitBase +from ._array_like import NDArray from ._nbit import _NBitInt +from ._nested_sequence import _NestedSequence from ._scalars import ( _BoolLike_co, _IntLike_co, _NumberLike_co, ) -from . 
import NBitBase -from ._array_like import NDArray -from ._nested_sequence import _NestedSequence _T1 = TypeVar("_T1") _T2 = TypeVar("_T2") diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index cd5be8e11864..c406b3098384 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -3,8 +3,8 @@ Any, Protocol, TypeAlias, - TypeVar, TypedDict, + TypeVar, runtime_checkable, ) @@ -12,17 +12,17 @@ from ._char_codes import ( _BoolCodes, - _NumberCodes, - _SignedIntegerCodes, - _UnsignedIntegerCodes, - _FloatingCodes, + _BytesCodes, _ComplexFloatingCodes, _DT64Codes, - _TD64Codes, - _BytesCodes, + _FloatingCodes, + _NumberCodes, + _ObjectCodes, + _SignedIntegerCodes, _StrCodes, + _TD64Codes, + _UnsignedIntegerCodes, _VoidCodes, - _ObjectCodes, ) _ScalarT = TypeVar("_ScalarT", bound=np.generic) diff --git a/numpy/_typing/_extended_precision.py b/numpy/_typing/_extended_precision.py index 73a1847ccbeb..c707e726af7e 100644 --- a/numpy/_typing/_extended_precision.py +++ b/numpy/_typing/_extended_precision.py @@ -6,6 +6,7 @@ """ import numpy as np + from . 
import _96Bit, _128Bit float96 = np.floating[_96Bit] diff --git a/numpy/_typing/_nbit.py b/numpy/_typing/_nbit.py index 70cfdede8025..60bce3245c7a 100644 --- a/numpy/_typing/_nbit.py +++ b/numpy/_typing/_nbit.py @@ -1,8 +1,8 @@ """A module with the precisions of platform-specific `~numpy.number`s.""" from typing import TypeAlias -from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit +from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit # To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin _NBitByte: TypeAlias = _8Bit diff --git a/numpy/_typing/_nbit_base.py b/numpy/_typing/_nbit_base.py index aa8b85cd1592..28d3e63c1769 100644 --- a/numpy/_typing/_nbit_base.py +++ b/numpy/_typing/_nbit_base.py @@ -1,7 +1,8 @@ """A module with the precisions of generic `~numpy.number` types.""" -from numpy._utils import set_module from typing import final +from numpy._utils import set_module + @final # Disallow the creation of arbitrary `NBitBase` subclasses @set_module("numpy.typing") @@ -29,7 +30,6 @@ class NBitBase: .. 
code-block:: python - >>> from __future__ import annotations >>> from typing import TypeVar, TYPE_CHECKING >>> import numpy as np >>> import numpy.typing as npt diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi index ccf8f5ceac45..d88c9f4d9fd9 100644 --- a/numpy/_typing/_nbit_base.pyi +++ b/numpy/_typing/_nbit_base.pyi @@ -3,7 +3,6 @@ # mypy: disable-error-code=misc from typing import final - from typing_extensions import deprecated # Deprecated in NumPy 2.3, 2025-05-01 diff --git a/numpy/_typing/_nested_sequence.py b/numpy/_typing/_nested_sequence.py index 23667fd46d89..e3362a9f21fe 100644 --- a/numpy/_typing/_nested_sequence.py +++ b/numpy/_typing/_nested_sequence.py @@ -1,14 +1,6 @@ """A module containing the `_NestedSequence` protocol.""" -from __future__ import annotations - -from typing import ( - Any, - TypeVar, - Protocol, - runtime_checkable, - TYPE_CHECKING, -) +from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable if TYPE_CHECKING: from collections.abc import Iterator @@ -36,8 +28,6 @@ class _NestedSequence(Protocol[_T_co]): -------- .. 
code-block:: python - >>> from __future__ import annotations - >>> from typing import TYPE_CHECKING >>> import numpy as np >>> from numpy._typing import _NestedSequence @@ -64,7 +54,7 @@ def __len__(self, /) -> int: """Implement ``len(self)``.""" raise NotImplementedError - def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: + def __getitem__(self, index: int, /) -> "_T_co | _NestedSequence[_T_co]": """Implement ``self[x]``.""" raise NotImplementedError @@ -72,11 +62,11 @@ def __contains__(self, x: object, /) -> bool: """Implement ``x in self``.""" raise NotImplementedError - def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + def __iter__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": """Implement ``iter(self)``.""" raise NotImplementedError - def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: + def __reversed__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]": """Implement ``reversed(self)``.""" raise NotImplementedError diff --git a/numpy/_typing/_shape.py b/numpy/_typing/_shape.py index 2b854d65153a..e297aef2f554 100644 --- a/numpy/_typing/_shape.py +++ b/numpy/_typing/_shape.py @@ -1,7 +1,8 @@ from collections.abc import Sequence -from typing import SupportsIndex, TypeAlias +from typing import Any, SupportsIndex, TypeAlias _Shape: TypeAlias = tuple[int, ...] +_AnyShape: TypeAlias = tuple[Any, ...] # Anything that can be coerced to a shape tuple _ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex] diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 766cde1ad420..790149d9c7fb 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -4,8 +4,9 @@ The signatures of the ufuncs are too varied to reasonably type with a single class. So instead, `ufunc` has been expanded into four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. 
-""" +""" # noqa: PYI021 +from types import EllipsisType from typing import ( Any, Generic, @@ -102,8 +103,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - out: None = ..., + x1: _ScalarLike_co, + /, + out: EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -115,8 +117,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + x1: ArrayLike, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -128,8 +131,9 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _SupportsArrayUFunc, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + x1: _SupportsArrayUFunc, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., *, where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., @@ -146,10 +150,10 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i /, ) -> None: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... 
@type_check_only class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @@ -176,7 +180,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = None, + out: EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -185,9 +189,9 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def __call__( self, x1: ArrayLike, - x2: NDArray[np.generic], + x2: NDArray[Any], /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -195,10 +199,10 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload # (array, array-like) -> array def __call__( self, - x1: NDArray[np.generic], + x1: NDArray[Any], x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -209,7 +213,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: NDArray[Any] | tuple[NDArray[Any]], *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -220,7 +224,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i x1: ArrayLike, x2: ArrayLike, /, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, *, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], @@ -239,7 +243,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i array: ArrayLike, axis: _ShapeLike | None = ..., dtype: 
DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., keepdims: bool = ..., initial: Any = ..., where: _ArrayLikeBool_co = ..., @@ -250,7 +254,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... def reduceat( @@ -259,7 +263,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | None = ..., + out: NDArray[Any] | EllipsisType | None = ..., ) -> NDArray[Any]: ... @overload # (scalar, scalar) -> scalar @@ -269,7 +273,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: _ScalarLike_co, /, *, - out: None = None, + out: EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> Any: ... @@ -277,21 +281,21 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def outer( self, A: ArrayLike, - B: NDArray[np.generic], + B: NDArray[Any], /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... @overload # (array, array-like) -> array def outer( self, - A: NDArray[np.generic], + A: NDArray[Any], B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... 
@@ -302,7 +306,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]], + out: NDArray[Any] | tuple[NDArray[Any]], dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... @@ -313,7 +317,7 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i B: ArrayLike, /, *, - out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = None, dtype: DTypeLike | None = None, **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any] | Any: ... @@ -340,10 +344,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = ..., + /, *, + out: EllipsisType | None = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -354,11 +360,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: ArrayLike, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: _2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -369,11 +376,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _SupportsArrayUFunc, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: _SupportsArrayUFunc, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: 
_2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -382,11 +390,11 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _3Tuple[str | None] = ..., ) -> _2Tuple[Any]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @@ -410,11 +418,13 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: _ScalarLike_co, - __x2: _ScalarLike_co, - __out1: None = ..., - __out2: None = ..., + x1: _ScalarLike_co, + x2: _ScalarLike_co, + out1: EllipsisType | None = ..., + out2: None = ..., + /, *, + out: EllipsisType | None = ..., where: _ArrayLikeBool_co | None = ..., casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -425,12 +435,13 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - __out1: NDArray[Any] | None = ..., - __out2: NDArray[Any] | None = ..., + x1: ArrayLike, + x2: ArrayLike, + out1: NDArray[Any] | EllipsisType | None = ..., + out2: NDArray[Any] | None = ..., + /, *, - out: _2Tuple[NDArray[Any]] = ..., + out: _2Tuple[NDArray[Any]] | EllipsisType = ..., where: _ArrayLikeBool_co | None = ..., casting: 
_CastingKind = ..., order: _OrderKACF = ..., @@ -439,11 +450,11 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _4Tuple[str | None] = ..., ) -> _2Tuple[NDArray[Any]]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... - def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] @@ -468,9 +479,10 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: None = ..., + x1: ArrayLike, + x2: ArrayLike, + /, + out: EllipsisType | None = ..., *, casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -482,9 +494,10 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @overload def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]], + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType, *, casting: _CastingKind = ..., order: _OrderKACF = ..., @@ -494,11 +507,11 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] axes: list[_2Tuple[SupportsIndex]] = ..., ) -> NDArray[Any]: ... - def at(self, *args, **kwargs) -> NoReturn: ... - def reduce(self, *args, **kwargs) -> NoReturn: ... - def accumulate(self, *args, **kwargs) -> NoReturn: ... 
- def reduceat(self, *args, **kwargs) -> NoReturn: ... - def outer(self, *args, **kwargs) -> NoReturn: ... + def at(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduce(self, *args: object, **kwargs: object) -> NoReturn: ... + def accumulate(self, *args: object, **kwargs: object) -> NoReturn: ... + def reduceat(self, *args: object, **kwargs: object) -> NoReturn: ... + def outer(self, *args: object, **kwargs: object) -> NoReturn: ... @type_check_only class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): @@ -556,7 +569,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co: ... @overload @@ -564,7 +577,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -580,7 +593,7 @@ class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, x1: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], ) -> Any: ... @@ -611,7 +624,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _ScalarLike_co, x2: _ScalarLike_co, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... @overload @@ -620,7 +633,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: ArrayLike, x2: ArrayLike, /, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... 
@overload @@ -638,7 +651,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: _SupportsArrayUFunc, x2: _SupportsArrayUFunc | ArrayLike, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @overload @@ -647,7 +660,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno x1: ArrayLike, x2: _SupportsArrayUFunc, /, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @@ -656,11 +669,11 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno @overload def reduce( self, + /, array: ArrayLike, axis: _ShapeLike | None, dtype: DTypeLike, out: _ArrayT, - /, keepdims: bool = ..., initial: _ScalarLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -685,7 +698,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., *, keepdims: Literal[True], initial: _ScalarLike_co = ..., @@ -698,7 +711,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., keepdims: bool = ..., initial: _ScalarLike_co = ..., where: _ArrayLikeBool_co = ..., @@ -707,12 +720,12 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno @overload def reduceat( self, + /, array: ArrayLike, indices: _ArrayLikeInt_co, axis: SupportsIndex, dtype: DTypeLike, out: _ArrayT, - /, ) -> _ArrayT: ... 
@overload def reduceat( @@ -733,7 +746,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... @overload def reduceat( @@ -743,21 +756,22 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno indices: _ArrayLikeInt_co, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., ) -> Any: ... @overload def accumulate( self, + /, array: ArrayLike, axis: SupportsIndex, dtype: DTypeLike, out: _ArrayT, - /, ) -> _ArrayT: ... @overload def accumulate( self, + /, array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., @@ -771,7 +785,7 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno array: ArrayLike, axis: SupportsIndex = ..., dtype: DTypeLike = ..., - out: None = ..., + out: EllipsisType | None = ..., ) -> NDArray[np.object_]: ... @overload @@ -779,8 +793,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _ScalarLike_co, B: _ScalarLike_co, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co: ... @overload @@ -788,8 +803,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: ArrayLike, B: ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -797,7 +813,8 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: ArrayLike, B: ArrayLike, - /, *, + /, + *, out: _ArrayT, **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> _ArrayT: ... 
@@ -806,8 +823,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _SupportsArrayUFunc, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @overload @@ -815,8 +833,9 @@ class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: igno self, A: _ScalarLike_co, B: _SupportsArrayUFunc | ArrayLike, - /, *, - out: None = ..., + /, + *, + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], ) -> Any: ... @@ -841,7 +860,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> _ReturnType_co: ... @overload @@ -852,7 +871,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> _ReturnType_co | NDArray[np.object_]: ... @overload @@ -874,7 +893,7 @@ class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # typ x3: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], ) -> Any: ... @@ -903,7 +922,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _ScalarLike_co, /, *xs: _ScalarLike_co, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> _2PTuple[_ReturnType_co]: ... 
@overload @@ -912,7 +931,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: ArrayLike, /, *xs: ArrayLike, - out: None = ..., + out: EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... @overload @@ -930,7 +949,7 @@ class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]) x1: _SupportsArrayUFunc | ArrayLike, /, *xs: _SupportsArrayUFunc | ArrayLike, - out: _2PTuple[NDArray[Any]] | None = ..., + out: _2PTuple[NDArray[Any]] | EllipsisType | None = ..., **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], ) -> Any: ... diff --git a/numpy/_utils/__init__.py b/numpy/_utils/__init__.py index 262c00674695..84ee99db1be8 100644 --- a/numpy/_utils/__init__.py +++ b/numpy/_utils/__init__.py @@ -9,9 +9,9 @@ """ import functools -import sys import warnings -from ._convertions import asunicode, asbytes + +from ._convertions import asbytes, asunicode def set_module(module): @@ -29,8 +29,8 @@ def decorator(func): if module is not None: if isinstance(func, type): try: - func._module_file = sys.modules.get(func.__module__).__file__ - except (AttributeError, KeyError): + func._module_source = func.__module__ + except (AttributeError): pass func.__module__ = module diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index f3472df9a554..2ed4e88b3e32 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -1,8 +1,7 @@ +from _typeshed import IdentityFunction from collections.abc import Callable, Iterable from typing import Protocol, TypeVar, overload, type_check_only -from _typeshed import IdentityFunction - from ._convertions import asbytes as asbytes from ._convertions import asunicode as asunicode diff --git a/numpy/_utils/_inspect.pyi b/numpy/_utils/_inspect.pyi index d53c3c40fcf5..40546d2f4497 100644 --- a/numpy/_utils/_inspect.pyi +++ b/numpy/_utils/_inspect.pyi @@ -1,8 +1,7 @@ import types +from _typeshed import 
SupportsLenAndGetItem from collections.abc import Callable, Mapping from typing import Any, Final, TypeAlias, TypeVar, overload - -from _typeshed import SupportsLenAndGetItem from typing_extensions import TypeIs __all__ = ["formatargspec", "getargspec"] diff --git a/numpy/_utils/_pep440.py b/numpy/_utils/_pep440.py index 93d3053a08c7..035a0695e5ee 100644 --- a/numpy/_utils/_pep440.py +++ b/numpy/_utils/_pep440.py @@ -33,7 +33,6 @@ import itertools import re - __all__ = [ "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN", ] diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi index 29dd4c912aa9..2c338d4e5b14 100644 --- a/numpy/_utils/_pep440.pyi +++ b/numpy/_utils/_pep440.pyi @@ -13,7 +13,6 @@ from typing import ( from typing import ( Literal as L, ) - from typing_extensions import TypeIs __all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] diff --git a/numpy/char/__init__.py b/numpy/char/__init__.py index 9eb66c180f59..d98d38c1d6af 100644 --- a/numpy/char/__init__.py +++ b/numpy/char/__init__.py @@ -1,2 +1,2 @@ -from numpy._core.defchararray import __all__, __doc__ from numpy._core.defchararray import * +from numpy._core.defchararray import __all__, __doc__ diff --git a/numpy/char/__init__.pyi b/numpy/char/__init__.pyi index 2abf86d305f8..e151f20e5f38 100644 --- a/numpy/char/__init__.pyi +++ b/numpy/char/__init__.pyi @@ -1,34 +1,39 @@ from numpy._core.defchararray import ( - equal, - not_equal, - greater_equal, - less_equal, - greater, - less, - str_len, add, - multiply, - mod, + array, + asarray, capitalize, center, + chararray, + compare_chararrays, count, decode, encode, endswith, + equal, expandtabs, find, + greater, + greater_equal, index, isalnum, isalpha, + isdecimal, isdigit, islower, + isnumeric, isspace, istitle, isupper, join, + less, + less_equal, ljust, lower, lstrip, + mod, + multiply, + not_equal, partition, replace, rfind, @@ -40,18 +45,13 @@ from numpy._core.defchararray import ( 
split, splitlines, startswith, + str_len, strip, swapcase, title, translate, upper, zfill, - isnumeric, - isdecimal, - array, - asarray, - compare_chararrays, - chararray ) __all__ = [ diff --git a/numpy/compat/__init__.py b/numpy/compat/__init__.py deleted file mode 100644 index 8f926c4bd568..000000000000 --- a/numpy/compat/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -Compatibility module. - -This module contains duplicated code from Python itself or 3rd party -extensions, which may be included for the following reasons: - - * compatibility - * we may only need a small subset of the copied library/module - -This module is deprecated since 1.26.0 and will be removed in future versions. - -""" - -import warnings - -from numpy._utils import _inspect -from numpy._utils._inspect import getargspec, formatargspec -from . import py3k -from .py3k import * - -warnings.warn( - "`np.compat`, which was used during the Python 2 to 3 transition," - " is deprecated since 1.26.0, and will be removed", - DeprecationWarning, stacklevel=2 -) - -__all__ = [] -__all__.extend(_inspect.__all__) -__all__.extend(py3k.__all__) diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py deleted file mode 100644 index 5a94b1f209ba..000000000000 --- a/numpy/compat/py3k.py +++ /dev/null @@ -1,144 +0,0 @@ -""" -Python 3.X compatibility tools. - -While this file was originally intended for Python 2 -> 3 transition, -it is now used to create a compatibility layer between different -minor versions of Python 3. - -While the active version of numpy may not support a given version of python, we -allow downstream libraries to continue to use these shims for forward -compatibility with numpy while they transition their code to newer versions of -Python. 
-""" -__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', - 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', - 'asstr', 'open_latin1', 'long', 'basestring', 'sixu', - 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path', - 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike'] - -import sys -import os -from pathlib import Path -import io -try: - import pickle5 as pickle -except ImportError: - import pickle - -long = int -integer_types = (int,) -basestring = str -unicode = str -bytes = bytes - -def asunicode(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - -def asbytes(s): - if isinstance(s, bytes): - return s - return str(s).encode('latin1') - -def asstr(s): - if isinstance(s, bytes): - return s.decode('latin1') - return str(s) - -def isfileobj(f): - if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)): - return False - try: - # BufferedReader/Writer may raise OSError when - # fetching `fileno()` (e.g. when wrapping BytesIO). - f.fileno() - return True - except OSError: - return False - -def open_latin1(filename, mode='r'): - return open(filename, mode=mode, encoding='iso-8859-1') - -def sixu(s): - return s - - -strchar = 'U' - -def getexception(): - return sys.exc_info()[1] - -def asbytes_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asbytes_nested(y) for y in x] - else: - return asbytes(x) - -def asunicode_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asunicode_nested(y) for y in x] - else: - return asunicode(x) - -def is_pathlib_path(obj): - """ - Check whether obj is a `pathlib.Path` object. - - Prefer using ``isinstance(obj, os.PathLike)`` instead of this function. - """ - return isinstance(obj, Path) - -# from Python 3.7 -class contextlib_nullcontext: - """Context manager that does no additional processing. 
- - Used as a stand-in for a normal context manager, when a particular - block of code is only sometimes used with a normal context manager: - - cm = optional_cm if condition else nullcontext() - with cm: - # Perform operation, using optional_cm if condition is True - - .. note:: - Prefer using `contextlib.nullcontext` instead of this context manager. - """ - - def __init__(self, enter_result=None): - self.enter_result = enter_result - - def __enter__(self): - return self.enter_result - - def __exit__(self, *excinfo): - pass - - -def npy_load_module(name, fn, info=None): - """ - Load a module. Uses ``load_module`` which will be deprecated in python - 3.12. An alternative that uses ``exec_module`` is in - numpy.distutils.misc_util.exec_mod_from_location - - Parameters - ---------- - name : str - Full module name. - fn : str - Path to module file. - info : tuple, optional - Only here for backward compatibility with Python 2.*. - - Returns - ------- - mod : module - - """ - # Explicitly lazy import this to avoid paying the cost - # of importing importlib at startup - from importlib.machinery import SourceFileLoader - return SourceFileLoader(name, fn).load_module() - - -os_fspath = os.fspath -os_PathLike = os.PathLike diff --git a/numpy/compat/tests/__init__.py b/numpy/compat/tests/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/numpy/conftest.py b/numpy/conftest.py index f323320d9f47..fde4defc926d 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -5,16 +5,16 @@ import string import sys import tempfile -from contextlib import contextmanager import warnings +from contextlib import contextmanager import hypothesis import pytest + import numpy import numpy as np - from numpy._core._multiarray_tests import get_fpu_mode -from numpy._core.tests._natype import pd_NA, get_stringdtype_dtype +from numpy._core.tests._natype import get_stringdtype_dtype, pd_NA from numpy.testing._private.utils import NOGIL_BUILD try: @@ -166,7 +166,6 @@ 
def warnings_errors_and_rng(test=None): "msvccompiler", "Deprecated call", "numpy.core", - "`np.compat`", "Importing from numpy.matlib", "This function is deprecated.", # random_integers "Data type alias 'a'", # numpy.rec.fromfile @@ -229,7 +228,6 @@ def warnings_errors_and_rng(test=None): 'numpy/_core/cversions.py', 'numpy/_pyinstaller', 'numpy/random/_examples', - 'numpy/compat', 'numpy/f2py/_backends/_distutils.py', ] diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py index e7d3c678b429..cfd96ede6895 100644 --- a/numpy/core/__init__.py +++ b/numpy/core/__init__.py @@ -4,6 +4,7 @@ `numpy.core` will be removed in the future. """ from numpy import _core + from ._utils import _raise_warning @@ -21,7 +22,7 @@ def _ufunc_reconstruct(module, name): # force lazy-loading of submodules to ensure a warning is printed -__all__ = ["arrayprint", "defchararray", "_dtype_ctypes", "_dtype", +__all__ = ["arrayprint", "defchararray", "_dtype_ctypes", "_dtype", # noqa: F822 "einsumfunc", "fromnumeric", "function_base", "getlimits", "_internal", "multiarray", "_multiarray_umath", "numeric", "numerictypes", "overrides", "records", "shape_base", "umath"] diff --git a/numpy/core/_dtype.py b/numpy/core/_dtype.py index 613a1d259a15..5446079097bc 100644 --- a/numpy/core/_dtype.py +++ b/numpy/core/_dtype.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import _dtype + from ._utils import _raise_warning ret = getattr(_dtype, attr_name, None) if ret is None: diff --git a/numpy/core/_dtype_ctypes.py b/numpy/core/_dtype_ctypes.py index 0dadd7949ecb..10cfba25ec6a 100644 --- a/numpy/core/_dtype_ctypes.py +++ b/numpy/core/_dtype_ctypes.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import _dtype_ctypes + from ._utils import _raise_warning ret = getattr(_dtype_ctypes, attr_name, None) if ret is None: diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index 7755c7c35505..63a6ccc75ef7 100644 --- a/numpy/core/_internal.py +++ 
b/numpy/core/_internal.py @@ -1,5 +1,6 @@ from numpy._core import _internal + # Build a new array from the information in a pickle. # Note that the name numpy.core._internal._reconstruct is embedded in # pickles of ndarrays made with NumPy before release 1.0 @@ -16,6 +17,7 @@ def _reconstruct(subtype, shape, dtype): def __getattr__(attr_name): from numpy._core import _internal + from ._utils import _raise_warning ret = getattr(_internal, attr_name, None) if ret is None: diff --git a/numpy/core/_multiarray_umath.py b/numpy/core/_multiarray_umath.py index 04cc88229aac..c1e6b4e8c932 100644 --- a/numpy/core/_multiarray_umath.py +++ b/numpy/core/_multiarray_umath.py @@ -1,5 +1,5 @@ -from numpy._core import _multiarray_umath from numpy import ufunc +from numpy._core import _multiarray_umath for item in _multiarray_umath.__dir__(): # ufuncs appear in pickles with a path in numpy.core._multiarray_umath @@ -11,13 +11,15 @@ def __getattr__(attr_name): from numpy._core import _multiarray_umath + from ._utils import _raise_warning if attr_name in {"_ARRAY_API", "_UFUNC_API"}: - from numpy.version import short_version + import sys import textwrap import traceback - import sys + + from numpy.version import short_version msg = textwrap.dedent(f""" A module that was compiled using NumPy 1.x cannot be run in diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 4e746546acf0..8be5c5c7cf77 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import arrayprint + from ._utils import _raise_warning ret = getattr(arrayprint, attr_name, None) if ret is None: diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py index ffab82acff5b..1c8706875e1c 100644 --- a/numpy/core/defchararray.py +++ b/numpy/core/defchararray.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import defchararray + from ._utils import _raise_warning ret = getattr(defchararray, attr_name, None) if ret 
is None: diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py index 74aa410ff4b5..fe5aa399fd17 100644 --- a/numpy/core/einsumfunc.py +++ b/numpy/core/einsumfunc.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import einsumfunc + from ._utils import _raise_warning ret = getattr(einsumfunc, attr_name, None) if ret is None: diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 1ea11d799d6f..fae7a0399f10 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import fromnumeric + from ._utils import _raise_warning ret = getattr(fromnumeric, attr_name, None) if ret is None: diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index 20e098b6fe44..e15c9714167c 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import function_base + from ._utils import _raise_warning ret = getattr(function_base, attr_name, None) if ret is None: diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py index faa084ae7770..dc009cbd961a 100644 --- a/numpy/core/getlimits.py +++ b/numpy/core/getlimits.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import getlimits + from ._utils import _raise_warning ret = getattr(getlimits, attr_name, None) if ret is None: diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py index 0290c852a8ab..b226709426fc 100644 --- a/numpy/core/multiarray.py +++ b/numpy/core/multiarray.py @@ -12,6 +12,7 @@ def __getattr__(attr_name): from numpy._core import multiarray + from ._utils import _raise_warning ret = getattr(multiarray, attr_name, None) if ret is None: diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index af0658d4fb66..ddd70b363acc 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import numeric + from ._utils import 
_raise_warning sentinel = object() diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index 0e887cbf30ad..cf2ad99f911b 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import numerictypes + from ._utils import _raise_warning ret = getattr(numerictypes, attr_name, None) if ret is None: diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py index 3297999c5b01..17830ed41021 100644 --- a/numpy/core/overrides.py +++ b/numpy/core/overrides.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import overrides + from ._utils import _raise_warning ret = getattr(overrides, attr_name, None) if ret is None: diff --git a/numpy/core/records.py b/numpy/core/records.py index 94c0d26926a0..0cc45037d22d 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import records + from ._utils import _raise_warning ret = getattr(records, attr_name, None) if ret is None: diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 10b8712c8b96..9cffce705908 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import shape_base + from ._utils import _raise_warning ret = getattr(shape_base, attr_name, None) if ret is None: diff --git a/numpy/core/umath.py b/numpy/core/umath.py index 6ef031d7d62a..25a60cc9dc62 100644 --- a/numpy/core/umath.py +++ b/numpy/core/umath.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): from numpy._core import umath + from ._utils import _raise_warning ret = getattr(umath, attr_name, None) if ret is None: diff --git a/numpy/ctypeslib/__init__.py b/numpy/ctypeslib/__init__.py index 5c84f7a35a8b..fd3c773e43bb 100644 --- a/numpy/ctypeslib/__init__.py +++ b/numpy/ctypeslib/__init__.py @@ -1,13 +1,13 @@ from ._ctypeslib import ( __all__, __doc__, + _concrete_ndptr, + _ndptr, as_array, as_ctypes, 
as_ctypes_type, - ctypes, c_intp, + ctypes, load_library, ndpointer, - _concrete_ndptr, - _ndptr, ) diff --git a/numpy/ctypeslib/__init__.pyi b/numpy/ctypeslib/__init__.pyi index 00207d7670cf..adc51da2696c 100644 --- a/numpy/ctypeslib/__init__.pyi +++ b/numpy/ctypeslib/__init__.pyi @@ -3,13 +3,31 @@ from ctypes import c_int64 as _c_intp from ._ctypeslib import ( __all__ as __all__, +) +from ._ctypeslib import ( __doc__ as __doc__, +) +from ._ctypeslib import ( + _concrete_ndptr as _concrete_ndptr, +) +from ._ctypeslib import ( + _ndptr as _ndptr, +) +from ._ctypeslib import ( as_array as as_array, +) +from ._ctypeslib import ( as_ctypes as as_ctypes, +) +from ._ctypeslib import ( as_ctypes_type as as_ctypes_type, +) +from ._ctypeslib import ( c_intp as c_intp, +) +from ._ctypeslib import ( load_library as load_library, +) +from ._ctypeslib import ( ndpointer as ndpointer, - _concrete_ndptr as _concrete_ndptr, - _ndptr as _ndptr, ) diff --git a/numpy/ctypeslib/_ctypeslib.py b/numpy/ctypeslib/_ctypeslib.py index 40b9e58b5912..9255603cd5d0 100644 --- a/numpy/ctypeslib/_ctypeslib.py +++ b/numpy/ctypeslib/_ctypeslib.py @@ -53,6 +53,7 @@ 'as_ctypes_type'] import os + import numpy as np import numpy._core.multiarray as mu from numpy._utils import set_module @@ -409,7 +410,7 @@ def _ctype_from_dtype_structured(dtype): # ctypes doesn't care about field order field_data = sorted(field_data, key=lambda f: f[0]) - if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data): + if len(field_data) > 1 and all(offset == 0 for offset, _, _ in field_data): # union, if multiple fields all at address 0 size = 0 _fields_ = [] diff --git a/numpy/ctypeslib/_ctypeslib.pyi b/numpy/ctypeslib/_ctypeslib.pyi index 3fe1f7927961..aecb3899bdf5 100644 --- a/numpy/ctypeslib/_ctypeslib.pyi +++ b/numpy/ctypeslib/_ctypeslib.pyi @@ -1,69 +1,63 @@ # NOTE: Numpy's mypy plugin is used for importing the correct # platform-specific `ctypes._SimpleCData[int]` sub-type import ctypes 
-from ctypes import c_int64 as _c_intp - from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence +from ctypes import c_int64 as _c_intp from typing import ( - Literal as L, Any, + ClassVar, + Generic, TypeAlias, TypeVar, - Generic, overload, - ClassVar, ) +from typing import Literal as L import numpy as np from numpy import ( - ndarray, + byte, + double, dtype, generic, - byte, - short, intc, long, + longdouble, longlong, + ndarray, + short, + single, ubyte, - ushort, uintc, ulong, ulonglong, - single, - double, - longdouble, + ushort, void, ) from numpy._core._internal import _ctypes from numpy._core.multiarray import flagsobj from numpy._typing import ( - # Arrays + DTypeLike, NDArray, + _AnyShape, _ArrayLike, - - # Shapes - _Shape, - _ShapeLike, - - # DTypes - DTypeLike, - _DTypeLike, - _VoidDTypeLike, _BoolCodes, - _UByteCodes, - _UShortCodes, - _UIntCCodes, - _ULongCodes, - _ULongLongCodes, _ByteCodes, - _ShortCodes, + _DoubleCodes, + _DTypeLike, _IntCCodes, _LongCodes, + _LongDoubleCodes, _LongLongCodes, + _ShapeLike, + _ShortCodes, _SingleCodes, - _DoubleCodes, - _LongDoubleCodes, + _UByteCodes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, + _UShortCodes, + _VoidDTypeLike, ) __all__ = ["load_library", "ndpointer", "c_intp", "as_ctypes", "as_array", "as_ctypes_type"] @@ -100,9 +94,9 @@ class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]): class _concrete_ndptr(_ndptr[_DTypeT]): _dtype_: ClassVar[_DTypeT] - _shape_: ClassVar[tuple[int, ...]] + _shape_: ClassVar[_AnyShape] @property - def contents(self) -> ndarray[_Shape, _DTypeT]: ... + def contents(self) -> ndarray[_AnyShape, _DTypeT]: ... def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ... 
diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 07f889406353..f76b08fc28dc 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -1,7 +1,17 @@ # ruff: noqa: ANN401 -from typing import Any, Generic, LiteralString, Never, NoReturn, Self, TypeAlias, final, overload, type_check_only +from typing import ( + Any, + Generic, + LiteralString, + Never, + NoReturn, + Self, + TypeAlias, + final, + overload, + type_check_only, +) from typing import Literal as L - from typing_extensions import TypeVar import numpy as np diff --git a/numpy/exceptions.py b/numpy/exceptions.py index 082027841520..0e8688ae9eba 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -243,5 +243,5 @@ class DTypePromotionError(TypeError): DTypePromotionError: field names `('field1', 'field2')` and `('field1',)` mismatch. - """ + """ # noqa: E501 pass diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index a7ce9f2b080b..e34dd99aec1c 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -9,14 +9,14 @@ """ __all__ = ['run_main', 'get_include'] -import sys -import subprocess import os +import subprocess +import sys import warnings from numpy.exceptions import VisibleDeprecationWarning -from . import f2py2e -from . import diagnose + +from . 
import diagnose, f2py2e run_main = f2py2e.run_main main = f2py2e.main diff --git a/numpy/f2py/__version__.py b/numpy/f2py/__version__.py index e20d7c1dbb38..8d12d955a2f2 100644 --- a/numpy/f2py/__version__.py +++ b/numpy/f2py/__version__.py @@ -1 +1 @@ -from numpy.version import version +from numpy.version import version # noqa: F401 diff --git a/numpy/f2py/_backends/_backend.py b/numpy/f2py/_backends/_backend.py index a7d43d2587b2..5dda4004375e 100644 --- a/numpy/f2py/_backends/_backend.py +++ b/numpy/f2py/_backends/_backend.py @@ -1,5 +1,3 @@ -from __future__ import annotations - from abc import ABC, abstractmethod diff --git a/numpy/f2py/_backends/_distutils.py b/numpy/f2py/_backends/_distutils.py index aa7680a07ff9..5c8f1092b568 100644 --- a/numpy/f2py/_backends/_distutils.py +++ b/numpy/f2py/_backends/_distutils.py @@ -1,14 +1,15 @@ -from ._backend import Backend - -from numpy.distutils.core import setup, Extension -from numpy.distutils.system_info import get_info -from numpy.distutils.misc_util import dict_append -from numpy.exceptions import VisibleDeprecationWarning import os -import sys import shutil +import sys import warnings +from numpy.distutils.core import Extension, setup +from numpy.distutils.misc_util import dict_append +from numpy.distutils.system_info import get_info +from numpy.exceptions import VisibleDeprecationWarning + +from ._backend import Backend + class DistutilsBackend(Backend): def __init__(sef, *args, **kwargs): diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index b65b2def4d1f..cbd9b0e32729 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -1,16 +1,14 @@ -from __future__ import annotations - -import os import errno +import os +import re import shutil import subprocess import sys -import re +from itertools import chain from pathlib import Path +from string import Template from ._backend import Backend -from string import Template -from itertools import chain class 
MesonTemplate: diff --git a/numpy/f2py/_backends/_meson.pyi b/numpy/f2py/_backends/_meson.pyi index b9f959537214..67baf9b76845 100644 --- a/numpy/f2py/_backends/_meson.pyi +++ b/numpy/f2py/_backends/_meson.pyi @@ -2,7 +2,6 @@ from collections.abc import Callable from pathlib import Path from typing import Final from typing import Literal as L - from typing_extensions import override from ._backend import Backend diff --git a/numpy/f2py/_src_pyf.pyi b/numpy/f2py/_src_pyf.pyi index f5aecbf1decd..50ddd07bf638 100644 --- a/numpy/f2py/_src_pyf.pyi +++ b/numpy/f2py/_src_pyf.pyi @@ -1,9 +1,8 @@ import re +from _typeshed import StrOrBytesPath from collections.abc import Mapping from typing import Final -from _typeshed import StrOrBytesPath - routine_start_re: Final[re.Pattern[str]] = ... routine_end_re: Final[re.Pattern[str]] = ... function_start_re: Final[re.Pattern[str]] = ... diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 3c1b4500793b..a5af31d976ec 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -9,13 +9,12 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ import pprint -import sys import re +import sys import types from functools import reduce -from . import __version__ -from . import cfuncs +from . 
import __version__, cfuncs from .cfuncs import errmess __all__ = [ @@ -43,7 +42,7 @@ 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', - 'process_f2cmap_dict', 'containscommon' + 'process_f2cmap_dict', 'containscommon', 'containsderivedtypes' ] @@ -570,6 +569,20 @@ def containscommon(rout): return 0 +def hasderivedtypes(rout): + return ('block' in rout) and rout['block'] == 'type' + + +def containsderivedtypes(rout): + if hasderivedtypes(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if hasderivedtypes(b): + return 1 + return 0 + + def containsmodule(block): if ismodule(block): return 1 @@ -708,9 +721,8 @@ def getcallprotoargument(rout, cb_map={}): pass elif isstring(var): pass - else: - if not isattr_value(var): - ctype = ctype + '*' + elif not isattr_value(var): + ctype = ctype + '*' if (isstring(var) or isarrayofstrings(var) # obsolete? 
or isstringarray(var)): @@ -983,11 +995,10 @@ def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False): if verbose: outmess(f'\tMapping "{k}(kind={k1})" to "{v1}\"\n') f2cmap_mapped.append(v1) - else: - if verbose: - errmess( - "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" - % (k, k1, v1, v1, list(c2py_map.keys())) - ) + elif verbose: + errmess( + "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" + % (k, k1, v1, v1, list(c2py_map.keys())) + ) return f2cmap_all, f2cmap_mapped diff --git a/numpy/f2py/auxfuncs.pyi b/numpy/f2py/auxfuncs.pyi index 2a0d4e106bcc..dfbae5c7d94d 100644 --- a/numpy/f2py/auxfuncs.pyi +++ b/numpy/f2py/auxfuncs.pyi @@ -1,15 +1,15 @@ +from _typeshed import FileDescriptorOrPath from collections.abc import Callable, Mapping from pprint import pprint as show from typing import Any, Final, Never, TypeAlias, TypeVar, overload from typing import Literal as L -from _typeshed import FileDescriptorOrPath - from .cfuncs import errmess __all__ = [ "applyrules", "containscommon", + "containsderivedtypes", "debugcapi", "dictappend", "errmess", @@ -200,11 +200,13 @@ def isintent_inplace(var: _Var) -> bool: ... def isintent_aux(var: _Var) -> bool: ... # +def containsderivedtypes(rout: _ROut) -> L[0, 1]: ... def containscommon(rout: _ROut) -> _Bool: ... def hasexternals(rout: _ROut) -> bool: ... def hasresultnote(rout: _ROut) -> _Bool: ... def hasbody(rout: _ROut) -> _Bool: ... def hascommon(rout: _ROut) -> bool: ... +def hasderivedtypes(rout: _ROut) -> bool: ... def hascallstatement(rout: _ROut) -> bool: ... def isroutine(rout: _ROut) -> bool: ... def ismodule(rout: _ROut) -> bool: ... diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index 2fa11bce3374..290ac2f467ad 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -7,19 +7,21 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ from . 
import __version__ + f2py_version = __version__.version import copy -import re import os -from .crackfortran import markoutercomma +import re + from . import cb_rules -from ._isocbind import iso_c_binding_map, isoc_c2pycode_map, iso_c2py_map +from ._isocbind import iso_c2py_map, iso_c_binding_map, isoc_c2pycode_map # The environment provided by auxfuncs.py is needed for some calls to eval. # As the needed functions cannot be determined by static inspection of the # code, it is safest to use import * pending a major refactoring of f2py. from .auxfuncs import * +from .crackfortran import markoutercomma __all__ = [ 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', @@ -229,9 +231,8 @@ def getctype(var): errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' % (typespec, var['kindselector']['kind'], ctype, typespec, var['kindselector']['kind'], os.getcwd())) - else: - if not isexternal(var): - errmess(f'getctype: No C-type found in "{var}", assuming void.\n') + elif not isexternal(var): + errmess(f'getctype: No C-type found in "{var}", assuming void.\n') return ctype @@ -767,10 +768,9 @@ def cb_routsign2map(rout, um): void #endif """ - else: - if hasnote(rout): - ret['note'] = rout['note'] - rout['note'] = ['See elsewhere.'] + elif hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] nofargs = 0 nofoptargs = 0 if 'args' in rout and 'vars' in rout: diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py index 6fa655d39069..238d473113e0 100644 --- a/numpy/f2py/cb_rules.py +++ b/numpy/f2py/cb_rules.py @@ -8,16 +8,39 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ -from . import __version__ +from . 
import __version__, cfuncs from .auxfuncs import ( - applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray, - iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c, - isintent_hide, isintent_in, isintent_inout, isintent_nothide, - isintent_out, isoptional, isrequired, isscalar, isstring, - isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace, - stripcomma, throw_error + applyrules, + debugcapi, + dictappend, + errmess, + getargs, + hasnote, + isarray, + iscomplex, + iscomplexarray, + iscomplexfunction, + isfunction, + isintent_c, + isintent_hide, + isintent_in, + isintent_inout, + isintent_nothide, + isintent_out, + isoptional, + isrequired, + isscalar, + isstring, + isstringfunction, + issubroutine, + l_and, + l_not, + l_or, + outmess, + replace, + stripcomma, + throw_error, ) -from . import cfuncs f2py_version = __version__.version @@ -639,5 +662,4 @@ def buildcallback(rout, um): 'argname': rd['argname'] } outmess(f" {ar['docstrshort']}\n") - return ################## Build call-back function ############# diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index e5e1328b33f7..b2b1cad3d867 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -9,8 +9,8 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ -import sys import copy +import sys from . 
import __version__ @@ -598,32 +598,37 @@ def errmess(s: str) -> None: return ii; }""" cfuncs['forcomb'] = """ -static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache; -static int initforcomb(npy_intp *dims,int nd,int tr) { +struct ForcombCache { int nd;npy_intp *d;int *i,*i_tr,tr; }; +static int initforcomb(struct ForcombCache *cache, npy_intp *dims,int nd,int tr) { int k; if (dims==NULL) return 0; if (nd<0) return 0; - forcombcache.nd = nd; - forcombcache.d = dims; - forcombcache.tr = tr; - if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0; - if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0; + cache->nd = nd; + cache->d = dims; + cache->tr = tr; + + cache->i = (int *)malloc(sizeof(int)*nd); + if (cache->i==NULL) return 0; + cache->i_tr = (int *)malloc(sizeof(int)*nd); + if (cache->i_tr==NULL) {free(cache->i); return 0;}; + for (k=1;ki[k] = cache->i_tr[nd-k-1] = 0; } - forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1; + cache->i[0] = cache->i_tr[nd-1] = -1; return 1; } -static int *nextforcomb(void) { +static int *nextforcomb(struct ForcombCache *cache) { + if (cache==NULL) return NULL; int j,*i,*i_tr,k; - int nd=forcombcache.nd; - if ((i=forcombcache.i) == NULL) return NULL; - if ((i_tr=forcombcache.i_tr) == NULL) return NULL; - if (forcombcache.d == NULL) return NULL; + int nd=cache->nd; + if ((i=cache->i) == NULL) return NULL; + if ((i_tr=cache->i_tr) == NULL) return NULL; + if (cache->d == NULL) return NULL; i[0]++; - if (i[0]==forcombcache.d[0]) { + if (i[0]==cache->d[0]) { j=1; - while ((jd[j]-1)) j++; if (j==nd) { free(i); free(i_tr); @@ -634,7 +639,7 @@ def errmess(s: str) -> None: i_tr[nd-j-1]++; } else i_tr[nd-1]++; - if (forcombcache.tr) return i_tr; + if (cache->tr) return i_tr; return i; }""" needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] @@ -1047,9 +1052,12 @@ def errmess(s: str) -> None: PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && 
PyArray_TYPE(obj) == NPY_LONGDOUBLE) { - (*v) = *((npy_longdouble *)PyArray_DATA(obj)); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr) == NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(arr)); + return 1; + } } } if (double_from_pyobj(&d, obj, errmess)) { @@ -1131,10 +1139,13 @@ def errmess(s: str) -> None: PyArray_ScalarAsCtype(obj, v); return 1; } - else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) { - (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(obj)))); - (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(obj)))); - return 1; + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr)))); + return 1; + } } } if (complex_double_from_pyobj(&cd,obj,errmess)) { diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py index 4c8cef7ad832..cef757b6c5a3 100644 --- a/numpy/f2py/common_rules.py +++ b/numpy/f2py/common_rules.py @@ -9,13 +9,11 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ from . import __version__ + f2py_version = __version__.version -from .auxfuncs import ( - hasbody, hascommon, hasnote, isintent_hide, outmess, getuseblocks -) -from . import capi_maps -from . import func2subr +from . import capi_maps, func2subr +from .auxfuncs import getuseblocks, hasbody, hascommon, hasnote, isintent_hide, outmess from .crackfortran import rmbadname diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index ea590722c835..22d804389ad4 100644 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -136,27 +136,27 @@ The above may be solved by creating appropriate preprocessor program, for example. 
""" -import sys -import string +import codecs +import copy import fileinput -import re import os -import copy import platform -import codecs +import re +import string +import sys from pathlib import Path + try: import charset_normalizer except ImportError: charset_normalizer = None -from . import __version__ +from . import __version__, symbolic # The environment provided by auxfuncs.py is needed for some calls to eval. # As the needed functions cannot be determined by static inspection of the # code, it is safest to use import * pending a major refactoring of f2py. from .auxfuncs import * -from . import symbolic f2py_version = __version__.version @@ -1124,13 +1124,12 @@ def analyzeline(m, case, line): groupcache[groupcounter]['result'] = result if groupcounter == 1: groupcache[groupcounter]['from'] = currentfilename + elif f77modulename and groupcounter == 3: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], currentfilename) else: - if f77modulename and groupcounter == 3: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], currentfilename) - else: - groupcache[groupcounter]['from'] = '%s:%s' % ( - groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) for k in list(groupcache[groupcounter].keys()): if not groupcache[groupcounter][k]: del groupcache[groupcounter][k] @@ -1250,8 +1249,7 @@ def analyzeline(m, case, line): continue else: k = rmbadname1(m1.group('name')) - if case in ['public', 'private'] and \ - (k == 'operator' or k == 'assignment'): + if case in ['public', 'private'] and k in {'operator', 'assignment'}: k += m1.group('after') if k not in edecl: edecl[k] = {} @@ -1554,10 +1552,9 @@ def analyzeline(m, case, line): appendmultiline(groupcache[gc], previous_context[:2], m.group('this')) - else: - if verbose > 1: - print(m.groupdict()) - 
outmess('analyzeline: No code implemented for line.\n') + elif verbose > 1: + print(m.groupdict()) + outmess('analyzeline: No code implemented for line.\n') def appendmultiline(group, context_name, ml): @@ -1567,7 +1564,6 @@ def appendmultiline(group, context_name, ml): if context_name not in d: d[context_name] = [] d[context_name].append(ml) - return def cracktypespec0(typespec, ll): @@ -2117,9 +2113,8 @@ def postcrack(block, args=None, tab=''): del interfaced[interfaced.index(e)] break interface['body'].append(edef) - else: - if e in mvars and not isexternal(mvars[e]): - interface['vars'][e] = mvars[e] + elif e in mvars and not isexternal(mvars[e]): + interface['vars'][e] = mvars[e] if interface['vars'] or interface['body']: block['interfaced'] = interfaced mblock = {'block': 'python module', 'body': [ @@ -2187,12 +2182,11 @@ def analyzecommon(block): else: block['vars'][n]['attrspec'] = [ f"dimension({','.join(dims)})"] + elif dims: + block['vars'][n] = { + 'attrspec': [f"dimension({','.join(dims)})"]} else: - if dims: - block['vars'][n] = { - 'attrspec': [f"dimension({','.join(dims)})"]} - else: - block['vars'][n] = {} + block['vars'][n] = {} if n not in commonvars: commonvars.append(n) else: @@ -2440,11 +2434,10 @@ def _selected_real_kind_func(p, r=0, radix=0): if machine.startswith(('aarch64', 'alpha', 'arm64', 'loongarch', 'mips', 'power', 'ppc', 'riscv', 's390x', 'sparc')): if p <= 33: return 16 - else: - if p < 19: - return 10 - elif p <= 33: - return 16 + elif p < 19: + return 10 + elif p <= 33: + return 16 return -1 @@ -3432,15 +3425,14 @@ def vars2fortran(block, vars, args, tab='', as_interface=False): vardef = f"{vardef}*({selector['*']})" else: vardef = f"{vardef}*{selector['*']}" - else: - if 'len' in selector: - vardef = f"{vardef}(len={selector['len']}" - if 'kind' in selector: - vardef = f"{vardef},kind={selector['kind']})" - else: - vardef = f'{vardef})' - elif 'kind' in selector: - vardef = f"{vardef}(kind={selector['kind']})" + elif 'len' in 
selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" c = ' ' if 'attrspec' in vars[a]: attr = [l for l in vars[a]['attrspec'] @@ -3574,16 +3566,16 @@ def visit(item, parents, result, *args, **kwargs): new_result = [] for index, value in enumerate(obj): new_index, new_item = traverse((index, value), visit, - parents=parents + [parent], - result=result, *args, **kwargs) + parents + [parent], result, + *args, **kwargs) if new_index is not None: new_result.append(new_item) elif isinstance(obj, dict): new_result = {} for key, value in obj.items(): new_key, new_value = traverse((key, value), visit, - parents=parents + [parent], - result=result, *args, **kwargs) + parents + [parent], result, + *args, **kwargs) if new_key is not None: new_result[new_key] = new_value else: diff --git a/numpy/f2py/crackfortran.pyi b/numpy/f2py/crackfortran.pyi index 6b08f8784f01..c5f4fd7585ba 100644 --- a/numpy/f2py/crackfortran.pyi +++ b/numpy/f2py/crackfortran.pyi @@ -1,10 +1,9 @@ import re +from _typeshed import StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Mapping from typing import IO, Any, Concatenate, Final, Never, ParamSpec, TypeAlias, overload from typing import Literal as L -from _typeshed import StrOrBytesPath, StrPath - from .__version__ import version from .auxfuncs import isintent_dict as isintent_dict diff --git a/numpy/f2py/diagnose.pyi b/numpy/f2py/diagnose.pyi index 29cc2b4988b3..b88194ac6bff 100644 --- a/numpy/f2py/diagnose.pyi +++ b/numpy/f2py/diagnose.pyi @@ -1,4 +1 @@ -from _typeshed import StrOrBytesPath - -def run_command(cmd: StrOrBytesPath) -> None: ... def run() -> None: ... 
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index 62efa1241e4c..459299f8e127 100644 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -10,23 +10,26 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ -import sys +import argparse import os import pprint import re -import argparse +import sys -from . import crackfortran -from . import rules -from . import cb_rules -from . import auxfuncs -from . import cfuncs -from . import f90mod_rules -from . import __version__ -from . import capi_maps -from .cfuncs import errmess from numpy.f2py._backends import f2py_build_generator +from . import ( + __version__, + auxfuncs, + capi_maps, + cb_rules, + cfuncs, + crackfortran, + f90mod_rules, + rules, +) +from .cfuncs import errmess + f2py_version = __version__.version numpy_version = __version__.version @@ -539,7 +542,7 @@ def __call__(self, parser, namespace, values, option_string=None): include_paths_set = set(getattr(namespace, 'include_paths', []) or []) if option_string == "--include_paths": outmess("Use --include-paths or -I instead of --include_paths which will be removed") - if option_string == "--include-paths" or option_string == "--include_paths": + if option_string in {"--include-paths", "--include_paths"}: include_paths_set.update(values.split(':')) else: include_paths_set.add(values) @@ -679,7 +682,7 @@ def run_compile(): print(f'Unknown vendor: "{s[len(v):]}"') nv = ov i = flib_flags.index(s) - flib_flags[i] = '--fcompiler=' + nv + flib_flags[i] = '--fcompiler=' + nv # noqa: B909 continue for s in del_list: i = flib_flags.index(s) diff --git a/numpy/f2py/f2py2e.pyi b/numpy/f2py/f2py2e.pyi index 2e3d30de2b5c..03aeffc5dcdd 100644 --- a/numpy/f2py/f2py2e.pyi +++ b/numpy/f2py/f2py2e.pyi @@ -2,10 +2,8 @@ import argparse import pprint from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence from types import ModuleType -from typing import Any, Final, TypedDict, type_check_only - +from typing import Any, Final, 
NotRequired, TypedDict, type_check_only from typing_extensions import TypeVar, override -from typing import NotRequired from .__version__ import version from .auxfuncs import _Bool diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index 305611fa0521..d13a42a9d71f 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -14,14 +14,13 @@ import numpy as np -from . import capi_maps -from . import func2subr -from .crackfortran import undo_rmbadname, undo_rmbadname1 +from . import capi_maps, func2subr # The environment provided by auxfuncs.py is needed for some calls to eval. # As the needed functions cannot be determined by static inspection of the # code, it is safest to use import * pending a major refactoring of f2py. from .auxfuncs import * +from .crackfortran import undo_rmbadname, undo_rmbadname1 options = {} @@ -121,6 +120,10 @@ def dadd(line, s=doc): if m['name'] in usenames and containscommon(m): outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") continue + # skip modules with derived types + if m['name'] in usenames and containsderivedtypes(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a derived type...\n") + continue if onlyvars: outmess(f"\t\t Variables: {' '.join(onlyvars)}\n") chooks = [''] @@ -199,15 +202,14 @@ def iadd(line, s=ihooks): fhooks[0] = fhooks[0] + wrap fargs.append(f"f2pywrap_{m['name']}_{b['name']}") ifargs.append(func2subr.createfuncwrapper(b, signature=1)) + elif wrap: + fhooks[0] = fhooks[0] + wrap + fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + ifargs.append( + func2subr.createsubrwrapper(b, signature=1)) else: - if wrap: - fhooks[0] = fhooks[0] + wrap - fargs.append(f"f2pywrap_{m['name']}_{b['name']}") - ifargs.append( - func2subr.createsubrwrapper(b, signature=1)) - else: - fargs.append(b['name']) - mfargs.append(fargs[-1]) + fargs.append(b['name']) + mfargs.append(fargs[-1]) api['externroutines'] = [] ar = 
applyrules(api, vrd) ar['docs'] = [] diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py index 93598259991b..0a875006ed75 100644 --- a/numpy/f2py/func2subr.py +++ b/numpy/f2py/func2subr.py @@ -11,13 +11,23 @@ """ import copy +from ._isocbind import isoc_kindmap from .auxfuncs import ( - getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in, - isintent_out, islogicalfunction, ismoduleroutine, isscalar, - issubroutine, issubroutine_wrap, outmess, show + getfortranname, + isexternal, + isfunction, + isfunction_wrap, + isintent_in, + isintent_out, + islogicalfunction, + ismoduleroutine, + isscalar, + issubroutine, + issubroutine_wrap, + outmess, + show, ) -from ._isocbind import isoc_kindmap def var2fixfortran(vars, a, fa=None, f90mode=None): if fa is None: @@ -47,20 +57,18 @@ def var2fixfortran(vars, a, fa=None, f90mode=None): vardef = f'{vardef}(len=*)' else: vardef = f"{vardef}({lk}={selector['*']})" + elif selector['*'] in ['*', ':']: + vardef = f"{vardef}*({selector['*']})" else: - if selector['*'] in ['*', ':']: - vardef = f"{vardef}*({selector['*']})" - else: - vardef = f"{vardef}*{selector['*']}" - else: - if 'len' in selector: - vardef = f"{vardef}(len={selector['len']}" - if 'kind' in selector: - vardef = f"{vardef},kind={selector['kind']})" - else: - vardef = f'{vardef})' - elif 'kind' in selector: - vardef = f"{vardef}(kind={selector['kind']})" + vardef = f"{vardef}*{selector['*']}" + elif 'len' in selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" vardef = f'{vardef} {fa}' if 'dimension' in vars[a]: diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 9d967a080bf8..667ef287f92b 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -46,42 +46,92 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
""" +import copy import os import sys import time -import copy from pathlib import Path # __version__.version is now the same as the NumPy version -from . import __version__ - +from . import ( + __version__, + capi_maps, + cfuncs, + common_rules, + f90mod_rules, + func2subr, + use_rules, +) from .auxfuncs import ( - applyrules, debugcapi, dictappend, errmess, gentitle, getargs2, - hascallstatement, hasexternals, hasinitvalue, hasnote, - hasresultnote, isarray, isarrayofstrings, ischaracter, - ischaracterarray, ischaracter_or_characterarray, iscomplex, - iscomplexarray, iscomplexfunction, iscomplexfunction_warn, - isdummyroutine, isexternal, isfunction, isfunction_wrap, isint1, - isint1array, isintent_aux, isintent_c, isintent_callback, - isintent_copy, isintent_hide, isintent_inout, isintent_nothide, - isintent_out, isintent_overwrite, islogical, islong_complex, - islong_double, islong_doublefunction, islong_long, - islong_longfunction, ismoduleroutine, isoptional, isrequired, - isscalar, issigned_long_longarray, isstring, isstringarray, - isstringfunction, issubroutine, isattr_value, - issubroutine_wrap, isthreadsafe, isunsigned, isunsigned_char, - isunsigned_chararray, isunsigned_long_long, - isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray, - l_and, l_not, l_or, outmess, replace, stripcomma, requiresf90wrapper + applyrules, + debugcapi, + dictappend, + errmess, + gentitle, + getargs2, + hascallstatement, + hasexternals, + hasinitvalue, + hasnote, + hasresultnote, + isarray, + isarrayofstrings, + isattr_value, + ischaracter, + ischaracter_or_characterarray, + ischaracterarray, + iscomplex, + iscomplexarray, + iscomplexfunction, + iscomplexfunction_warn, + isdummyroutine, + isexternal, + isfunction, + isfunction_wrap, + isint1, + isint1array, + isintent_aux, + isintent_c, + isintent_callback, + isintent_copy, + isintent_hide, + isintent_inout, + isintent_nothide, + isintent_out, + isintent_overwrite, + islogical, + islong_complex, + 
islong_double, + islong_doublefunction, + islong_long, + islong_longfunction, + ismoduleroutine, + isoptional, + isrequired, + isscalar, + issigned_long_longarray, + isstring, + isstringarray, + isstringfunction, + issubroutine, + issubroutine_wrap, + isthreadsafe, + isunsigned, + isunsigned_char, + isunsigned_chararray, + isunsigned_long_long, + isunsigned_long_longarray, + isunsigned_short, + isunsigned_shortarray, + l_and, + l_not, + l_or, + outmess, + replace, + requiresf90wrapper, + stripcomma, ) -from . import capi_maps -from . import cfuncs -from . import common_rules -from . import use_rules -from . import f90mod_rules -from . import func2subr - f2py_version = __version__.version numpy_version = __version__.version @@ -1104,7 +1154,7 @@ 'frompyobj': [ ' #setdims#;', ' capi_#varname#_intent |= #intent#;', - (' const char * capi_errmess = "#modulename#.#pyname#:' + (' const char capi_errmess[] = "#modulename#.#pyname#:' ' failed to create array from the #nth# `#varname#`";'), {isintent_hide: ' capi_#varname#_as_array = ndarray_from_pyobj(' @@ -1134,9 +1184,10 @@ """\ int *_i,capi_i=0; CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); - if (initforcomb(PyArray_DIMS(capi_#varname#_as_array), + struct ForcombCache cache; + if (initforcomb(&cache, PyArray_DIMS(capi_#varname#_as_array), PyArray_NDIM(capi_#varname#_as_array),1)) { - while ((_i = nextforcomb())) + while ((_i = nextforcomb(&cache))) #varname#[capi_i++] = #init#; /* fortran way */ } else { PyObject *exc, *val, *tb; diff --git a/numpy/f2py/rules.pyi b/numpy/f2py/rules.pyi index aa91e942698a..58614060ba87 100644 --- a/numpy/f2py/rules.pyi +++ b/numpy/f2py/rules.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable, Iterable, Mapping from typing import Any, Final, TypeAlias from typing import Literal as L - from typing_extensions import TypeVar from .__version__ import version diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 4e2aa370b643..5c2b4bdf0931 100644 
--- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -363,6 +363,8 @@ fortran_getattr(PyFortranObject *fp, char *name) { int i, j, k, flag; if (fp->dict != NULL) { + // python 3.13 added PyDict_GetItemRef +#if PY_VERSION_HEX < 0x030D0000 PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); if (v == NULL && PyErr_Occurred()) { return NULL; @@ -371,6 +373,17 @@ fortran_getattr(PyFortranObject *fp, char *name) Py_INCREF(v); return v; } +#else + PyObject *v; + int result = PyDict_GetItemStringRef(fp->dict, name, &v); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return v; + } +#endif + } for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); i++) diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index f3012188f58d..11645172fe30 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -310,12 +310,11 @@ def tostring(self, parent_precedence=Precedence.NONE, op = ' + ' if coeff == 1: term = term.tostring(Precedence.SUM, language=language) + elif term == as_number(1): + term = str(coeff) else: - if term == as_number(1): - term = str(coeff) - else: - term = f'{coeff} * ' + term.tostring( - Precedence.PRODUCT, language=language) + term = f'{coeff} * ' + term.tostring( + Precedence.PRODUCT, language=language) if terms: terms.append(op) elif op == ' - ': diff --git a/numpy/f2py/symbolic.pyi b/numpy/f2py/symbolic.pyi index 74e7a48ab327..e7b14f751dc3 100644 --- a/numpy/f2py/symbolic.pyi +++ b/numpy/f2py/symbolic.pyi @@ -2,7 +2,6 @@ from collections.abc import Callable, Mapping from enum import Enum from typing import Any, Generic, ParamSpec, Self, TypeAlias, overload from typing import Literal as L - from typing_extensions import TypeVar __all__ = ["Expr"] diff --git a/numpy/f2py/tests/__init__.py b/numpy/f2py/tests/__init__.py index 5ecb68077b94..4ed8fdd53f8c 100644 --- a/numpy/f2py/tests/__init__.py +++ b/numpy/f2py/tests/__init__.py @@ -1,6 +1,7 @@ -from numpy.testing import 
IS_WASM, IS_EDITABLE import pytest +from numpy.testing import IS_EDITABLE, IS_WASM + if IS_WASM: pytest.skip( "WASM/Pyodide does not use or support Fortran", diff --git a/numpy/f2py/tests/src/regression/mod_derived_types.f90 b/numpy/f2py/tests/src/regression/mod_derived_types.f90 new file mode 100644 index 000000000000..7692c82cf42e --- /dev/null +++ b/numpy/f2py/tests/src/regression/mod_derived_types.f90 @@ -0,0 +1,23 @@ +module mtypes + implicit none + integer, parameter :: value1 = 100 + type :: master_data + integer :: idat = 200 + end type master_data + type(master_data) :: masterdata +end module mtypes + + +subroutine no_type_subroutine(ain, aout) + use mtypes, only: value1 + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + value1 +end subroutine no_type_subroutine + +subroutine type_subroutine(ain, aout) + use mtypes, only: masterdata + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + masterdata%idat +end subroutine type_subroutine \ No newline at end of file diff --git a/numpy/f2py/tests/test_abstract_interface.py b/numpy/f2py/tests/test_abstract_interface.py index 0bc38b51f95d..21e77db3e8d3 100644 --- a/numpy/f2py/tests/test_abstract_interface.py +++ b/numpy/f2py/tests/test_abstract_interface.py @@ -1,8 +1,10 @@ import pytest -from . import util + from numpy.f2py import crackfortran from numpy.testing import IS_WASM +from . import util + @pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") @pytest.mark.slow diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index 9bdd91f47638..a8f952752cf4 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -1,12 +1,13 @@ -import sys import copy import platform -import pytest +import sys from pathlib import Path -import numpy as np +import pytest +import numpy as np from numpy._core._type_aliases import c_names_dict as _c_names_dict + from . 
import util wrap = None diff --git a/numpy/f2py/tests/test_assumed_shape.py b/numpy/f2py/tests/test_assumed_shape.py index d4664cf88cbe..cf75644d40ee 100644 --- a/numpy/f2py/tests/test_assumed_shape.py +++ b/numpy/f2py/tests/test_assumed_shape.py @@ -1,7 +1,8 @@ import os -import pytest import tempfile +import pytest + from . import util diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py index 16b5559e8e42..ba255a1b473c 100644 --- a/numpy/f2py/tests/test_block_docstring.py +++ b/numpy/f2py/tests/test_block_docstring.py @@ -1,9 +1,11 @@ import sys + import pytest -from . import util from numpy.testing import IS_PYPY +from . import util + @pytest.mark.slow class TestBlockDocString(util.F2PyTest): diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 332c4cc0d79a..6614efb16db8 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -1,14 +1,16 @@ import math -import textwrap +import platform import sys -import pytest +import textwrap import threading -import traceback import time -import platform +import traceback + +import pytest import numpy as np from numpy.testing import IS_PYPY + from . 
import util diff --git a/numpy/f2py/tests/test_character.py b/numpy/f2py/tests/test_character.py index 431feaa98bc2..74868a6f09f7 100644 --- a/numpy/f2py/tests/test_character.py +++ b/numpy/f2py/tests/test_character.py @@ -1,8 +1,10 @@ -import pytest import textwrap -from numpy.testing import assert_array_equal, assert_equal, assert_raises + +import pytest + import numpy as np from numpy.f2py.tests import util +from numpy.testing import assert_array_equal, assert_equal, assert_raises @pytest.mark.slow @@ -607,7 +609,7 @@ def test_gh24662(self): a = np.array('hi', dtype='S32') self.module.string_inout_optional(a) assert "output string" in a.tobytes().decode() - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 aa = "Hi" self.module.string_inout_optional(aa) diff --git a/numpy/f2py/tests/test_common.py b/numpy/f2py/tests/test_common.py index 09bd6147f0f3..b9fbd84d52fb 100644 --- a/numpy/f2py/tests/test_common.py +++ b/numpy/f2py/tests/test_common.py @@ -1,7 +1,10 @@ import pytest + import numpy as np + from . import util + @pytest.mark.slow class TestCommonBlock(util.F2PyTest): sources = [util.getpath("tests", "src", "common", "block.f")] diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index 3f096e389d45..c3967cfb967b 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -1,13 +1,16 @@ +import contextlib import importlib +import io +import textwrap import time + import pytest + import numpy as np +from numpy.f2py import crackfortran from numpy.f2py.crackfortran import markinnerspaces, nameargspattern + from . 
import util -from numpy.f2py import crackfortran -import textwrap -import contextlib -import io class TestNoSpace(util.F2PyTest): diff --git a/numpy/f2py/tests/test_data.py b/numpy/f2py/tests/test_data.py index 0dc48afb5e2c..0cea5561bd6c 100644 --- a/numpy/f2py/tests/test_data.py +++ b/numpy/f2py/tests/test_data.py @@ -1,8 +1,9 @@ import pytest + import numpy as np +from numpy.f2py.crackfortran import crackfortran from . import util -from numpy.f2py.crackfortran import crackfortran class TestData(util.F2PyTest): diff --git a/numpy/f2py/tests/test_docs.py b/numpy/f2py/tests/test_docs.py index 55a37cf161cf..5d9aaac9f15b 100644 --- a/numpy/f2py/tests/test_docs.py +++ b/numpy/f2py/tests/test_docs.py @@ -1,8 +1,12 @@ +from pathlib import Path + import pytest + import numpy as np from numpy.testing import assert_array_equal, assert_equal + from . import util -from pathlib import Path + def get_docdir(): parents = Path(__file__).resolve().parents diff --git a/numpy/f2py/tests/test_f2cmap.py b/numpy/f2py/tests/test_f2cmap.py index 6596ada33a54..a35320ccc18a 100644 --- a/numpy/f2py/tests/test_f2cmap.py +++ b/numpy/f2py/tests/test_f2cmap.py @@ -1,6 +1,8 @@ -from . import util import numpy as np +from . import util + + class TestF2Cmap(util.F2PyTest): sources = [ util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90"), diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index 936883ffc1fd..2f91eb77c4bd 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -1,19 +1,19 @@ +import platform import re import shlex import subprocess import sys import textwrap -from pathlib import Path from collections import namedtuple - -import platform +from pathlib import Path import pytest -from . import util from numpy.f2py.f2py2e import main as f2pycli from numpy.testing._private.utils import NOGIL_BUILD +from . 
import util + ####################### # F2PY Test utilities # ###################### diff --git a/numpy/f2py/tests/test_isoc.py b/numpy/f2py/tests/test_isoc.py index 4d7ce52b1e60..f3450f15fead 100644 --- a/numpy/f2py/tests/test_isoc.py +++ b/numpy/f2py/tests/test_isoc.py @@ -1,8 +1,11 @@ -from . import util -import numpy as np import pytest + +import numpy as np from numpy.testing import assert_allclose +from . import util + + class TestISOC(util.F2PyTest): sources = [ util.getpath("tests", "src", "isocintrin", "isoCtests.f90"), diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index a8403ca36606..ce223a555456 100644 --- a/numpy/f2py/tests/test_kind.py +++ b/numpy/f2py/tests/test_kind.py @@ -1,11 +1,15 @@ +import platform import sys + import pytest -import platform from numpy.f2py.crackfortran import ( _selected_int_kind_func as selected_int_kind, +) +from numpy.f2py.crackfortran import ( _selected_real_kind_func as selected_real_kind, ) + from . import util diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py index 688c1630fda6..07f43e2bcfaa 100644 --- a/numpy/f2py/tests/test_mixed.py +++ b/numpy/f2py/tests/test_mixed.py @@ -1,7 +1,9 @@ import textwrap + import pytest from numpy.testing import IS_PYPY + from . import util diff --git a/numpy/f2py/tests/test_modules.py b/numpy/f2py/tests/test_modules.py index 436e0c700017..96d5ffc66093 100644 --- a/numpy/f2py/tests/test_modules.py +++ b/numpy/f2py/tests/test_modules.py @@ -1,9 +1,11 @@ -import pytest import textwrap -from . import util +import pytest + from numpy.testing import IS_PYPY +from . 
import util + @pytest.mark.slow class TestModuleFilterPublicEntities(util.F2PyTest): diff --git a/numpy/f2py/tests/test_pyf_src.py b/numpy/f2py/tests/test_pyf_src.py index f77ded2f31d4..2ecb0fbeb8c8 100644 --- a/numpy/f2py/tests/test_pyf_src.py +++ b/numpy/f2py/tests/test_pyf_src.py @@ -2,7 +2,6 @@ from numpy.f2py._src_pyf import process_str from numpy.testing import assert_equal - pyf_src = """ python module foo <_rd=real,double precision> diff --git a/numpy/f2py/tests/test_quoted_character.py b/numpy/f2py/tests/test_quoted_character.py index 85e83a781e7b..3cbcb3c55b4f 100644 --- a/numpy/f2py/tests/test_quoted_character.py +++ b/numpy/f2py/tests/test_quoted_character.py @@ -2,6 +2,7 @@ """ import sys + import pytest from . import util diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index bf994ffa07a5..93eb29e8e723 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -1,7 +1,8 @@ import os -import pytest import platform +import pytest + import numpy as np import numpy.testing as npt @@ -36,6 +37,16 @@ def test_mdat(self): assert self.module.simple_subroutine(5) == 1014 +class TestModuleWithDerivedType(util.F2PyTest): + # Check that modules with derived types work + sources = [util.getpath("tests", "src", "regression", "mod_derived_types.f90")] + + @pytest.mark.slow + def test_mtypes(self): + assert self.module.no_type_subroutine(10) == 110 + assert self.module.type_subroutine(10) == 210 + + class TestNegativeBounds(util.F2PyTest): # Check that negative bounds work correctly sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py index 37560b8097d7..aae3f0f91671 100644 --- a/numpy/f2py/tests/test_return_character.py +++ b/numpy/f2py/tests/test_return_character.py @@ -1,8 +1,10 @@ +import platform + import pytest from numpy import array + from . 
import util -import platform IS_S390X = platform.machine() == "s390x" diff --git a/numpy/f2py/tests/test_return_complex.py b/numpy/f2py/tests/test_return_complex.py index 7560ae78f7e1..aa3f28e679f8 100644 --- a/numpy/f2py/tests/test_return_complex.py +++ b/numpy/f2py/tests/test_return_complex.py @@ -1,6 +1,7 @@ import pytest from numpy import array + from . import util diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py index 4c2eaa9fbf0d..13a9f862f311 100644 --- a/numpy/f2py/tests/test_return_integer.py +++ b/numpy/f2py/tests/test_return_integer.py @@ -1,6 +1,7 @@ import pytest from numpy import array + from . import util diff --git a/numpy/f2py/tests/test_return_logical.py b/numpy/f2py/tests/test_return_logical.py index 712ce24da04f..a4a339572366 100644 --- a/numpy/f2py/tests/test_return_logical.py +++ b/numpy/f2py/tests/test_return_logical.py @@ -1,6 +1,7 @@ import pytest from numpy import array + from . import util diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py index e40f2a3bf84d..c871ed3d4fc2 100644 --- a/numpy/f2py/tests/test_return_real.py +++ b/numpy/f2py/tests/test_return_real.py @@ -1,8 +1,10 @@ import platform + import pytest from numpy import array from numpy.testing import IS_64BIT + from . import util diff --git a/numpy/f2py/tests/test_routines.py b/numpy/f2py/tests/test_routines.py index d6ab475d899e..01135dd692a6 100644 --- a/numpy/f2py/tests/test_routines.py +++ b/numpy/f2py/tests/test_routines.py @@ -1,4 +1,5 @@ import pytest + from . 
import util diff --git a/numpy/f2py/tests/test_semicolon_split.py b/numpy/f2py/tests/test_semicolon_split.py index 8a9eb8743501..2a16b191beba 100644 --- a/numpy/f2py/tests/test_semicolon_split.py +++ b/numpy/f2py/tests/test_semicolon_split.py @@ -1,4 +1,5 @@ import platform + import pytest from numpy.testing import IS_64BIT diff --git a/numpy/f2py/tests/test_size.py b/numpy/f2py/tests/test_size.py index b354711b457f..ac2eaf1413ef 100644 --- a/numpy/f2py/tests/test_size.py +++ b/numpy/f2py/tests/test_size.py @@ -1,4 +1,5 @@ import pytest + import numpy as np from . import util diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py index 1888f649f543..f484ea3f11a9 100644 --- a/numpy/f2py/tests/test_string.py +++ b/numpy/f2py/tests/test_string.py @@ -1,5 +1,7 @@ import pytest + import numpy as np + from . import util diff --git a/numpy/f2py/tests/test_symbolic.py b/numpy/f2py/tests/test_symbolic.py index 8452783111eb..ec23f522128b 100644 --- a/numpy/f2py/tests/test_symbolic.py +++ b/numpy/f2py/tests/test_symbolic.py @@ -1,34 +1,35 @@ import pytest from numpy.f2py.symbolic import ( - Expr, - Op, ArithOp, + Expr, Language, - as_symbol, - as_number, - as_string, + Op, + as_apply, as_array, as_complex, - as_terms, - as_factors, - eliminate_quotes, - insert_quotes, - fromstring, - as_expr, - as_apply, - as_numer_denom, - as_ternary, - as_ref, as_deref, - normalize, as_eq, - as_ne, - as_lt, + as_expr, + as_factors, + as_ge, as_gt, as_le, - as_ge, + as_lt, + as_ne, + as_number, + as_numer_denom, + as_ref, + as_string, + as_symbol, + as_terms, + as_ternary, + eliminate_quotes, + fromstring, + insert_quotes, + normalize, ) + from . import util diff --git a/numpy/f2py/tests/test_value_attrspec.py b/numpy/f2py/tests/test_value_attrspec.py index 1f3fa676ba8c..1afae08bfe0e 100644 --- a/numpy/f2py/tests/test_value_attrspec.py +++ b/numpy/f2py/tests/test_value_attrspec.py @@ -2,6 +2,7 @@ from . 
import util + class TestValueAttr(util.F2PyTest): sources = [util.getpath("tests", "src", "value_attrspec", "gh21665.f90")] diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index ab2a1b6f8710..35e5d3bd8ac0 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -6,23 +6,24 @@ - determining paths to tests """ +import atexit +import concurrent.futures +import contextlib import glob import os -import sys +import shutil import subprocess +import sys import tempfile -import shutil -import atexit +from importlib import import_module +from pathlib import Path + import pytest -import contextlib -import numpy -import concurrent.futures -from pathlib import Path +import numpy from numpy._utils import asunicode -from numpy.testing import temppath, IS_WASM -from importlib import import_module from numpy.f2py._backends._meson import MesonBackend +from numpy.testing import IS_WASM, temppath # # Check if compilers are available at all... diff --git a/numpy/f2py/use_rules.py b/numpy/f2py/use_rules.py index 1a53b871a173..1e06f6c01a39 100644 --- a/numpy/f2py/use_rules.py +++ b/numpy/f2py/use_rules.py @@ -13,10 +13,7 @@ f2py_version = 'See `f2py -v`' -from .auxfuncs import ( - applyrules, dictappend, gentitle, hasnote, outmess -) - +from .auxfuncs import applyrules, dictappend, gentitle, hasnote, outmess usemodule_rules = { 'body': """ diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py index aaea3ea2fe54..55f7320f653f 100644 --- a/numpy/fft/__init__.py +++ b/numpy/fft/__init__.py @@ -200,16 +200,16 @@ """ -from . import _pocketfft, _helper # TODO: `numpy.fft.helper`` was deprecated in NumPy 2.0. It should # be deleted once downstream libraries move to `numpy.fft`. -from . import helper -from ._pocketfft import * +from . 
import _helper, _pocketfft, helper from ._helper import * +from ._pocketfft import * __all__ = _pocketfft.__all__.copy() # noqa: PLE0605 __all__ += _helper.__all__ from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index feac6a7ff8a1..54d0ea8c79b6 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1,24 +1,24 @@ +from ._helper import ( + fftfreq, + fftshift, + ifftshift, + rfftfreq, +) from ._pocketfft import ( fft, - ifft, - rfft, - irfft, + fft2, + fftn, hfft, + ifft, + ifft2, + ifftn, ihfft, - rfftn, + irfft, + irfft2, irfftn, + rfft, rfft2, - irfft2, - fft2, - ifft2, - fftn, - ifftn, -) -from ._helper import ( - fftshift, - ifftshift, - fftfreq, - rfftfreq, + rfftn, ) __all__ = [ diff --git a/numpy/fft/_helper.py b/numpy/fft/_helper.py index 0fdf2aeb40e9..77adeac9207f 100644 --- a/numpy/fft/_helper.py +++ b/numpy/fft/_helper.py @@ -2,7 +2,7 @@ Discrete Fourier Transforms - _helper.py """ -from numpy._core import integer, empty, arange, asarray, roll +from numpy._core import arange, asarray, empty, integer, roll from numpy._core.overrides import array_function_dispatch, set_module # Created by Pearu Peterson, September 2002 diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index de43949f430f..d06bda7ad9a9 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -2,7 +2,14 @@ from typing import Any, Final, TypeVar, overload from typing import Literal as L from numpy import complexfloating, floating, generic, integer -from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ShapeLike +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ShapeLike, +) __all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 8abb7e862dbf..c7f2f6a8bc3a 100644 --- 
a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -33,12 +33,19 @@ import functools import warnings +from numpy._core import ( + asarray, + conjugate, + empty_like, + overrides, + reciprocal, + result_type, + sqrt, + take, +) from numpy.lib.array_utils import normalize_axis_index -from numpy._core import (asarray, empty_like, result_type, - conjugate, take, sqrt, reciprocal) -from . import _pocketfft_umath as pfu -from numpy._core import overrides +from . import _pocketfft_umath as pfu array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy.fft') @@ -198,8 +205,7 @@ def fft(a, n=None, axis=-1, norm=None, out=None): >>> t = np.arange(256) >>> sp = np.fft.fft(np.sin(t)) >>> freq = np.fft.fftfreq(t.shape[-1]) - >>> plt.plot(freq, sp.real, freq, sp.imag) - [, ] + >>> _ = plt.plot(freq, sp.real, freq, sp.imag) >>> plt.show() """ diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 4f5e5c944b4c..215cf14d1395 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -1,5 +1,6 @@ from collections.abc import Sequence -from typing import Literal as L, TypeAlias +from typing import Literal as L +from typing import TypeAlias from numpy import complex128, float64 from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index 525b5e5a23da..ab8af5aa522e 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -388,41 +388,57 @@ add_gufuncs(PyObject *dictionary) { return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_multiarray_umath", - NULL, - -1, - NULL, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -/* Initialization function for the module */ -PyMODINIT_FUNC PyInit__pocketfft_umath(void) +static int +_pocketfft_umath_exec(PyObject *m) { - PyObject *m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // 
https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; /* Import the array and ufunc objects */ - import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } PyObject *d = PyModule_GetDict(m); if (add_gufuncs(d) < 0) { Py_DECREF(d); - Py_DECREF(m); - return NULL; + return -1; } -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _pocketfft_umath_slots[] = { + {Py_mod_exec, (void*)_pocketfft_umath_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + "_pocketfft_umath", /* m_name */ + NULL, /* m_doc */ + 0, /* m_size */ + NULL, /* m_methods */ + _pocketfft_umath_slots, /* m_slots */ +}; - return m; +PyMODINIT_FUNC PyInit__pocketfft_umath(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py index 4375cedf7fcf..08d5662c6d17 100644 --- a/numpy/fft/helper.py +++ b/numpy/fft/helper.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): import warnings + from numpy.fft import _helper ret = getattr(_helper, attr_name, None) if ret is None: diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi index 887cbe7e27c9..7cf391a12e1d 100644 --- a/numpy/fft/helper.pyi +++ b/numpy/fft/helper.pyi @@ -1,6 +1,5 @@ from typing import Any from typing import Literal as L - from 
typing_extensions import deprecated import numpy as np diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py index 0255e053c3a6..c02a73639331 100644 --- a/numpy/fft/tests/test_helper.py +++ b/numpy/fft/tests/test_helper.py @@ -4,8 +4,8 @@ """ import numpy as np -from numpy.testing import assert_array_almost_equal from numpy import fft, pi +from numpy.testing import assert_array_almost_equal class TestFFTShift: @@ -84,8 +84,8 @@ def test_uneven_dims(self): assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) def test_equal_to_original(self): - """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """ - from numpy._core import asarray, concatenate, arange, take + """ Test the new (>=v1.15) and old implementations are equal (see #10073) """ + from numpy._core import arange, asarray, concatenate, take def original_fftshift(x, axes=None): """ How fftshift was implemented in v1.14""" diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index 58562d06ee04..021181845b3b 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -1,11 +1,11 @@ -import numpy as np +import queue +import threading + import pytest + +import numpy as np from numpy.random import random -from numpy.testing import ( - assert_array_equal, assert_raises, assert_allclose, IS_WASM - ) -import threading -import queue +from numpy.testing import IS_WASM, assert_allclose, assert_array_equal, assert_raises def fft1(x): diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index 82297f488264..a248d048f0ec 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -10,38 +10,40 @@ # Public submodules # Note: recfunctions is public, but not imported -from . import array_utils -from . import format -from . import introspect -from . import mixins -from . import npyio -from . import scimath -from . 
import stride_tricks +from numpy._core._multiarray_umath import add_docstring, tracemalloc_domain +from numpy._core.function_base import add_newdoc # Private submodules # load module names. See https://github.com/networkx/networkx/issues/5838 -from . import _type_check_impl -from . import _index_tricks_impl -from . import _nanfunctions_impl -from . import _function_base_impl -from . import _stride_tricks_impl -from . import _shape_base_impl -from . import _twodim_base_impl -from . import _ufunclike_impl -from . import _histograms_impl -from . import _utils_impl -from . import _arraysetops_impl -from . import _polynomial_impl -from . import _npyio_impl -from . import _arrayterator_impl -from . import _arraypad_impl -from . import _version +from . import ( + _arraypad_impl, + _arraysetops_impl, + _arrayterator_impl, + _function_base_impl, + _histograms_impl, + _index_tricks_impl, + _nanfunctions_impl, + _npyio_impl, + _polynomial_impl, + _shape_base_impl, + _stride_tricks_impl, + _twodim_base_impl, + _type_check_impl, + _ufunclike_impl, + _utils_impl, + _version, + array_utils, + format, + introspect, + mixins, + npyio, + scimath, + stride_tricks, +) # numpy.lib namespace members from ._arrayterator_impl import Arrayterator from ._version import NumpyVersion -from numpy._core._multiarray_umath import add_docstring, tracemalloc_domain -from numpy._core.function_base import add_newdoc __all__ = [ "Arrayterator", "add_docstring", "add_newdoc", "array_utils", @@ -52,6 +54,7 @@ add_newdoc.__module__ = "numpy.lib" from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index 19d6ea7a4d3f..6185a494d035 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,15 +1,39 @@ -from numpy._core.multiarray import add_docstring, tracemalloc_domain from numpy._core.function_base import add_newdoc +from numpy._core.multiarray import add_docstring, tracemalloc_domain -from . 
import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks # noqa: F401 -from ._version import NumpyVersion +# all submodules of `lib` are accessible at runtime through `__getattr__`, +# so we implicitly re-export them here +from . import _array_utils_impl as _array_utils_impl +from . import _arraypad_impl as _arraypad_impl +from . import _arraysetops_impl as _arraysetops_impl +from . import _arrayterator_impl as _arrayterator_impl +from . import _datasource as _datasource +from . import _format_impl as _format_impl +from . import _function_base_impl as _function_base_impl +from . import _histograms_impl as _histograms_impl +from . import _index_tricks_impl as _index_tricks_impl +from . import _iotools as _iotools +from . import _nanfunctions_impl as _nanfunctions_impl +from . import _npyio_impl as _npyio_impl +from . import _polynomial_impl as _polynomial_impl +from . import _scimath_impl as _scimath_impl +from . import _shape_base_impl as _shape_base_impl +from . import _stride_tricks_impl as _stride_tricks_impl +from . import _twodim_base_impl as _twodim_base_impl +from . import _type_check_impl as _type_check_impl +from . import _ufunclike_impl as _ufunclike_impl +from . import _utils_impl as _utils_impl +from . import _version as _version +from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks from ._arrayterator_impl import Arrayterator +from ._version import NumpyVersion __all__ = [ "Arrayterator", "add_docstring", "add_newdoc", "array_utils", + "format", "introspect", "mixins", "NumpyVersion", diff --git a/numpy/lib/_array_utils_impl.py b/numpy/lib/_array_utils_impl.py index d7b239c2308b..c3996e1f2b92 100644 --- a/numpy/lib/_array_utils_impl.py +++ b/numpy/lib/_array_utils_impl.py @@ -2,7 +2,7 @@ Miscellaneous utils. 
""" from numpy._core import asarray -from numpy._core.numeric import normalize_axis_tuple, normalize_axis_index +from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple from numpy._utils import set_module __all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] diff --git a/numpy/lib/_array_utils_impl.pyi b/numpy/lib/_array_utils_impl.pyi index a70de058bf8e..d3e0714773f2 100644 --- a/numpy/lib/_array_utils_impl.pyi +++ b/numpy/lib/_array_utils_impl.pyi @@ -1,5 +1,5 @@ -from typing import Any from collections.abc import Iterable +from typing import Any from numpy import generic from numpy.typing import NDArray diff --git a/numpy/lib/_arraypad_impl.py b/numpy/lib/_arraypad_impl.py index 3956875bf4f5..507a0ab51b52 100644 --- a/numpy/lib/_arraypad_impl.py +++ b/numpy/lib/_arraypad_impl.py @@ -7,7 +7,6 @@ from numpy._core.overrides import array_function_dispatch from numpy.lib._index_tricks_impl import ndindex - __all__ = ['pad'] diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index d82495c3d934..46b43762b87f 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -1,20 +1,21 @@ from typing import ( - Literal as L, Any, + Protocol, TypeAlias, - overload, TypeVar, - Protocol, + overload, type_check_only, ) +from typing import ( + Literal as L, +) from numpy import generic - from numpy._typing import ( ArrayLike, NDArray, - _ArrayLikeInt, _ArrayLike, + _ArrayLikeInt, ) __all__ = ["pad"] diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index a284a9204112..ef0739ba486f 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -20,9 +20,7 @@ import numpy as np from numpy._core import overrides -from numpy._core._multiarray_umath import _array_converter -from numpy._core._multiarray_umath import _unique_hash - +from numpy._core._multiarray_umath import _array_converter, _unique_hash array_function_dispatch = functools.partial( 
overrides.array_function_dispatch, module='numpy') diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 39fe9b7737ff..4279b809f78e 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -1,10 +1,15 @@ from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload from typing import Literal as L - from typing_extensions import TypeVar, deprecated import numpy as np -from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeNumber_co +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeNumber_co, +) __all__ = [ "ediff1d", diff --git a/numpy/lib/_arrayterator_impl.py b/numpy/lib/_arrayterator_impl.py index 5bb1630a9300..5f7c5fc4fb65 100644 --- a/numpy/lib/_arrayterator_impl.py +++ b/numpy/lib/_arrayterator_impl.py @@ -7,8 +7,8 @@ a user-specified number of elements. """ -from operator import mul from functools import reduce +from operator import mul __all__ = ['Arrayterator'] diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index 060ff37d5c66..5fd589a3ac36 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -3,16 +3,16 @@ from collections.abc import Generator from types import EllipsisType from typing import Any, Final, TypeAlias, overload - from typing_extensions import TypeVar import numpy as np +from numpy._typing import _AnyShape, _Shape __all__ = ["Arrayterator"] -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) _DTypeT = TypeVar("_DTypeT", bound=np.dtype) -_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) _ScalarT = TypeVar("_ScalarT", bound=np.generic) _AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | 
slice, ...] @@ -35,8 +35,8 @@ class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): # def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ... - def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[tuple[int, ...], _DTypeT_co]: ... # type: ignore[override] - def __iter__(self) -> Generator[np.ndarray[tuple[int, ...], _DTypeT_co]]: ... + def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[_AnyShape, _DTypeT_co]: ... # type: ignore[override] + def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ... # @overload # type: ignore[override] diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 5dafb0ee3843..72398c5479f8 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -38,7 +38,6 @@ from numpy._utils import set_module - _open = open @@ -462,8 +461,8 @@ def exists(self, path): # We import this here because importing urllib is slow and # a significant fraction of numpy's total import time. 
- from urllib.request import urlopen from urllib.error import URLError + from urllib.request import urlopen # Test cached url upath = self.abspath(path) diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi index 9f91fdf893a0..ad52b7f67af0 100644 --- a/numpy/lib/_datasource.pyi +++ b/numpy/lib/_datasource.pyi @@ -1,8 +1,7 @@ +from _typeshed import OpenBinaryMode, OpenTextMode from pathlib import Path from typing import IO, Any, TypeAlias -from _typeshed import OpenBinaryMode, OpenTextMode - _Mode: TypeAlias = OpenBinaryMode | OpenTextMode ### diff --git a/numpy/lib/_format_impl.py b/numpy/lib/_format_impl.py index 6ef3f3cec023..7378ba554810 100644 --- a/numpy/lib/_format_impl.py +++ b/numpy/lib/_format_impl.py @@ -167,9 +167,8 @@ import warnings import numpy -from numpy.lib._utils_impl import drop_metadata from numpy._utils import set_module - +from numpy.lib._utils_impl import drop_metadata __all__ = [] @@ -724,7 +723,7 @@ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): pickle_kwargs : dict, optional Additional keyword arguments to pass to pickle.dump, excluding 'protocol'. These are only useful when pickling objects in object - arrays on Python 3 to Python 2 compatible format. + arrays to Python 2 compatible format. 
Raises ------ @@ -769,14 +768,13 @@ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): array, flags=['external_loop', 'buffered', 'zerosize_ok'], buffersize=buffersize, order='F'): fp.write(chunk.tobytes('C')) + elif isfileobj(fp): + array.tofile(fp) else: - if isfileobj(fp): - array.tofile(fp) - else: - for chunk in numpy.nditer( - array, flags=['external_loop', 'buffered', 'zerosize_ok'], - buffersize=buffersize, order='C'): - fp.write(chunk.tobytes('C')) + for chunk in numpy.nditer( + array, flags=['external_loop', 'buffered', 'zerosize_ok'], + buffersize=buffersize, order='C'): + fp.write(chunk.tobytes('C')) @set_module("numpy.lib.format") @@ -794,8 +792,7 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, Whether to allow writing pickled data. Default: False pickle_kwargs : dict Additional keyword arguments to pass to pickle.load. These are only - useful when loading object arrays saved on Python 2 when using - Python 3. + useful when loading object arrays saved on Python 2. max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. 
diff --git a/numpy/lib/_format_impl.pyi b/numpy/lib/_format_impl.pyi index e0684adeea7a..b45df02796d7 100644 --- a/numpy/lib/_format_impl.pyi +++ b/numpy/lib/_format_impl.pyi @@ -1,26 +1,56 @@ -from typing import Literal, Final +import os +from _typeshed import SupportsRead, SupportsWrite +from typing import Any, BinaryIO, Final, TypeAlias, TypeGuard -from numpy.lib._utils_impl import drop_metadata +import numpy as np +import numpy.typing as npt +from numpy.lib._utils_impl import drop_metadata as drop_metadata __all__: list[str] = [] -EXPECTED_KEYS: Final[set[str]] -MAGIC_PREFIX: Final[bytes] -MAGIC_LEN: Literal[8] -ARRAY_ALIGN: Literal[64] -BUFFER_SIZE: Literal[262144] # 2**18 -GROWTH_AXIS_MAX_DIGITS: Literal[21] +_DTypeDescr: TypeAlias = list[tuple[str, str]] | list[tuple[str, str, tuple[int, ...]]] -def magic(major, minor): ... -def read_magic(fp): ... -def dtype_to_descr(dtype): ... -def descr_to_dtype(descr): ... -def header_data_from_array_1_0(array): ... -def write_array_header_1_0(fp, d): ... -def write_array_header_2_0(fp, d): ... -def read_array_header_1_0(fp): ... -def read_array_header_2_0(fp): ... -def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ... -def read_array(fp, allow_pickle=..., pickle_kwargs=...): ... -def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ... -def isfileobj(f): ... +### + +EXPECTED_KEYS: Final[set[str]] = ... +MAGIC_PREFIX: Final = b"\x93NUMPY" +MAGIC_LEN: Final = 8 +ARRAY_ALIGN: Final = 64 +BUFFER_SIZE: Final = 262_144 # 1 << 18 +GROWTH_AXIS_MAX_DIGITS: Final = 21 +_MAX_HEADER_SIZE: Final = 10_000 + +def magic(major: int, minor: int) -> bytes: ... +def read_magic(fp: SupportsRead[bytes]) -> tuple[int, int]: ... +def dtype_to_descr(dtype: np.dtype) -> _DTypeDescr: ... +def descr_to_dtype(descr: _DTypeDescr) -> np.dtype: ... +def header_data_from_array_1_0(array: np.ndarray) -> dict[str, Any]: ... 
+def write_array_header_1_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def write_array_header_2_0(fp: SupportsWrite[bytes], d: dict[str, Any]) -> None: ... +def read_array_header_1_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... +def read_array_header_2_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> tuple[tuple[int, ...], bool, np.dtype]: ... +def write_array( + fp: SupportsWrite[bytes], + array: np.ndarray, + version: tuple[int, int] | None = None, + allow_pickle: bool = True, + pickle_kwargs: dict[str, Any] | None = None, +) -> None: ... +def read_array( + fp: SupportsRead[bytes], + allow_pickle: bool = False, + pickle_kwargs: dict[str, Any] | None = None, + *, + max_header_size: int = 10_000, +) -> np.ndarray: ... +def open_memmap( + filename: str | os.PathLike[Any], + mode: str = "r+", + dtype: npt.DTypeLike | None = None, + shape: tuple[int, ...] | None = None, + fortran_order: bool = False, + version: tuple[int, int] | None = None, + *, + max_header_size: int = 10_000, +) -> np.memmap: ... +def isfileobj(f: object) -> TypeGuard[BinaryIO]: ... 
# don't use `typing.TypeIs` diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index e44b27a68adb..096043e6316f 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -7,30 +7,51 @@ import numpy as np import numpy._core.numeric as _nx -from numpy._core import transpose, overrides +from numpy._core import overrides, transpose +from numpy._core._multiarray_umath import _array_converter +from numpy._core.fromnumeric import any, mean, nonzero, partition, ravel, sum +from numpy._core.multiarray import _monotonicity, _place, bincount, normalize_axis_index +from numpy._core.multiarray import interp as compiled_interp +from numpy._core.multiarray import interp_complex as compiled_interp_complex from numpy._core.numeric import ( - ones, zeros_like, arange, concatenate, array, asarray, asanyarray, empty, - ndarray, take, dot, where, intp, integer, isscalar, absolute - ) -from numpy._core.umath import ( - pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, - mod, exp, not_equal, subtract, minimum - ) -from numpy._core.fromnumeric import ( - ravel, nonzero, partition, mean, any, sum - ) + absolute, + arange, + array, + asanyarray, + asarray, + concatenate, + dot, + empty, + integer, + intp, + isscalar, + ndarray, + ones, + take, + where, + zeros_like, +) from numpy._core.numerictypes import typecodes -from numpy.lib._twodim_base_impl import diag -from numpy._core.multiarray import ( - _place, bincount, normalize_axis_index, _monotonicity, - interp as compiled_interp, interp_complex as compiled_interp_complex - ) -from numpy._core._multiarray_umath import _array_converter +from numpy._core.umath import ( + add, + arctan2, + cos, + exp, + frompyfunc, + less_equal, + minimum, + mod, + not_equal, + pi, + sin, + sqrt, + subtract, +) from numpy._utils import set_module # needed in this module for compatibility from numpy.lib._histograms_impl import histogram, histogramdd # noqa: F401 - +from 
numpy.lib._twodim_base_impl import diag array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -996,7 +1017,8 @@ def gradient(f, *varargs, axis=None, edge_order=1): the corresponding dimension 4. Any combination of N scalars/arrays with the meaning of 2. and 3. - If `axis` is given, the number of varargs must equal the number of axes specified in the axis parameter. + If `axis` is given, the number of varargs must equal the number of axes + specified in the axis parameter. Default: 1. (see Examples below). edge_order : {1, 2}, optional @@ -1292,7 +1314,8 @@ def gradient(f, *varargs, axis=None, edge_order=1): shape[axis] = -1 a.shape = b.shape = c.shape = shape # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] # Numerical differentiation: 1st order edges if edge_order == 1: @@ -1327,7 +1350,8 @@ def gradient(f, *varargs, axis=None, edge_order=1): b = (dx1 + dx2) / (dx1 * dx2) c = - dx1 / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] slice1[axis] = -1 slice2[axis] = -3 @@ -1344,7 +1368,8 @@ def gradient(f, *varargs, axis=None, edge_order=1): b = - (dx2 + dx1) / (dx1 * dx2) c = (2. 
* dx2 + dx1) / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] - out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \ + + c * f[tuple(slice4)] outvals.append(out) @@ -2137,7 +2162,6 @@ def disp(mesg, device=None, linefeed=True): else: device.write(f'{mesg}') device.flush() - return # See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html @@ -2549,6 +2573,7 @@ def _get_ufunc_and_otypes(self, func, args): # the subsequent call when the ufunc is evaluated. # Assumes that ufunc first evaluates the 0th elements in the input # arrays (the input values are not checked to ensure this) + args = [asarray(a) for a in args] if builtins.any(arg.size == 0 for arg in args): raise ValueError('cannot call `vectorize` on size 0 inputs ' 'unless `otypes` is set') @@ -2594,8 +2619,9 @@ def _vectorize_call(self, func, args): elif not args: res = func() else: - args = [asanyarray(a, dtype=object) for a in args] ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + # gh-29196: `dtype=object` should eventually be removed + args = [asanyarray(a, dtype=object) for a in args] outputs = ufunc(*args, out=...) 
if ufunc.nout == 1: @@ -3865,11 +3891,10 @@ def _ureduce(a, func, keepdims=False, **kwargs): if axis is not None: axis = _nx.normalize_axis_tuple(axis, nd) - if keepdims: - if out is not None: - index_out = tuple( - 0 if i in axis else slice(None) for i in range(nd)) - kwargs['out'] = out[(Ellipsis, ) + index_out] + if keepdims and out is not None: + index_out = tuple( + 0 if i in axis else slice(None) for i in range(nd)) + kwargs['out'] = out[(Ellipsis, ) + index_out] if len(axis) == 1: kwargs['axis'] = axis[0] @@ -3882,11 +3907,9 @@ def _ureduce(a, func, keepdims=False, **kwargs): # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 - else: - if keepdims: - if out is not None: - index_out = (0, ) * nd - kwargs['out'] = out[(Ellipsis, ) + index_out] + elif keepdims and out is not None: + index_out = (0, ) * nd + kwargs['out'] = out[(Ellipsis, ) + index_out] r = func(a, **kwargs) @@ -4560,9 +4583,8 @@ def _quantile_is_valid(q): for i in range(q.size): if not (0.0 <= q[i] <= 1.0): return False - else: - if not (q.min() >= 0 and q.max() <= 1): - return False + elif not (q.min() >= 0 and q.max() <= 1): + return False return True @@ -4691,14 +4713,14 @@ def _inverted_cdf(n, quantiles): def _quantile_ureduce_func( - a: np.array, - q: np.array, - weights: np.array, - axis: int | None = None, - out=None, - overwrite_input: bool = False, - method="linear", -) -> np.array: + a: np.ndarray, + q: np.ndarray, + weights: np.ndarray, + axis: int | None = None, + out: np.ndarray | None = None, + overwrite_input: bool = False, + method: str = "linear", +) -> np.ndarray: if q.ndim > 2: # The code below works fine for nd, but it might not have useful # semantics. 
For now, keep the supported dimensions the same as it was @@ -4712,14 +4734,13 @@ def _quantile_ureduce_func( else: arr = a wgt = weights + elif axis is None: + axis = 0 + arr = a.flatten() + wgt = None if weights is None else weights.flatten() else: - if axis is None: - axis = 0 - arr = a.flatten() - wgt = None if weights is None else weights.flatten() - else: - arr = a.copy() - wgt = weights + arr = a.copy() + wgt = weights result = _quantile(arr, quantiles=q, axis=axis, @@ -4765,13 +4786,13 @@ def _get_indexes(arr, virtual_indexes, valid_values_count): def _quantile( - arr: np.array, - quantiles: np.array, - axis: int = -1, - method="linear", - out=None, - weights=None, -): + arr: "np.typing.ArrayLike", + quantiles: np.ndarray, + axis: int = -1, + method: str = "linear", + out: np.ndarray | None = None, + weights: "np.typing.ArrayLike | None" = None, +) -> np.ndarray: """ Private function that doesn't support extended axis or keepdims. These methods are extended to this function using _ureduce diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 0f9ddd92b8fc..cb6e18b53fa4 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,4 +1,5 @@ # ruff: noqa: ANN401 +from _typeshed import Incomplete from collections.abc import Callable, Iterable, Sequence from typing import ( Any, @@ -13,8 +14,6 @@ from typing import ( type_check_only, ) from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import TypeIs, deprecated import numpy as np @@ -105,7 +104,7 @@ _Pss = ParamSpec("_Pss") _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT1 = TypeVar("_ScalarT1", bound=generic) _ScalarT2 = TypeVar("_ScalarT2", bound=generic) -_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) _2Tuple: TypeAlias = tuple[_T, _T] _MeshgridIdx: TypeAlias = L['ij', 'xy'] diff --git a/numpy/lib/_histograms_impl.py b/numpy/lib/_histograms_impl.py 
index 19237a17dfdc..b4aacd057eaa 100644 --- a/numpy/lib/_histograms_impl.py +++ b/numpy/lib/_histograms_impl.py @@ -123,8 +123,9 @@ def _hist_bin_stone(x, range): """ Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). - The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution. - The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule. + The number of bins is chosen by minimizing the estimated ISE against the unknown + true distribution. The ISE is estimated using cross-validation and can be regarded + as a generalization of Scott's rule. https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule This paper by Stone appears to be the origination of this rule. @@ -141,7 +142,7 @@ def _hist_bin_stone(x, range): Returns ------- h : An estimate of the optimal bin width for the given data. - """ + """ # noqa: E501 n = x.size ptp_x = _ptp(x) @@ -405,7 +406,8 @@ def _get_bin_edges(a, bins, range, weights): if width: if np.issubdtype(a.dtype, np.integer) and width < 1: width = 1 - n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width)) + delta = _unsigned_subtract(last_edge, first_edge) + n_equal_bins = int(np.ceil(delta / width)) else: # Width can be zero for some estimators, e.g. FD when # the IQR of the data is zero. 
diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index e91bf2531366..5e7afb5e397b 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -1,14 +1,16 @@ from collections.abc import Sequence from typing import ( - Literal as L, Any, SupportsIndex, TypeAlias, ) +from typing import ( + Literal as L, +) from numpy._typing import ( - NDArray, ArrayLike, + NDArray, ) __all__ = ["histogram", "histogramdd", "histogram_bin_edges"] diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 7fe0539fa86d..131bbae5d098 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -1,20 +1,18 @@ import functools -import sys import math +import sys import warnings import numpy as np -from numpy._utils import set_module import numpy._core.numeric as _nx -from numpy._core.numeric import ScalarType, array -from numpy._core.numerictypes import issubdtype - import numpy.matrixlib as matrixlib +from numpy._core import linspace, overrides from numpy._core.multiarray import ravel_multi_index, unravel_index -from numpy._core import overrides, linspace -from numpy.lib.stride_tricks import as_strided +from numpy._core.numeric import ScalarType, array +from numpy._core.numerictypes import issubdtype +from numpy._utils import set_module from numpy.lib._function_base_impl import diff - +from numpy.lib.stride_tricks import as_strided array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index ef6e42200deb..c4509d9aa3ad 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,8 +1,7 @@ +from _typeshed import Incomplete from collections.abc import Sequence from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import 
TypeVar, deprecated import numpy as np @@ -10,9 +9,9 @@ from numpy._core.multiarray import ravel_multi_index, unravel_index from numpy._typing import ( ArrayLike, NDArray, + _AnyShape, _FiniteNestedSequence, _NestedSequence, - _Shape, _SupportsArray, _SupportsDType, ) @@ -74,11 +73,11 @@ class ndenumerate(Generic[_ScalarT_co]): def __next__( self: ndenumerate[np.bool | np.number | np.flexible | np.datetime64 | np.timedelta64], /, - ) -> tuple[tuple[int, ...], _ScalarT_co]: ... + ) -> tuple[_AnyShape, _ScalarT_co]: ... @overload - def __next__(self: ndenumerate[np.object_], /) -> tuple[tuple[int, ...], Any]: ... + def __next__(self: ndenumerate[np.object_], /) -> tuple[_AnyShape, Incomplete]: ... @overload - def __next__(self, /) -> tuple[tuple[int, ...], _ScalarT_co]: ... + def __next__(self, /) -> tuple[_AnyShape, _ScalarT_co]: ... # def __iter__(self) -> Self: ... @@ -91,7 +90,7 @@ class ndindex: # def __iter__(self) -> Self: ... - def __next__(self) -> tuple[int, ...]: ... + def __next__(self) -> _AnyShape: ... # @deprecated("Deprecated since 1.20.0.") @@ -101,9 +100,9 @@ class nd_grid(Generic[_BoolT_co]): sparse: _BoolT_co def __init__(self, sparse: _BoolT_co = ...) -> None: ... @overload - def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Any]: ... + def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Incomplete]: ... @overload - def __getitem__(self: nd_grid[L[True]], key: slice | Sequence[slice]) -> tuple[NDArray[Any], ...]: ... + def __getitem__(self: nd_grid[L[True]], key: slice | Sequence[slice]) -> tuple[NDArray[Incomplete], ...]: ... @final class MGridClass(nd_grid[L[False]]): @@ -143,7 +142,7 @@ class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co] def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: _ArrayT) -> _ArrayT: ... @staticmethod @overload - def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Any]: ... 
+ def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Incomplete]: ... @final class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): @@ -164,7 +163,7 @@ class IndexExpression(Generic[_BoolT_co]): def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... @overload -def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_Shape, _DTypeT], ...]: ... +def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... @overload def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... @overload diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 56ee65d38575..3586b41de86c 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -3,17 +3,17 @@ """ __docformat__ = "restructuredtext en" +import itertools + import numpy as np import numpy._core.numeric as nx from numpy._utils import asbytes, asunicode -import itertools def _decode_line(line, encoding=None): """Decode bytes from binary input streams. - Defaults to decoding from 'latin1'. That differs from the behavior of - np.compat.asunicode that decodes from 'ascii'. + Defaults to decoding from 'latin1'. 
Parameters ---------- diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi index 28349000aeba..82275940e137 100644 --- a/numpy/lib/_iotools.pyi +++ b/numpy/lib/_iotools.pyi @@ -1,13 +1,24 @@ from collections.abc import Callable, Iterable, Sequence -from typing import Any, ClassVar, Final, Literal, TypedDict, TypeVar, Unpack, overload, type_check_only +from typing import ( + Any, + ClassVar, + Final, + Literal, + TypedDict, + TypeVar, + Unpack, + overload, + type_check_only, +) import numpy as np import numpy.typing as npt +from numpy._typing._dtype_like import _DTypeLikeNested _T = TypeVar("_T") @type_check_only -class _ValidationKwargs(TypedDict, total=False): +class _NameValidatorKwargs(TypedDict, total=False): excludelist: Iterable[str] | None deletechars: Iterable[str] | None case_sensitive: Literal["upper", "lower"] | bool | None @@ -15,7 +26,7 @@ class _ValidationKwargs(TypedDict, total=False): ### -__docformat__: Final[str] = "restructuredtext en" +__docformat__: Final = "restructuredtext en" class ConverterError(Exception): ... class ConverterLockError(ConverterError): ... @@ -88,17 +99,18 @@ class StringConverter: @classmethod def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... +def _decode_line(line: str | bytes, encoding: str) -> str: ... +def _is_string_like(obj: object) -> bool: ... +def _is_bytes_like(obj: object) -> bool: ... +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... @overload def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... @overload def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... - -# -def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... -def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... 
def easy_dtype( - ndtype: npt.DTypeLike, - names: Iterable[str] | None = None, + ndtype: str | Sequence[_DTypeLikeNested], + names: str | Sequence[str] | None = None, defaultfmt: str = "f%i", - **validationargs: Unpack[_ValidationKwargs], + **validationargs: Unpack[_NameValidatorKwargs], ) -> np.dtype[np.void]: ... diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index 563a8574cd13..4a01490301c8 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -22,12 +22,12 @@ """ import functools import warnings + import numpy as np import numpy._core.numeric as _nx +from numpy._core import overrides from numpy.lib import _function_base_impl as fnb from numpy.lib._function_base_impl import _weights_are_valid -from numpy._core import overrides - array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -232,17 +232,16 @@ def _divide_by_count(a, b, out=None): return np.divide(a, b, out=a, casting='unsafe') else: return np.divide(a, b, out=out, casting='unsafe') + elif out is None: + # Precaution against reduced object arrays + try: + return a.dtype.type(a / b) + except AttributeError: + return a / b else: - if out is None: - # Precaution against reduced object arrays - try: - return a.dtype.type(a / b) - except AttributeError: - return a / b - else: - # This is questionable, but currently a numpy scalar can - # be output to a zero dimensional array. - return np.divide(a, b, out=out, casting='unsafe') + # This is questionable, but currently a numpy scalar can + # be output to a zero dimensional array. 
+ return np.divide(a, b, out=out, casting='unsafe') def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None, @@ -1644,37 +1643,36 @@ def _nanquantile_ureduce_func( part = a.ravel() wgt = None if weights is None else weights.ravel() result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt) + # Note that this code could try to fill in `out` right away + elif weights is None: + result = np.apply_along_axis(_nanquantile_1d, axis, a, q, + overwrite_input, method, weights) + # apply_along_axis fills in collapsed axis with results. + # Move those axes to the beginning to match percentile's + # convention. + if q.ndim != 0: + from_ax = [axis + i for i in range(q.ndim)] + result = np.moveaxis(result, from_ax, list(range(q.ndim))) else: - # Note that this code could try to fill in `out` right away - if weights is None: - result = np.apply_along_axis(_nanquantile_1d, axis, a, q, - overwrite_input, method, weights) - # apply_along_axis fills in collapsed axis with results. - # Move those axes to the beginning to match percentile's - # convention. - if q.ndim != 0: - from_ax = [axis + i for i in range(q.ndim)] - result = np.moveaxis(result, from_ax, list(range(q.ndim))) + # We need to apply along axis over 2 arrays, a and weights. + # move operation axes to end for simplicity: + a = np.moveaxis(a, axis, -1) + if weights is not None: + weights = np.moveaxis(weights, axis, -1) + if out is not None: + result = out else: - # We need to apply along axis over 2 arrays, a and weights. 
- # move operation axes to end for simplicity: - a = np.moveaxis(a, axis, -1) - if weights is not None: - weights = np.moveaxis(weights, axis, -1) - if out is not None: - result = out - else: - # weights are limited to `inverted_cdf` so the result dtype - # is known to be identical to that of `a` here: - result = np.empty_like(a, shape=q.shape + a.shape[:-1]) - - for ii in np.ndindex(a.shape[:-1]): - result[(...,) + ii] = _nanquantile_1d( - a[ii], q, weights=weights[ii], - overwrite_input=overwrite_input, method=method, - ) - # This path dealt with `out` already... - return result + # weights are limited to `inverted_cdf` so the result dtype + # is known to be identical to that of `a` here: + result = np.empty_like(a, shape=q.shape + a.shape[:-1]) + + for ii in np.ndindex(a.shape[:-1]): + result[(...,) + ii] = _nanquantile_1d( + a[ii], q, weights=weights[ii], + overwrite_input=overwrite_input, method=method, + ) + # This path dealt with `out` already... + return result if out is not None: out[...] = result diff --git a/numpy/lib/_nanfunctions_impl.pyi b/numpy/lib/_nanfunctions_impl.pyi index 081b53d8ea44..f39800d58d07 100644 --- a/numpy/lib/_nanfunctions_impl.pyi +++ b/numpy/lib/_nanfunctions_impl.pyi @@ -1,17 +1,16 @@ from numpy._core.fromnumeric import ( - amin, amax, - argmin, + amin, argmax, - sum, - prod, - cumsum, + argmin, cumprod, + cumsum, mean, + prod, + std, + sum, var, - std ) - from numpy.lib._function_base_impl import ( median, percentile, diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 0fff58f1601d..f284eeb74834 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1,33 +1,41 @@ """ IO related functions. 
""" -import os -import re +import contextlib import functools import itertools +import operator +import os +import pickle +import re import warnings import weakref -import contextlib -import operator -from operator import itemgetter from collections.abc import Mapping -import pickle +from operator import itemgetter import numpy as np -from . import format -from ._datasource import DataSource -from ._format_impl import _MAX_HEADER_SIZE from numpy._core import overrides -from numpy._core.multiarray import packbits, unpackbits from numpy._core._multiarray_umath import _load_from_filelike +from numpy._core.multiarray import packbits, unpackbits from numpy._core.overrides import finalize_array_function_like, set_module -from ._iotools import ( - LineSplitter, NameValidator, StringConverter, ConverterError, - ConverterLockError, ConversionWarning, _is_string_like, - has_nested_fields, flatten_dtype, easy_dtype, _decode_line - ) -from numpy._utils import asunicode, asbytes +from numpy._utils import asbytes, asunicode +from . import format +from ._datasource import DataSource # noqa: F401 +from ._format_impl import _MAX_HEADER_SIZE +from ._iotools import ( + ConversionWarning, + ConverterError, + ConverterLockError, + LineSplitter, + NameValidator, + StringConverter, + _decode_line, + _is_string_like, + easy_dtype, + flatten_dtype, + has_nested_fields, +) __all__ = [ 'savetxt', 'loadtxt', 'genfromtxt', 'load', 'save', 'savez', @@ -136,7 +144,7 @@ class NpzFile(Mapping): pickle_kwargs : dict, optional Additional keyword arguments to pass on to pickle.load. These are only useful when loading object arrays saved on - Python 2 when using Python 3. + Python 2. max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. 
@@ -187,16 +195,13 @@ def __init__(self, fid, own_fid=False, allow_pickle=False, # Import is postponed to here since zipfile depends on gzip, an # optional component of the so-called standard library. _zip = zipfile_factory(fid) - self._files = _zip.namelist() - self.files = [] + _files = _zip.namelist() + self.files = [name.removesuffix(".npy") for name in _files] + self._files = dict(zip(self.files, _files)) + self._files.update(zip(_files, _files)) self.allow_pickle = allow_pickle self.max_header_size = max_header_size self.pickle_kwargs = pickle_kwargs - for x in self._files: - if x.endswith('.npy'): - self.files.append(x[:-4]) - else: - self.files.append(x) self.zip = _zip self.f = BagObj(self) if own_fid: @@ -232,37 +237,34 @@ def __len__(self): return len(self.files) def __getitem__(self, key): - # FIXME: This seems like it will copy strings around - # more than is strictly necessary. The zipfile - # will read the string and then - # the format.read_array will copy the string - # to another place in memory. - # It would be better if the zipfile could read - # (or at least uncompress) the data - # directly into the array memory. 
- member = False - if key in self._files: - member = True - elif key in self.files: - member = True - key += '.npy' - if member: - bytes = self.zip.open(key) - magic = bytes.read(len(format.MAGIC_PREFIX)) - bytes.close() - if magic == format.MAGIC_PREFIX: - bytes = self.zip.open(key) - return format.read_array(bytes, - allow_pickle=self.allow_pickle, - pickle_kwargs=self.pickle_kwargs, - max_header_size=self.max_header_size) - else: - return self.zip.read(key) + try: + key = self._files[key] + except KeyError: + raise KeyError(f"{key} is not a file in the archive") from None else: - raise KeyError(f"{key} is not a file in the archive") + with self.zip.open(key) as bytes: + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.seek(0) + if magic == format.MAGIC_PREFIX: + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. + # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. + return format.read_array( + bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs, + max_header_size=self.max_header_size + ) + else: + return bytes.read(key) def __contains__(self, key): - return (key in self._files or key in self.files) + return (key in self._files) def __repr__(self): # Get filename or default to `object` @@ -338,13 +340,13 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, execute arbitrary code. If pickles are disallowed, loading object arrays will fail. Default: False fix_imports : bool, optional - Only useful when loading Python 2 generated pickled files on Python 3, + Only useful when loading Python 2 generated pickled files, which includes npy/npz files containing object arrays. If `fix_imports` is True, pickle will try to map the old Python 2 names to the new names used in Python 3. 
encoding : str, optional What encoding to use when reading Python 2 strings. Only useful when - loading Python 2 generated pickled files in Python 3, which includes + loading Python 2 generated pickled files, which includes npy/npz files containing object arrays. Values other than 'latin1', 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical data. Default: 'ASCII' @@ -526,7 +528,7 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): .. deprecated:: 2.1 This flag is ignored since NumPy 1.17 and was only needed to - support loading some files in Python 2 written in Python 3. + support loading in Python 2 some files written in Python 3. See Also -------- @@ -948,8 +950,8 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', dtype = np.dtype(dtype) read_dtype_via_object_chunks = None - if dtype.kind in 'SUM' and ( - dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'): + if dtype.kind in 'SUM' and dtype in { + np.dtype("S0"), np.dtype("U0"), np.dtype("M8"), np.dtype("m8")}: # This is a legacy "flexible" dtype. We do not truly support # parametric dtypes currently (no dtype discovery step in the core), # but have to support these for backward compatibility. 
@@ -985,13 +987,12 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', if isinstance(comments[0], str) and len(comments[0]) == 1: comment = comments[0] comments = None - else: - # Input validation if there are multiple comment characters - if delimiter in comments: - raise TypeError( - f"Comment characters '{comments}' cannot include the " - f"delimiter '{delimiter}'" - ) + # Input validation if there are multiple comment characters + elif delimiter in comments: + raise TypeError( + f"Comment characters '{comments}' cannot include the " + f"delimiter '{delimiter}'" + ) # comment is now either a 1 or 0 character string or a tuple: if comments is not None: @@ -1750,7 +1751,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skip_header=0, skip_footer=0, converters=None, missing_values=None, filling_values=None, usecols=None, names=None, excludelist=None, - deletechars=''.join(sorted(NameValidator.defaultdeletechars)), + deletechars=''.join(sorted(NameValidator.defaultdeletechars)), # noqa: B008 replace_space='_', autostrip=False, case_sensitive=True, defaultfmt="f%i", unpack=None, usemask=False, loose=True, invalid_raise=True, max_rows=None, encoding=None, @@ -2303,7 +2304,8 @@ def tobytes_first(x, conv): try: converter.upgrade(value) except (ConverterError, ValueError): - errmsg += f"(occurred line #{j + 1 + skip_header} for value '{value}')" + line_number = j + 1 + skip_header + errmsg += f"(occurred line #{line_number} for value '{value}')" raise ConverterError(errmsg) # Check that we don't have invalid values diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 8fb65b110ee4..94f014ccd52d 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -1,11 +1,26 @@ import types import zipfile +from _typeshed import ( + StrOrBytesPath, + StrPath, + SupportsKeysAndGetItem, + SupportsRead, + SupportsWrite, +) from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence from re 
import Pattern -from typing import IO, Any, ClassVar, Generic, Protocol, Self, TypeAlias, overload, type_check_only +from typing import ( + IO, + Any, + ClassVar, + Generic, + Protocol, + Self, + TypeAlias, + overload, + type_check_only, +) from typing import Literal as L - -from _typeshed import StrOrBytesPath, StrPath, SupportsKeysAndGetItem, SupportsRead, SupportsWrite from typing_extensions import TypeVar, deprecated, override import numpy as np diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index f8a31ac4fe13..de4c01ecb95c 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -10,18 +10,24 @@ import re import warnings -from numpy._utils import set_module import numpy._core.numeric as NX - -from numpy._core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, - ones) -from numpy._core import overrides +from numpy._core import ( + abs, + array, + atleast_1d, + dot, + finfo, + hstack, + isscalar, + ones, + overrides, +) +from numpy._utils import set_module from numpy.exceptions import RankWarning -from numpy.lib._twodim_base_impl import diag, vander from numpy.lib._function_base_impl import trim_zeros -from numpy.lib._type_check_impl import iscomplex, real, imag, mintypecode -from numpy.linalg import eigvals, lstsq, inv - +from numpy.lib._twodim_base_impl import diag, vander +from numpy.lib._type_check_impl import imag, iscomplex, mintypecode, real +from numpy.linalg import eigvals, inv, lstsq array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -514,9 +520,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): - residuals -- sum of squared residuals of the least squares fit - rank -- the effective rank of the scaled Vandermonde - coefficient matrix + coefficient matrix - singular_values -- singular values of the scaled Vandermonde - coefficient matrix + coefficient matrix - rcond -- value of `rcond`. 
For more details, see `numpy.linalg.lstsq`. @@ -1305,11 +1311,10 @@ def fmt_float(q): if power == 0: if coefstr != '0': newstr = f'{coefstr}' + elif k == 0: + newstr = '0' else: - if k == 0: - newstr = '0' - else: - newstr = '' + newstr = '' elif power == 1: if coefstr == '0': newstr = '' @@ -1317,13 +1322,12 @@ def fmt_float(q): newstr = var else: newstr = f'{coefstr} {var}' + elif coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = '%s**%d' % (var, power,) else: - if coefstr == '0': - newstr = '' - elif coefstr == 'b': - newstr = '%s**%d' % (var, power,) - else: - newstr = '%s %s**%d' % (coefstr, var, power) + newstr = '%s %s**%d' % (coefstr, var, power) if k > 0: if newstr != '': @@ -1425,7 +1429,6 @@ def __setitem__(self, key, val): self._coeffs = NX.concatenate((zr, self.coeffs)) ind = 0 self._coeffs[ind] = val - return def __iter__(self): return iter(self.coeffs) diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index b40f4b78ddf5..faf2f01e6a22 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -1,37 +1,38 @@ from typing import ( - Literal as L, - TypeAlias, - overload, Any, - SupportsInt, + NoReturn, SupportsIndex, + SupportsInt, + TypeAlias, TypeVar, - NoReturn, + overload, +) +from typing import ( + Literal as L, ) import numpy as np from numpy import ( - poly1d, - unsignedinteger, - signedinteger, - floating, + complex128, complexfloating, + float64, + floating, int32, int64, - float64, - complex128, object_, + poly1d, + signedinteger, + unsignedinteger, ) - from numpy._typing import ( - NDArray, ArrayLike, + NDArray, _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _ArrayLikeObject_co, + _ArrayLikeUInt_co, ) _T = TypeVar("_T") diff --git a/numpy/lib/_scimath_impl.py b/numpy/lib/_scimath_impl.py index ab798a9901f4..8136a7d54515 100644 --- a/numpy/lib/_scimath_impl.py +++ 
b/numpy/lib/_scimath_impl.py @@ -16,11 +16,10 @@ """ import numpy._core.numeric as nx import numpy._core.numerictypes as nt -from numpy._core.numeric import asarray, any +from numpy._core.numeric import any, asarray from numpy._core.overrides import array_function_dispatch, set_module from numpy.lib._type_check_impl import isreal - __all__ = [ 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', 'arctanh' diff --git a/numpy/lib/_scimath_impl.pyi b/numpy/lib/_scimath_impl.pyi index a47a4f932d21..e6390c29ccb3 100644 --- a/numpy/lib/_scimath_impl.pyi +++ b/numpy/lib/_scimath_impl.pyi @@ -1,11 +1,10 @@ -from typing import overload, Any +from typing import Any, overload from numpy import complexfloating - from numpy._typing import ( NDArray, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, _ComplexLike_co, _FloatLike_co, ) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index f9b41f77943c..89b86c80964d 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -2,19 +2,23 @@ import warnings import numpy._core.numeric as _nx -from numpy._core.numeric import asarray, zeros, zeros_like, array, asanyarray +from numpy._core import atleast_3d, overrides, vstack +from numpy._core._multiarray_umath import _array_converter from numpy._core.fromnumeric import reshape, transpose from numpy._core.multiarray import normalize_axis_index -from numpy._core._multiarray_umath import _array_converter -from numpy._core import overrides -from numpy._core import vstack, atleast_3d -from numpy._core.numeric import normalize_axis_tuple +from numpy._core.numeric import ( + array, + asanyarray, + asarray, + normalize_axis_tuple, + zeros, + zeros_like, +) from numpy._core.overrides import set_module from numpy._core.shape_base import _arrays_for_stack_dispatcher from numpy.lib._index_tricks_impl import ndindex from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells - __all__ = [ 
'column_stack', 'row_stack', 'dstack', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', @@ -589,7 +593,7 @@ def expand_dims(a, axis): else: a = asanyarray(a) - if type(axis) not in (tuple, list): + if not isinstance(axis, (tuple, list)): axis = (axis,) out_ndim = len(axis) + a.ndim diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 15cbef7e4773..0206d95109fa 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -1,31 +1,40 @@ from collections.abc import Callable, Sequence from typing import ( - TypeVar, Any, - overload, - SupportsIndex, - Protocol, - ParamSpec, Concatenate, + ParamSpec, + Protocol, + SupportsIndex, + TypeVar, + overload, type_check_only, ) - from typing_extensions import deprecated import numpy as np -from numpy import _CastingKind, generic, integer, ufunc, unsignedinteger, signedinteger, floating, complexfloating, object_ +from numpy import ( + _CastingKind, + complexfloating, + floating, + generic, + integer, + object_, + signedinteger, + ufunc, + unsignedinteger, +) from numpy._typing import ( ArrayLike, DTypeLike, NDArray, - _ShapeLike, _ArrayLike, _ArrayLikeBool_co, - _ArrayLikeUInt_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _ArrayLikeObject_co, + _ArrayLikeUInt_co, + _ShapeLike, ) __all__ = [ diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index de2400a9ee98..a7005d702d96 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -1,14 +1,8 @@ from collections.abc import Iterable -from typing import Any, TypeVar, overload, SupportsIndex +from typing import Any, SupportsIndex, TypeVar, overload from numpy import generic -from numpy._typing import ( - NDArray, - ArrayLike, - _ShapeLike, - _Shape, - _ArrayLike -) +from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike __all__ = 
["broadcast_to", "broadcast_arrays", "broadcast_shapes"] @@ -72,7 +66,7 @@ def broadcast_to( subok: bool = ..., ) -> NDArray[Any]: ... -def broadcast_shapes(*args: _ShapeLike) -> _Shape: ... +def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ... def broadcast_arrays( *args: ArrayLike, diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index 65c59118aa31..dc6a55886fdb 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -4,18 +4,31 @@ import functools import operator +from numpy._core import iinfo, overrides from numpy._core._multiarray_umath import _array_converter from numpy._core.numeric import ( - asanyarray, arange, zeros, greater_equal, multiply, ones, - asarray, where, int8, int16, int32, int64, intp, empty, promote_types, - diagonal, nonzero, indices - ) + arange, + asanyarray, + asarray, + diagonal, + empty, + greater_equal, + indices, + int8, + int16, + int32, + int64, + intp, + multiply, + nonzero, + ones, + promote_types, + where, + zeros, +) from numpy._core.overrides import finalize_array_function_like, set_module -from numpy._core import overrides -from numpy._core import iinfo from numpy.lib._stride_tricks_impl import broadcast_to - __all__ = [ 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu', 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices', @@ -815,7 +828,7 @@ def histogram2d(x, y, bins=10, range=None, density=None, weights=None): except TypeError: N = 1 - if N != 1 and N != 2: + if N not in {1, 2}: xedges = yedges = asarray(bins) bins = [xedges, yedges] hist, edges = histogramdd([x, y], bins, range, density, weights) diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index b2880961fe1f..43df38ed5b06 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -2,39 +2,40 @@ from collections.abc import Callable, Sequence from typing import ( Any, TypeAlias, - overload, TypeVar, + overload, +) +from typing 
import ( Literal as L, ) import numpy as np from numpy import ( - generic, - timedelta64, + _OrderCF, + complex128, + complexfloating, datetime64, - int_, - intp, float64, - complex128, - signedinteger, floating, - complexfloating, + generic, + int_, + intp, object_, - _OrderCF, + signedinteger, + timedelta64, ) - from numpy._typing import ( - DTypeLike, - _DTypeLike, ArrayLike, - _ArrayLike, + DTypeLike, NDArray, - _SupportsArray, - _SupportsArrayFunc, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, + _ArrayLike, _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _ArrayLikeObject_co, + _DTypeLike, + _SupportsArray, + _SupportsArrayFunc, ) __all__ = [ diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 671f27adc0d7..977609caa299 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -8,12 +8,12 @@ 'typename', 'mintypecode', 'common_type'] -from numpy._utils import set_module import numpy._core.numeric as _nx -from numpy._core.numeric import asarray, asanyarray, isnan, zeros -from numpy._core import overrides, getlimits -from ._ufunclike_impl import isneginf, isposinf +from numpy._core import getlimits, overrides +from numpy._core.numeric import asanyarray, asarray, isnan, zeros +from numpy._utils import set_module +from ._ufunclike_impl import isneginf, isposinf array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 2c7a9c4a10bc..b9ab2a02f5f5 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,12 +1,21 @@ +from _typeshed import Incomplete from collections.abc import Container, Iterable from typing import Any, Protocol, TypeAlias, overload, type_check_only from typing import Literal as L - -from _typeshed import Incomplete from typing_extensions import TypeVar import numpy as np -from numpy._typing import ArrayLike, NDArray, _16Bit, _32Bit, _64Bit, 
_ArrayLike, _NestedSequence, _ScalarLike_co, _SupportsArray +from numpy._typing import ( + ArrayLike, + NDArray, + _16Bit, + _32Bit, + _64Bit, + _ArrayLike, + _NestedSequence, + _ScalarLike_co, + _SupportsArray, +) __all__ = [ "common_type", diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi index 947532aa07a3..a673f05c010d 100644 --- a/numpy/lib/_ufunclike_impl.pyi +++ b/numpy/lib/_ufunclike_impl.pyi @@ -1,12 +1,12 @@ -from typing import Any, overload, TypeVar +from typing import Any, TypeVar, overload import numpy as np from numpy import floating, object_ from numpy._typing import ( NDArray, - _FloatLike_co, _ArrayLikeFloat_co, _ArrayLikeObject_co, + _FloatLike_co, ) __all__ = ["fix", "isneginf", "isposinf"] diff --git a/numpy/lib/_user_array_impl.py b/numpy/lib/_user_array_impl.py index fce4e4261453..f3a6c0f518be 100644 --- a/numpy/lib/_user_array_impl.py +++ b/numpy/lib/_user_array_impl.py @@ -8,10 +8,33 @@ """ from numpy._core import ( - array, asarray, absolute, add, subtract, multiply, divide, - remainder, power, left_shift, right_shift, bitwise_and, bitwise_or, - bitwise_xor, invert, less, less_equal, not_equal, equal, greater, - greater_equal, shape, reshape, arange, sin, sqrt, transpose + absolute, + add, + arange, + array, + asarray, + bitwise_and, + bitwise_or, + bitwise_xor, + divide, + equal, + greater, + greater_equal, + invert, + left_shift, + less, + less_equal, + multiply, + not_equal, + power, + remainder, + reshape, + right_shift, + shape, + sin, + sqrt, + subtract, + transpose, ) from numpy._core.overrides import set_module diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi index 13da406c99f5..0aeec42129af 100644 --- a/numpy/lib/_user_array_impl.pyi +++ b/numpy/lib/_user_array_impl.pyi @@ -1,18 +1,23 @@ -from types import EllipsisType -from typing import Any, Generic, Self, SupportsIndex, TypeAlias, TypeVar, overload - from _typeshed import Incomplete -from typing_extensions import deprecated, 
override +from types import EllipsisType +from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload +from typing_extensions import TypeVar, override import numpy as np import numpy.typing as npt -from numpy._typing import _ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co, _DTypeLike +from numpy._typing import ( + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeInt_co, + _DTypeLike, +) ### _ScalarT = TypeVar("_ScalarT", bound=np.generic) _ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=Any, covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) _DTypeT = TypeVar("_DTypeT", bound=np.dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) @@ -89,7 +94,7 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): @overload def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> container[_ShapeT_co, _DTypeT_co]: ... @overload - def __getitem__(self, key: _ToIndexSlices, /) -> container[Any, _DTypeT_co]: ... + def __getitem__(self, key: _ToIndexSlices, /) -> container[_AnyShape, _DTypeT_co]: ... @overload def __getitem__(self, key: _ToIndices, /) -> Any: ... @overload @@ -152,20 +157,22 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): def __ipow__(self, other: npt.ArrayLike, /) -> Self: ... # - def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... - def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... def __ilshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... # - def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... 
- def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... def __irshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... # @overload - def __and__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + def __and__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload - def __and__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... + def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __rand__ = __and__ @overload def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @@ -174,9 +181,11 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # @overload - def __xor__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + def __xor__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... @overload - def __xor__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... + def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __rxor__ = __xor__ @overload def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @@ -185,9 +194,11 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # @overload - def __or__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + def __or__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... 
@overload - def __or__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... + def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... __ror__ = __or__ @overload def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... @@ -208,8 +219,6 @@ class container(Generic[_ShapeT_co, _DTypeT_co]): # def copy(self, /) -> Self: ... - @deprecated("tostring() is deprecated. Use tobytes() instead.") - def tostring(self, /) -> bytes: ... def tobytes(self, /) -> bytes: ... def byteswap(self, /) -> Self: ... def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index ac10aec698d6..2e1ee23d7d58 100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -1,14 +1,14 @@ +import functools import os +import platform import sys import textwrap import types import warnings -import functools -import platform +import numpy as np from numpy._core import ndarray from numpy._utils import set_module -import numpy as np __all__ = [ 'get_include', 'info', 'show_runtime' @@ -36,10 +36,13 @@ def show_runtime(): ``__cpu_baseline__`` and ``__cpu_dispatch__`` """ + from pprint import pprint + from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, ) - from pprint import pprint config_found = [{ "numpy_version": np.__version__, "python": sys.version, @@ -476,8 +479,8 @@ def info(object=None, maxwidth=76, output=None, toplevel='numpy'): """ global _namedict, _dictlist # Local import to speed up numpy's import time. - import pydoc import inspect + import pydoc if (hasattr(object, '_ppimport_importer') or hasattr(object, '_ppimport_module')): @@ -696,7 +699,9 @@ def _opt_info(): str: A formatted string indicating the supported CPU features. 
""" from numpy._core._multiarray_umath import ( - __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, ) if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0: diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index 00ed47c9fb67..7a34f273c423 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -1,10 +1,16 @@ from _typeshed import SupportsWrite +from typing import LiteralString +from typing_extensions import TypeVar -from numpy._typing import DTypeLike +import numpy as np __all__ = ["get_include", "info", "show_runtime"] -def get_include() -> str: ... +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) + +def get_include() -> LiteralString: ... def show_runtime() -> None: ... -def info(object: object = ..., maxwidth: int = ..., output: SupportsWrite[str] | None = ..., toplevel: str = ...) -> None: ... -def drop_metadata(dtype: DTypeLike, /) -> DTypeLike: ... +def info( + object: object = None, maxwidth: int = 76, output: SupportsWrite[str] | None = None, toplevel: str = "numpy" +) -> None: ... +def drop_metadata(dtype: _DTypeT, /) -> _DTypeT: ... diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index dac2876b1e97..d70a61040a40 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -7,7 +7,6 @@ """ import re - __all__ = ['NumpyVersion'] @@ -23,8 +22,7 @@ class NumpyVersion: - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) - Development versions after a1: '1.8.0a1.dev-f1234afa', - '1.8.0b2.dev-f1234afa', - '1.8.1rc1.dev-f1234afa', etc. + '1.8.0b2.dev-f1234afa', '1.8.1rc1.dev-f1234afa', etc. 
- Development versions (no git hash available): '1.8.0.dev-Unknown' Comparing needs to be done against a valid version string or other diff --git a/numpy/lib/array_utils.py b/numpy/lib/array_utils.py index b4e7976131d2..c267eb021ad8 100644 --- a/numpy/lib/array_utils.py +++ b/numpy/lib/array_utils.py @@ -1,4 +1,4 @@ -from ._array_utils_impl import ( +from ._array_utils_impl import ( # noqa: F401 __all__, __doc__, byte_bounds, diff --git a/numpy/lib/array_utils.pyi b/numpy/lib/array_utils.pyi index 4b9ebe334a1f..8adc3c5b22a6 100644 --- a/numpy/lib/array_utils.pyi +++ b/numpy/lib/array_utils.pyi @@ -1,6 +1,12 @@ from ._array_utils_impl import ( __all__ as __all__, +) +from ._array_utils_impl import ( byte_bounds as byte_bounds, +) +from ._array_utils_impl import ( normalize_axis_index as normalize_axis_index, +) +from ._array_utils_impl import ( normalize_axis_tuple as normalize_axis_tuple, ) diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 70f903ecbc79..8e0c79942d23 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -1,12 +1,12 @@ -from ._format_impl import ( - __all__, - __doc__, +from ._format_impl import ( # noqa: F401 ARRAY_ALIGN, BUFFER_SIZE, EXPECTED_KEYS, GROWTH_AXIS_MAX_DIGITS, MAGIC_LEN, MAGIC_PREFIX, + __all__, + __doc__, descr_to_dtype, drop_metadata, dtype_to_descr, diff --git a/numpy/lib/format.pyi b/numpy/lib/format.pyi index 4c8a07aa03e9..dd9470e1e6a3 100644 --- a/numpy/lib/format.pyi +++ b/numpy/lib/format.pyi @@ -1,24 +1,66 @@ from ._format_impl import ( - __all__ as __all__, - __doc__ as __doc__, ARRAY_ALIGN as ARRAY_ALIGN, +) +from ._format_impl import ( BUFFER_SIZE as BUFFER_SIZE, +) +from ._format_impl import ( EXPECTED_KEYS as EXPECTED_KEYS, +) +from ._format_impl import ( GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS, +) +from ._format_impl import ( MAGIC_LEN as MAGIC_LEN, +) +from ._format_impl import ( MAGIC_PREFIX as MAGIC_PREFIX, +) +from ._format_impl import ( + __all__ as __all__, +) +from ._format_impl import ( 
+ __doc__ as __doc__, +) +from ._format_impl import ( descr_to_dtype as descr_to_dtype, +) +from ._format_impl import ( drop_metadata as drop_metadata, +) +from ._format_impl import ( dtype_to_descr as dtype_to_descr, +) +from ._format_impl import ( header_data_from_array_1_0 as header_data_from_array_1_0, +) +from ._format_impl import ( isfileobj as isfileobj, +) +from ._format_impl import ( magic as magic, +) +from ._format_impl import ( open_memmap as open_memmap, +) +from ._format_impl import ( read_array as read_array, +) +from ._format_impl import ( read_array_header_1_0 as read_array_header_1_0, +) +from ._format_impl import ( read_array_header_2_0 as read_array_header_2_0, +) +from ._format_impl import ( read_magic as read_magic, +) +from ._format_impl import ( write_array as write_array, +) +from ._format_impl import ( write_array_header_1_0 as write_array_header_1_0, +) +from ._format_impl import ( write_array_header_2_0 as write_array_header_2_0, ) diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index 60b7fd98a4da..a7e4c93932c6 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -34,7 +34,7 @@ def opt_func_info(func_name=None, signature=None): ... func_name="add|abs", signature="float64|complex64" ... 
) >>> import json - >>> print(json.dumps(dict, indent=2)) + >>> print(json.dumps(dict, indent=2)) # may vary (architecture) { "absolute": { "dd": { @@ -64,9 +64,9 @@ def opt_func_info(func_name=None, signature=None): """ import re - from numpy._core._multiarray_umath import ( - __cpu_targets_info__ as targets, dtype - ) + + from numpy._core._multiarray_umath import __cpu_targets_info__ as targets + from numpy._core._multiarray_umath import dtype if func_name is not None: func_pattern = re.compile(func_name) diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index c42f2e3d8ed4..4f4801feac8f 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -1,5 +1,6 @@ from abc import ABC, abstractmethod -from typing import Literal as L, Any +from typing import Any +from typing import Literal as L from numpy import ufunc diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index 1003ef5be4b1..84d8079266d7 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -1,3 +1 @@ -from ._npyio_impl import ( - __doc__, DataSource, NpzFile -) +from ._npyio_impl import DataSource, NpzFile, __doc__ # noqa: F401 diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index fd3ae8f5a287..49fb4d1fc736 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -1,5 +1,9 @@ from numpy.lib._npyio_impl import ( DataSource as DataSource, +) +from numpy.lib._npyio_impl import ( NpzFile as NpzFile, +) +from numpy.lib._npyio_impl import ( __doc__ as __doc__, ) diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index 19337cad1943..c8a6dd818e96 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -13,7 +13,6 @@ from numpy._core.overrides import array_function_dispatch from numpy.lib._iotools import _is_string_like - __all__ = [ 'append_fields', 'apply_along_fields', 'assign_fields_by_name', 'drop_fields', 'find_duplicates', 'flatten_descr', @@ -996,7 +995,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): >>> 
np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1) array([ 3. , 5.5, 9. , 11. ]) - """ + """ # noqa: E501 if arr.dtype.names is None: raise ValueError('arr must be a structured array') @@ -1128,7 +1127,7 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False, (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])], dtype=[('a', ' np.recarray[Any, np.dtype[np.void]]: ... +) -> np.recarray[_AnyShape, np.dtype[np.void]]: ... # @overload @@ -319,7 +318,7 @@ def stack_arrays( usemask: Literal[False], asrecarray: Literal[True], autoconvert: bool = False, -) -> np.recarray[tuple[int, ...], np.dtype[np.void]]: ... +) -> np.recarray[_AnyShape, np.dtype[np.void]]: ... @overload def stack_arrays( arrays: Sequence[npt.NDArray[Any]], @@ -327,7 +326,7 @@ def stack_arrays( usemask: Literal[True] = True, asrecarray: Literal[False] = False, autoconvert: bool = False, -) -> np.ma.MaskedArray[tuple[int, ...], np.dtype[np.void]]: ... +) -> np.ma.MaskedArray[_AnyShape, np.dtype[np.void]]: ... @overload def stack_arrays( arrays: Sequence[npt.NDArray[Any]], @@ -335,7 +334,7 @@ def stack_arrays( usemask: Literal[True], asrecarray: Literal[True], autoconvert: bool = False, -) -> MaskedRecords[tuple[int, ...], np.dtype[np.void]]: ... +) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ... @overload def stack_arrays( arrays: Sequence[npt.NDArray[Any]], @@ -344,7 +343,7 @@ def stack_arrays( *, asrecarray: Literal[True], autoconvert: bool = False, -) -> MaskedRecords[tuple[int, ...], np.dtype[np.void]]: ... +) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ... 
# @overload diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py index ffd05ef9f364..fb6824d9bb89 100644 --- a/numpy/lib/scimath.py +++ b/numpy/lib/scimath.py @@ -1,4 +1,13 @@ -from ._scimath_impl import ( - __all__, __doc__, sqrt, log, log2, logn, log10, power, arccos, arcsin, - arctanh +from ._scimath_impl import ( # noqa: F401 + __all__, + __doc__, + arccos, + arcsin, + arctanh, + log, + log2, + log10, + logn, + power, + sqrt, ) diff --git a/numpy/lib/scimath.pyi b/numpy/lib/scimath.pyi index cff5b9097fae..253235dfc576 100644 --- a/numpy/lib/scimath.pyi +++ b/numpy/lib/scimath.pyi @@ -1,12 +1,30 @@ from ._scimath_impl import ( __all__ as __all__, - sqrt as sqrt, +) +from ._scimath_impl import ( + arccos as arccos, +) +from ._scimath_impl import ( + arcsin as arcsin, +) +from ._scimath_impl import ( + arctanh as arctanh, +) +from ._scimath_impl import ( log as log, +) +from ._scimath_impl import ( log2 as log2, - logn as logn, +) +from ._scimath_impl import ( log10 as log10, +) +from ._scimath_impl import ( + logn as logn, +) +from ._scimath_impl import ( power as power, - arccos as arccos, - arcsin as arcsin, - arctanh as arctanh, +) +from ._scimath_impl import ( + sqrt as sqrt, ) diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py index ba567be0c823..721a548f4d48 100644 --- a/numpy/lib/stride_tricks.py +++ b/numpy/lib/stride_tricks.py @@ -1,3 +1 @@ -from ._stride_tricks_impl import ( - __doc__, as_strided, sliding_window_view -) +from ._stride_tricks_impl import __doc__, as_strided, sliding_window_view # noqa: F401 diff --git a/numpy/lib/stride_tricks.pyi b/numpy/lib/stride_tricks.pyi index eb46f28ae5f4..42d8fe9ef43b 100644 --- a/numpy/lib/stride_tricks.pyi +++ b/numpy/lib/stride_tricks.pyi @@ -1,4 +1,6 @@ from numpy.lib._stride_tricks_impl import ( as_strided as as_strided, +) +from numpy.lib._stride_tricks_impl import ( sliding_window_view as sliding_window_view, ) diff --git a/numpy/lib/tests/test__datasource.py 
b/numpy/lib/tests/test__datasource.py index e0e41c2651c7..65137324d1a9 100644 --- a/numpy/lib/tests/test__datasource.py +++ b/numpy/lib/tests/test__datasource.py @@ -1,15 +1,15 @@ import os -import pytest -from tempfile import mkdtemp, mkstemp, NamedTemporaryFile +import urllib.request as urllib_request from shutil import rmtree +from tempfile import NamedTemporaryFile, mkdtemp, mkstemp +from urllib.error import URLError +from urllib.parse import urlparse + +import pytest import numpy.lib._datasource as datasource from numpy.testing import assert_, assert_equal, assert_raises -import urllib.request as urllib_request -from urllib.parse import urlparse -from urllib.error import URLError - def urlopen_stub(url, data=None): '''Stub to replace urlopen for testing.''' diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py index 396d4147c6c5..1581ffbe95fd 100644 --- a/numpy/lib/tests/test__iotools.py +++ b/numpy/lib/tests/test__iotools.py @@ -2,13 +2,20 @@ from datetime import date import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_allclose, assert_raises, - ) from numpy.lib._iotools import ( - LineSplitter, NameValidator, StringConverter, - has_nested_fields, easy_dtype, flatten_dtype - ) + LineSplitter, + NameValidator, + StringConverter, + easy_dtype, + flatten_dtype, + has_nested_fields, +) +from numpy.testing import ( + assert_, + assert_allclose, + assert_equal, + assert_raises, +) class TestLineSplitter: diff --git a/numpy/lib/tests/test__version.py b/numpy/lib/tests/test__version.py index e6d41ad93932..6e6a34a241ac 100644 --- a/numpy/lib/tests/test__version.py +++ b/numpy/lib/tests/test__version.py @@ -1,8 +1,8 @@ """Tests for the NumpyVersion class. 
""" -from numpy.testing import assert_, assert_raises from numpy.lib import NumpyVersion +from numpy.testing import assert_, assert_raises def test_main_versions(): diff --git a/numpy/lib/tests/test_array_utils.py b/numpy/lib/tests/test_array_utils.py index 3d8b2bd4616e..55b9d283b15b 100644 --- a/numpy/lib/tests/test_array_utils.py +++ b/numpy/lib/tests/test_array_utils.py @@ -1,5 +1,4 @@ import numpy as np - from numpy.lib import array_utils from numpy.testing import assert_equal diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index 3f8e5d67db13..6efbe348ca81 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -4,9 +4,8 @@ import pytest import numpy as np -from numpy.testing import assert_array_equal, assert_allclose, assert_equal from numpy.lib._arraypad_impl import _as_pairs - +from numpy.testing import assert_allclose, assert_array_equal, assert_equal _numeric_dtypes = ( np._core.sctypes["uint"] diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 788a4cecdb44..7865e1b16ee9 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -1,15 +1,17 @@ """Test functions for 1D array set operations. 
""" -import numpy as np +import pytest -from numpy import ( - ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, isin - ) +import numpy as np +from numpy import ediff1d, intersect1d, isin, setdiff1d, setxor1d, union1d, unique from numpy.exceptions import AxisError -from numpy.testing import (assert_array_equal, assert_equal, - assert_raises, assert_raises_regex) -import pytest +from numpy.testing import ( + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) class TestSetOps: diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py index e64d1d1e3ece..800c9a2a5f77 100644 --- a/numpy/lib/tests/test_arrayterator.py +++ b/numpy/lib/tests/test_arrayterator.py @@ -1,9 +1,9 @@ -from operator import mul from functools import reduce +from operator import mul import numpy as np -from numpy.random import randint from numpy.lib import Arrayterator +from numpy.random import randint from numpy.testing import assert_ diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index a0f9bdd9ebfe..2ab7026ccc7c 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -274,20 +274,26 @@ "v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" ''' -import sys import os +import sys import warnings -import pytest from io import BytesIO +import pytest + import numpy as np +from numpy.lib import format from numpy.testing import ( - assert_, assert_array_equal, assert_raises, assert_raises_regex, - assert_warns, IS_PYPY, IS_WASM, 
IS_64BIT - ) + IS_64BIT, + IS_PYPY, + IS_WASM, + assert_, + assert_array_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) from numpy.testing._private.utils import requires_memory -from numpy.lib import format - # Generate some basic arrays to test with. scalars = [ @@ -378,9 +384,6 @@ ('z', 'u1')] NbufferT = [ - # x Info color info y z - # value y2 Info2 name z2 Name Value - # name value y3 z3 ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 7329287721c4..f2dba193c849 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1,32 +1,69 @@ +import decimal +import math import operator -import warnings import sys -import decimal +import warnings from fractions import Fraction -import math -import pytest +from functools import partial + import hypothesis -from hypothesis.extra.numpy import arrays import hypothesis.strategies as st -from functools import partial +import pytest +from hypothesis.extra.numpy import arrays import numpy as np +import numpy.lib._function_base_impl as nfb from numpy import ( - ma, angle, average, bartlett, blackman, corrcoef, cov, - delete, diff, digitize, extract, flipud, gradient, hamming, hanning, - i0, insert, interp, kaiser, meshgrid, piecewise, place, rot90, - select, setxor1d, sinc, trapezoid, trim_zeros, unwrap, unique, vectorize - ) + angle, + average, + bartlett, + blackman, + corrcoef, + cov, + delete, + diff, + digitize, + extract, + flipud, + gradient, + hamming, + hanning, + i0, + insert, + interp, + kaiser, + ma, + meshgrid, + piecewise, + place, + rot90, + select, + setxor1d, + sinc, + trapezoid, + trim_zeros, + unique, + unwrap, + vectorize, +) +from numpy._core.numeric import normalize_axis_tuple from numpy.exceptions import AxisError -from 
numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_allclose, - assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT, - IS_WASM, NOGIL_BUILD - ) -import numpy.lib._function_base_impl as nfb from numpy.random import rand -from numpy._core.numeric import normalize_axis_tuple +from numpy.testing import ( + HAS_REFCOUNT, + IS_WASM, + NOGIL_BUILD, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, + suppress_warnings, +) def get_mat(n): @@ -850,11 +887,11 @@ def test_n(self): output = [diff(x, n=n) for n in range(1, 5)] expected = [[1, 1], [0], [], []] assert_(diff(x, n=0) is x) - for n, (expected, out) in enumerate(zip(expected, output), start=1): - assert_(type(out) is np.ndarray) - assert_array_equal(out, expected) - assert_equal(out.dtype, np.int_) - assert_equal(len(out), max(0, len(x) - n)) + for n, (expected_n, output_n) in enumerate(zip(expected, output), start=1): + assert_(type(output_n) is np.ndarray) + assert_array_equal(output_n, expected_n) + assert_equal(output_n.dtype, np.int_) + assert_equal(len(output_n), max(0, len(x) - n)) def test_times(self): x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) @@ -1695,6 +1732,15 @@ def test_string_ticket_1892(self): s = '0123456789' * 10 assert_equal(s, f(s)) + def test_dtype_promotion_gh_29189(self): + # dtype should not be silently promoted (int32 -> int64) + dtypes = [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64] + + for dtype in dtypes: + x = np.asarray([1, 2, 3], dtype=dtype) + y = np.vectorize(lambda x: x + x)(x) + assert x.dtype == y.dtype + def test_cache(self): # Ensure that vectorized func called exactly once per argument. 
_calls = [0] @@ -2009,8 +2055,8 @@ def test_frompyfunc_leaks(self, name, incr): # exposed in gh-11867 as np.vectorized, but the problem stems from # frompyfunc. # class.attribute = np.frompyfunc() creates a - # reference cycle if is a bound class method. It requires a - # gc collection cycle to break the cycle (on CPython 3) + # reference cycle if is a bound class method. + # It requires a gc collection cycle to break the cycle. import gc A_func = getattr(self.A, name) gc.disable() diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index bfb0248ebdcf..4ba953f462fc 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -1,12 +1,19 @@ -import numpy as np +import pytest -from numpy import histogram, histogramdd, histogram_bin_edges +import numpy as np +from numpy import histogram, histogram_bin_edges, histogramdd from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_allclose, - assert_array_max_ulp, assert_raises_regex, suppress_warnings, - ) -import pytest + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, + assert_raises_regex, + suppress_warnings, +) class TestHistogram: @@ -547,7 +554,8 @@ def test_outlier(self): assert_equal(len(a), numbins) def test_scott_vs_stone(self): - """Verify that Scott's rule and Stone's rule converges for normally distributed data""" + # Verify that Scott's rule and Stone's rule converges for normally + # distributed data def nbins_ratio(seed, size): rng = np.random.RandomState(seed) @@ -555,10 +563,11 @@ def nbins_ratio(seed, size): a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0]) return a / (a + b) - ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)] - for seed in range(10)] + geom_space = 
np.geomspace(start=10, stop=100, num=4).round().astype(int) + ll = [[nbins_ratio(seed, size) for size in geom_space] for seed in range(10)] - # the average difference between the two methods decreases as the dataset size increases. + # the average difference between the two methods decreases as the dataset + # size increases. avg = abs(np.mean(ll, axis=0) - 0.5) assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index d17bd9e6259b..ed8709db5238 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -1,14 +1,29 @@ import pytest import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_raises_regex, - ) from numpy.lib._index_tricks_impl import ( - mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from, - index_exp, ndindex, c_, r_, s_, ix_ - ) + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + s_, +) +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) class TestRavelUnravelIndex: diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 6939e5ceffac..79fca0dd690b 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -1,35 +1,47 @@ -import sys import gc import gzip +import locale import os +import re +import sys import threading import time import warnings -import re -import pytest -from pathlib import Path -from tempfile import NamedTemporaryFile -from io import BytesIO, StringIO +from ctypes import c_bool from datetime import datetime -import locale +from io import BytesIO, StringIO from multiprocessing import Value, get_context -from ctypes import c_bool +from 
pathlib import Path +from tempfile import NamedTemporaryFile + +import pytest import numpy as np import numpy.ma as ma +from numpy._utils import asbytes from numpy.exceptions import VisibleDeprecationWarning -from numpy.lib._iotools import ConverterError, ConversionWarning from numpy.lib import _npyio_impl +from numpy.lib._iotools import ConversionWarning, ConverterError from numpy.lib._npyio_impl import recfromcsv, recfromtxt from numpy.ma.testutils import assert_equal from numpy.testing import ( - assert_warns, assert_, assert_raises_regex, assert_raises, - assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY, - HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings, - break_cycles, IS_WASM - ) + HAS_REFCOUNT, + IS_PYPY, + IS_WASM, + assert_, + assert_allclose, + assert_array_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_raises_regex, + assert_warns, + break_cycles, + suppress_warnings, + tempdir, + temppath, +) from numpy.testing._private.utils import requires_memory -from numpy._utils import asbytes class TextIO(BytesIO): @@ -70,7 +82,7 @@ def strptime(s, fmt=None): 2.5. """ - if type(s) == bytes: + if isinstance(s, bytes): s = s.decode("latin1") return datetime(*time.strptime(s, fmt)[:3]) @@ -217,7 +229,6 @@ def test_big_arrays(self): npfile = np.load(tmp) a = npfile['a'] # Should succeed npfile.close() - del a # Avoid pyflakes unused variable warning. def test_multiple_arrays(self): a = np.array([[1, 2], [3, 4]], float) @@ -305,7 +316,7 @@ def test_closing_fid(self): np.savez(tmp, data='LOVELY LOAD') # We need to check if the garbage collector can properly close # numpy npz file returned by np.load when their reference count - # goes to zero. Python 3 running in debug mode raises a + # goes to zero. Python running in debug mode raises a # ResourceWarning when file closing is left to the garbage # collector, so we catch the warnings. 
with suppress_warnings() as sup: @@ -625,7 +636,7 @@ def check_large_zip(memoryerror_raised): # Since Python 3.8, the default start method for multiprocessing has # been changed from 'fork' to 'spawn' on macOS, causing inconsistency - # on memory sharing model, lead to failed test for check_large_zip + # on memory sharing model, leading to failed test for check_large_zip ctx = get_context('fork') p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,)) p.start() diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 589ca4e326f9..a2022a0d5175 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -4,15 +4,16 @@ These tests complement those found in `test_io.py`. """ -import sys import os -import pytest -from tempfile import NamedTemporaryFile, mkstemp +import sys from io import StringIO +from tempfile import NamedTemporaryFile, mkstemp + +import pytest import numpy as np from numpy.ma.testutils import assert_equal -from numpy.testing import assert_array_equal, HAS_REFCOUNT, IS_PYPY +from numpy.testing import HAS_REFCOUNT, IS_PYPY, assert_array_equal def test_scientific_notation(): diff --git a/numpy/lib/tests/test_mixins.py b/numpy/lib/tests/test_mixins.py index 2ff4b49011f8..f0aec156d0ee 100644 --- a/numpy/lib/tests/test_mixins.py +++ b/numpy/lib/tests/test_mixins.py @@ -4,7 +4,6 @@ import numpy as np from numpy.testing import assert_, assert_equal, assert_raises - # NOTE: This class should be kept as an exact copy of the example from the # docstring for NDArrayOperatorsMixin. 
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 85ba8ff53d5a..89a6d1f95fed 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -1,17 +1,22 @@ -import warnings -import pytest import inspect +import warnings from functools import partial +import pytest + import numpy as np from numpy._core.numeric import normalize_axis_tuple from numpy.exceptions import AxisError, ComplexWarning from numpy.lib._nanfunctions_impl import _nan_mask, _replace_nan from numpy.testing import ( - assert_, assert_equal, assert_almost_equal, assert_raises, - assert_raises_regex, assert_array_equal, suppress_warnings - ) - + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + suppress_warnings, +) # Test data _ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py index 27ec396417f3..0b0e9d1857c8 100644 --- a/numpy/lib/tests/test_packbits.py +++ b/numpy/lib/tests/test_packbits.py @@ -1,7 +1,10 @@ +from itertools import chain + +import pytest + import numpy as np from numpy.testing import assert_array_equal, assert_equal, assert_raises -import pytest -from itertools import chain + def test_packbits(): # Copied from the docstring. 
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index a4961b580d08..c173ac321d74 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -1,11 +1,16 @@ +import pytest + import numpy as np import numpy.polynomial.polynomial as poly from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_allclose - ) - -import pytest + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) # `poly1d` has some support for `np.bool` and `np.timedelta64`, # but it is limited and they are therefore excluded here diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index 9266817ce9ae..72377b8f7c35 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -1,14 +1,27 @@ import numpy as np import numpy.ma as ma +from numpy.lib.recfunctions import ( + append_fields, + apply_along_fields, + assign_fields_by_name, + drop_fields, + find_duplicates, + get_fieldstructure, + join_by, + merge_arrays, + recursive_fill_fields, + rename_fields, + repack_fields, + require_fields, + stack_arrays, + structured_to_unstructured, + unstructured_to_structured, +) from numpy.ma.mrecords import MaskedRecords from numpy.ma.testutils import assert_equal from numpy.testing import assert_, assert_raises -from numpy.lib.recfunctions import ( - drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields, - find_duplicates, merge_arrays, append_fields, stack_arrays, join_by, - repack_fields, unstructured_to_structured, structured_to_unstructured, - apply_along_fields, require_fields, assign_fields_by_name) + get_fieldspec = np.lib.recfunctions._get_fieldspec get_names = np.lib.recfunctions.get_names get_names_flat = np.lib.recfunctions.get_names_flat @@ -511,9 +524,8 @@ def 
test_flatten_wflexible(self): assert_equal(test, control) test = merge_arrays((x, w), flatten=False) - controldtype = [('f0', int), - ('f1', [('a', int), - ('b', [('ba', float), ('bb', int), ('bc', [])])])] + f1_descr = [('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])] + controldtype = [('f0', int), ('f1', f1_descr)] control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], dtype=controldtype) assert_equal(test, control) @@ -550,7 +562,6 @@ def test_w_shorter_flex(self): # dtype=[('A', '|S3'), ('B', float), ('C', int)]) #assert_equal(test, control) - # Hack to avoid pyflakes warnings about unused variables merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], dtype=[('A', '|S3'), ('B', float), ('C', int)]) @@ -823,7 +834,6 @@ def test_join(self): # ('c', int), ('d', int)]) #assert_equal(test, control) - # Hack to avoid pyflakes unused variable warnings join_by(('a', 'b'), a, b) np.array([(5, 55, 105, 100), (6, 56, 106, 101), (7, 57, 107, 102), (8, 58, 108, 103), diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index a4e561091b2d..8839ed53c506 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -2,9 +2,13 @@ import numpy as np from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_array_almost_equal, - assert_raises, _assert_valid_refcount, - ) + _assert_valid_refcount, + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) class TestRegression: diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py index 3c7479c0f276..b0b68dda773c 100644 --- a/numpy/lib/tests/test_shape_base.py +++ b/numpy/lib/tests/test_shape_base.py @@ -1,18 +1,27 @@ -import numpy as np import functools import sys + import pytest +import numpy as np from numpy import ( - apply_along_axis, apply_over_axes, array_split, split, hsplit, 
dsplit, - vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis, - put_along_axis - ) + apply_along_axis, + apply_over_axes, + array_split, + column_stack, + dsplit, + dstack, + expand_dims, + hsplit, + kron, + put_along_axis, + split, + take_along_axis, + tile, + vsplit, +) from numpy.exceptions import AxisError -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises, assert_warns - ) - +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises IS_64BIT = sys.maxsize > 2**32 diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index d82e9b801e27..fe40c953a147 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -1,14 +1,23 @@ +import pytest + import numpy as np from numpy._core._rational_tests import rational -from numpy.testing import ( - assert_equal, assert_array_equal, assert_raises, assert_, - assert_raises_regex, assert_warns, - ) from numpy.lib._stride_tricks_impl import ( - as_strided, broadcast_arrays, _broadcast_shape, broadcast_to, - broadcast_shapes, sliding_window_view, - ) -import pytest + _broadcast_shape, + as_strided, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + sliding_window_view, +) +from numpy.testing import ( + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) def assert_shapes_correct(input_shapes, expected_shape): diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py index 4fe5af0d06c4..eb6aa69a443c 100644 --- a/numpy/lib/tests/test_twodim_base.py +++ b/numpy/lib/tests/test_twodim_base.py @@ -1,18 +1,36 @@ """Test functions for matrix module """ -from numpy.testing import ( - assert_equal, assert_array_equal, assert_array_max_ulp, - assert_array_almost_equal, assert_raises, assert_ -) +import pytest + +import numpy as np from numpy import ( - arange, add, fliplr, flipud, zeros, ones, 
eye, array, diag, histogram2d, - tri, mask_indices, triu_indices, triu_indices_from, tril_indices, - tril_indices_from, vander, + add, + arange, + array, + diag, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + ones, + tri, + tril_indices, + tril_indices_from, + triu_indices, + triu_indices_from, + vander, + zeros, +) +from numpy.testing import ( + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, ) -import numpy as np - -import pytest def get_mat(n): diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py index ebbb43b9f43b..447c2c36c192 100644 --- a/numpy/lib/tests/test_type_check.py +++ b/numpy/lib/tests/test_type_check.py @@ -1,11 +1,17 @@ import numpy as np from numpy import ( - common_type, mintypecode, isreal, iscomplex, isposinf, isneginf, - nan_to_num, isrealobj, iscomplexobj, real_if_close - ) -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises - ) + common_type, + iscomplex, + iscomplexobj, + isneginf, + isposinf, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real_if_close, +) +from numpy.testing import assert_, assert_array_equal, assert_equal def assert_all(x): diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py index f7f988b29385..b4257ebf9191 100644 --- a/numpy/lib/tests/test_ufunclike.py +++ b/numpy/lib/tests/test_ufunclike.py @@ -1,9 +1,6 @@ import numpy as np - -from numpy import fix, isposinf, isneginf -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_raises -) +from numpy import fix, isneginf, isposinf +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises class TestUfunclike: diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index 644912d941e3..0106ee0d8414 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -1,10 +1,10 @@ +from io import StringIO + 
import pytest import numpy as np -from numpy.testing import assert_raises_regex import numpy.lib._utils_impl as _utils_impl - -from io import StringIO +from numpy.testing import assert_raises_regex def test_assert_raises_regex_context_manager(): diff --git a/numpy/lib/user_array.py b/numpy/lib/user_array.py index d8217c56f256..2e96d03b5952 100644 --- a/numpy/lib/user_array.py +++ b/numpy/lib/user_array.py @@ -1 +1 @@ -from ._user_array_impl import __doc__, container +from ._user_array_impl import __doc__, container # noqa: F401 diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py index acb04d4e2136..fa230ece580c 100644 --- a/numpy/linalg/__init__.py +++ b/numpy/linalg/__init__.py @@ -84,12 +84,15 @@ """ # To get sub-modules -from . import linalg # deprecated in NumPy 2.0 -from . import _linalg +from . import ( + _linalg, + linalg, # deprecated in NumPy 2.0 +) from ._linalg import * __all__ = _linalg.__all__.copy() # noqa: PLE0605 from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/linalg/_linalg.py b/numpy/linalg/_linalg.py index f2d7e14fdf28..d7850c4a0204 100644 --- a/numpy/linalg/_linalg.py +++ b/numpy/linalg/_linalg.py @@ -19,26 +19,85 @@ import functools import operator import warnings -from typing import NamedTuple, Any +from typing import Any, NamedTuple -from numpy._utils import set_module from numpy._core import ( - array, asarray, zeros, empty, empty_like, intc, single, double, - csingle, cdouble, inexact, complexfloating, newaxis, all, inf, dot, - add, multiply, sqrt, sum, isfinite, finfo, errstate, moveaxis, amin, - amax, prod, abs, atleast_2d, intp, asanyarray, object_, - swapaxes, divide, count_nonzero, isnan, sign, argsort, sort, - reciprocal, overrides, diagonal as _core_diagonal, trace as _core_trace, - cross as _core_cross, outer as _core_outer, tensordot as _core_tensordot, - matmul as _core_matmul, matrix_transpose as _core_matrix_transpose, - transpose as _core_transpose, 
vecdot as _core_vecdot, + abs, + add, + all, + amax, + amin, + argsort, + array, + asanyarray, + asarray, + atleast_2d, + cdouble, + complexfloating, + count_nonzero, + csingle, + divide, + dot, + double, + empty, + empty_like, + errstate, + finfo, + inexact, + inf, + intc, + intp, + isfinite, + isnan, + moveaxis, + multiply, + newaxis, + object_, + overrides, + prod, + reciprocal, + sign, + single, + sort, + sqrt, + sum, + swapaxes, + zeros, +) +from numpy._core import ( + cross as _core_cross, +) +from numpy._core import ( + diagonal as _core_diagonal, +) +from numpy._core import ( + matmul as _core_matmul, +) +from numpy._core import ( + matrix_transpose as _core_matrix_transpose, +) +from numpy._core import ( + outer as _core_outer, +) +from numpy._core import ( + tensordot as _core_tensordot, +) +from numpy._core import ( + trace as _core_trace, +) +from numpy._core import ( + transpose as _core_transpose, +) +from numpy._core import ( + vecdot as _core_vecdot, ) from numpy._globals import _NoValue -from numpy.lib._twodim_base_impl import triu, eye +from numpy._typing import NDArray +from numpy._utils import set_module +from numpy.lib._twodim_base_impl import eye, triu from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple from numpy.linalg import _umath_linalg -from numpy._typing import NDArray class EigResult(NamedTuple): eigenvalues: NDArray[Any] @@ -1763,7 +1822,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): True """ - import numpy as _nx + import numpy as np a, wrap = _makearray(a) if hermitian: @@ -1775,9 +1834,9 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): sgn = sign(s) s = abs(s) sidx = argsort(s)[..., ::-1] - sgn = _nx.take_along_axis(sgn, sidx, axis=-1) - s = _nx.take_along_axis(s, sidx, axis=-1) - u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1) + sgn = np.take_along_axis(sgn, sidx, axis=-1) + s = np.take_along_axis(s, sidx, axis=-1) + u = np.take_along_axis(u, sidx[..., 
None, :], axis=-1) # singular values are unsigned, move the sign into v vt = transpose(u * sgn[..., None, :]).conjugate() return SVDResult(wrap(u), s, wrap(vt)) @@ -1958,7 +2017,7 @@ def cond(x, p=None): x = asarray(x) # in case we have a matrix if _is_empty_2d(x): raise LinAlgError("cond is not defined on empty arrays") - if p is None or p == 2 or p == -2: + if p is None or p in {2, -2}: s = svd(x, compute_uv=False) with errstate(all='ignore'): if p == -2: diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 5c4cdd73d9c1..3f318a892da5 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -1,39 +1,47 @@ from collections.abc import Iterable -from typing import Any, NamedTuple, Never, SupportsIndex, SupportsInt, TypeAlias, TypeVar, overload +from typing import ( + Any, + NamedTuple, + Never, + SupportsIndex, + SupportsInt, + TypeAlias, + TypeVar, + overload, +) from typing import Literal as L import numpy as np from numpy import ( - # re-exports - vecdot, - + complex128, + complexfloating, + float64, # other floating, - complexfloating, + int32, + object_, signedinteger, - unsignedinteger, timedelta64, - object_, - int32, - float64, - complex128, + unsignedinteger, + # re-exports + vecdot, ) -from numpy.linalg import LinAlgError from numpy._core.fromnumeric import matrix_transpose from numpy._core.numeric import tensordot from numpy._typing import ( - NDArray, ArrayLike, DTypeLike, + NDArray, _ArrayLike, _ArrayLikeBool_co, - _ArrayLikeInt_co, - _ArrayLikeUInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, - _ArrayLikeTD64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, _ArrayLikeObject_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, ) +from numpy.linalg import LinAlgError __all__ = [ "matrix_power", diff --git a/numpy/linalg/lapack_lite.pyi b/numpy/linalg/lapack_lite.pyi index 0f6bfa3a022b..835293a26762 100644 --- a/numpy/linalg/lapack_lite.pyi +++ b/numpy/linalg/lapack_lite.pyi @@ -1,4 +1,4 @@ -from typing import Any, Final, 
TypedDict, type_check_only +from typing import Final, TypedDict, type_check_only import numpy as np from numpy._typing import NDArray diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py index 1d903bd6409d..fea0d6a77ad4 100644 --- a/numpy/linalg/lapack_lite/clapack_scrub.py +++ b/numpy/linalg/lapack_lite/clapack_scrub.py @@ -4,7 +4,7 @@ import re import sys -from plex import Scanner, Str, Lexicon, Opt, Bol, State, AnyChar, TEXT, IGNORE +from plex import IGNORE, TEXT, AnyChar, Bol, Lexicon, Opt, Scanner, State, Str from plex.traditional import re as Re try: diff --git a/numpy/linalg/lapack_lite/fortran.py b/numpy/linalg/lapack_lite/fortran.py index 708a095c19f6..22eb666ef26f 100644 --- a/numpy/linalg/lapack_lite/fortran.py +++ b/numpy/linalg/lapack_lite/fortran.py @@ -1,6 +1,7 @@ # WARNING! This a Python 2 script. Read README.rst for rationale. -import re import itertools +import re + def isBlank(line): return not line diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py index 4de2b337328f..d5bb1e01cc7f 100755 --- a/numpy/linalg/lapack_lite/make_lite.py +++ b/numpy/linalg/lapack_lite/make_lite.py @@ -12,14 +12,14 @@ * patch """ -import sys import os import re -import subprocess import shutil +import subprocess +import sys -import fortran import clapack_scrub +import fortran try: from distutils.spawn import find_executable as which # Python 2 diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c index e5f3af05af22..cad5f3f92f09 100644 --- a/numpy/linalg/lapack_litemodule.c +++ b/numpy/linalg/lapack_litemodule.c @@ -377,28 +377,25 @@ static struct PyMethodDef lapack_lite_module_methods[] = { { NULL,NULL,0, NULL} }; +static int module_loaded = 0; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "lapack_lite", - NULL, - -1, - lapack_lite_module_methods, - NULL, - NULL, - NULL, - NULL -}; - -/* Initialization function for the module */ 
-PyMODINIT_FUNC PyInit_lapack_lite(void) +static int +lapack_lite_exec(PyObject *m) { - PyObject *m,*d; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + PyObject *d; + + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; + } + module_loaded = 1; + + if (PyArray_ImportNumPyAPI() < 0) { + return -1; } - import_array(); + d = PyModule_GetDict(m); LapackError = PyErr_NewException("numpy.linalg.lapack_lite.LapackError", NULL, NULL); PyDict_SetItemString(d, "LapackError", LapackError); @@ -409,10 +406,29 @@ PyMODINIT_FUNC PyInit_lapack_lite(void) PyDict_SetItemString(d, "_ilp64", Py_False); #endif -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot lapack_lite_slots[] = { + {Py_mod_exec, lapack_lite_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "lapack_lite", + .m_size = 0, + .m_methods = lapack_lite_module_methods, + .m_slots = lapack_lite_slots, +}; - return m; +PyMODINIT_FUNC PyInit_lapack_lite(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index d75b07342b58..81c80d0fd690 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -1,5 +1,6 @@ def __getattr__(attr_name): import warnings + from numpy.linalg import _linalg ret = getattr(_linalg, attr_name, None) if ret is None: diff --git 
a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index b47bb180a486..cbf7dd63be5e 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -1,28 +1,50 @@ """ Test functions for linalg module """ +import itertools import os +import subprocess import sys -import itertools +import textwrap import threading import traceback -import textwrap -import subprocess + import pytest import numpy as np -from numpy import array, single, double, csingle, cdouble, dot, identity, matmul +from numpy import ( + array, + asarray, + atleast_2d, + cdouble, + csingle, + dot, + double, + identity, + inf, + linalg, + matmul, + multiply, + single, +) from numpy._core import swapaxes from numpy.exceptions import AxisError -from numpy import multiply, atleast_2d, inf, asarray -from numpy import linalg -from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError +from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm from numpy.linalg._linalg import _multi_dot_matrix_chain_order from numpy.testing import ( - assert_, assert_equal, assert_raises, assert_array_equal, - assert_almost_equal, assert_allclose, suppress_warnings, - assert_raises_regex, HAS_LAPACK64, IS_WASM, NOGIL_BUILD, - ) + HAS_LAPACK64, + IS_WASM, + NOGIL_BUILD, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + suppress_warnings, +) + try: import numpy.linalg.lapack_lite except ImportError: diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index e8159fd570bf..c46f83adb0af 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -4,10 +4,14 @@ import pytest import numpy as np -from numpy import linalg, arange, float64, array, dot, transpose +from numpy import arange, array, dot, float64, linalg, transpose from numpy.testing import ( - assert_, assert_raises, 
assert_equal, assert_array_equal, - assert_array_almost_equal, assert_array_less + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_array_less, + assert_equal, + assert_raises, ) diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index ead6d84a73a2..1b6850145bc8 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -4688,57 +4688,54 @@ static PyMethodDef UMath_LinAlgMethods[] = { {NULL, NULL, 0, NULL} /* Sentinel */ }; -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - UMATH_LINALG_MODULE_NAME, - NULL, - -1, - UMath_LinAlgMethods, - NULL, - NULL, - NULL, - NULL -}; +static int module_loaded = 0; -PyMODINIT_FUNC PyInit__umath_linalg(void) +static int +_umath_linalg_exec(PyObject *m) { - PyObject *m; PyObject *d; PyObject *version; - m = PyModule_Create(&moduledef); - if (m == NULL) { - return NULL; + // https://docs.python.org/3/howto/isolating-extensions.html#opt-out-limiting-to-one-module-object-per-process + if (module_loaded) { + PyErr_SetString(PyExc_ImportError, + "cannot load module more than once per process"); + return -1; } + module_loaded = 1; - import_array(); - import_ufunc(); + if (PyArray_ImportNumPyAPI() < 0) { + return -1; + } + if (PyUFunc_ImportUFuncAPI() < 0) { + return -1; + } d = PyModule_GetDict(m); if (d == NULL) { - return NULL; + return -1; } version = PyUnicode_FromString(umath_linalg_version_string); if (version == NULL) { - return NULL; + return -1; } int ret = PyDict_SetItemString(d, "__version__", version); Py_DECREF(version); if (ret < 0) { - return NULL; + return -1; } /* Load the ufunc operators into the module's namespace */ if (addUfuncs(d) < 0) { - return NULL; + return -1; } #if PY_VERSION_HEX < 0x30d00b3 && !HAVE_EXTERNAL_LAPACK lapack_lite_lock = PyThread_allocate_lock(); if (lapack_lite_lock == NULL) { PyErr_NoMemory(); - return NULL; + return -1; } #endif @@ -4748,10 +4745,30 @@ PyMODINIT_FUNC PyInit__umath_linalg(void) 
PyDict_SetItemString(d, "_ilp64", Py_False); #endif -#if Py_GIL_DISABLED - // signal this module supports running with the GIL disabled - PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); + return 0; +} + +static struct PyModuleDef_Slot _umath_linalg_slots[] = { + {Py_mod_exec, (void*)_umath_linalg_exec}, +#if PY_VERSION_HEX >= 0x030c00f0 // Python 3.12+ + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED}, #endif +#if PY_VERSION_HEX >= 0x030d00f0 // Python 3.13+ + // signal that this module supports running without an active GIL + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, /* m_base */ + "_umath_linalg", /* m_name */ + NULL, /* m_doc */ + 0, /* m_size */ + UMath_LinAlgMethods, /* m_methods */ + _umath_linalg_slots, /* m_slots */ +}; - return m; +PyMODINIT_FUNC PyInit__umath_linalg(void) { + return PyModuleDef_Init(&moduledef); } diff --git a/numpy/ma/__init__.py b/numpy/ma/__init__.py index 03e9fcd075cc..e2a742e9b64a 100644 --- a/numpy/ma/__init__.py +++ b/numpy/ma/__init__.py @@ -39,10 +39,8 @@ .. moduleauthor:: Jarrod Millman """ -from . import core +from . import core, extras from .core import * - -from . import extras from .extras import * __all__ = ['core', 'extras'] @@ -50,5 +48,6 @@ __all__ += extras.__all__ from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi index 7e38d1793460..176e929a8228 100644 --- a/numpy/ma/__init__.pyi +++ b/numpy/ma/__init__.pyi @@ -1,9 +1,9 @@ from . 
import core, extras from .core import ( MAError, + MaskedArray, MaskError, MaskType, - MaskedArray, abs, absolute, add, @@ -33,10 +33,10 @@ from .core import ( array, asanyarray, asarray, - bool_, bitwise_and, bitwise_or, bitwise_xor, + bool_, ceil, choose, clip, @@ -86,17 +86,17 @@ from .core import ( indices, inner, innerproduct, - isMA, - isMaskedArray, is_mask, is_masked, isarray, + isMA, + isMaskedArray, left_shift, less, less_equal, log, - log10, log2, + log10, logical_and, logical_not, logical_or, @@ -193,8 +193,8 @@ from .extras import ( compress_nd, compress_rowcols, compress_rows, - count_masked, corrcoef, + count_masked, cov, diagflat, dot, @@ -204,9 +204,9 @@ from .extras import ( flatnotmasked_edges, hsplit, hstack, - isin, in1d, intersect1d, + isin, mask_cols, mask_rowcols, mask_rows, @@ -222,8 +222,8 @@ from .extras import ( setdiff1d, setxor1d, stack, - unique, union1d, + unique, vander, vstack, ) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index a4084c7dc938..05ea373a6a12 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -19,25 +19,34 @@ .. 
moduleauthor:: Pierre Gerard-Marchant """ -# pylint: disable-msg=E1002 import builtins import functools import inspect import operator -import warnings -import textwrap import re +import textwrap +import warnings import numpy as np -import numpy._core.umath as umath import numpy._core.numerictypes as ntypes +import numpy._core.umath as umath +from numpy import ( + _NoValue, + amax, + amin, + angle, + bool_, + expand_dims, + finfo, # noqa: F401 + iinfo, # noqa: F401 + iscomplexobj, + ndarray, +) +from numpy import array as narray # noqa: F401 from numpy._core import multiarray as mu -from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue, angle -from numpy import array as narray, expand_dims, iinfo, finfo from numpy._core.numeric import normalize_axis_tuple -from numpy._utils._inspect import getargspec, formatargspec from numpy._utils import set_module - +from numpy._utils._inspect import formatargspec, getargspec __all__ = [ 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', @@ -489,22 +498,21 @@ def _check_fill_value(fill_value, ndtype): fill_value = np.asarray(fill_value, dtype=object) fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype) + elif isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): + # Note this check doesn't work if fill_value is not a scalar + err_msg = "Cannot set fill value of string with array of dtype %s" + raise TypeError(err_msg % ndtype) else: - if isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): - # Note this check doesn't work if fill_value is not a scalar - err_msg = "Cannot set fill value of string with array of dtype %s" - raise TypeError(err_msg % ndtype) - else: - # In case we want to convert 1e20 to int. - # Also in case of converting string arrays. - try: - fill_value = np.asarray(fill_value, dtype=ndtype) - except (OverflowError, ValueError) as e: - # Raise TypeError instead of OverflowError or ValueError. 
- # OverflowError is seldom used, and the real problem here is - # that the passed fill_value is not compatible with the ndtype. - err_msg = "Cannot convert fill_value %s to dtype %s" - raise TypeError(err_msg % (fill_value, ndtype)) from e + # In case we want to convert 1e20 to int. + # Also in case of converting string arrays. + try: + fill_value = np.asarray(fill_value, dtype=ndtype) + except (OverflowError, ValueError) as e: + # Raise TypeError instead of OverflowError or ValueError. + # OverflowError is seldom used, and the real problem here is + # that the passed fill_value is not compatible with the ndtype. + err_msg = "Cannot convert fill_value %s to dtype %s" + raise TypeError(err_msg % (fill_value, ndtype)) from e return np.array(fill_value) @@ -571,7 +579,6 @@ def set_fill_value(a, fill_value): """ if isinstance(a, MaskedArray): a.set_fill_value(fill_value) - return def get_fill_value(a): @@ -1308,7 +1315,7 @@ def __call__(self, a, b, *args, **kwargs): # Domained binary ufuncs divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1) -true_divide = divide # Since Python 3 just an alias for divide. +true_divide = divide # Just an alias for divide. floor_divide = _DomainedBinaryOperation(umath.floor_divide, _DomainSafeDivide(), 0, 1) remainder = _DomainedBinaryOperation(umath.remainder, @@ -2498,7 +2505,6 @@ def _recursive_printoption(result, mask, printopt): _recursive_printoption(curdata, curmask, printopt) else: np.copyto(result, printopt, where=mask) - return # For better or worse, these end in a newline @@ -2976,32 +2982,32 @@ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, elif nm == nd: mask = np.reshape(mask, _data.shape) else: - msg = f"Mask and data not compatible: data size is {nd}, mask size is {nm}." 
+ msg = (f"Mask and data not compatible:" + f" data size is {nd}, mask size is {nm}.") raise MaskError(msg) copy = True # Set the mask to the new value if _data._mask is nomask: _data._mask = mask _data._sharedmask = not copy + elif not keep_mask: + _data._mask = mask + _data._sharedmask = not copy else: - if not keep_mask: - _data._mask = mask - _data._sharedmask = not copy + if _data.dtype.names is not None: + def _recursive_or(a, b): + "do a|=b on each field of a, recursively" + for name in a.dtype.names: + (af, bf) = (a[name], b[name]) + if af.dtype.names is not None: + _recursive_or(af, bf) + else: + af |= bf + + _recursive_or(_data._mask, mask) else: - if _data.dtype.names is not None: - def _recursive_or(a, b): - "do a|=b on each field of a, recursively" - for name in a.dtype.names: - (af, bf) = (a[name], b[name]) - if af.dtype.names is not None: - _recursive_or(af, bf) - else: - af |= bf - - _recursive_or(_data._mask, mask) - else: - _data._mask = np.logical_or(mask, _data._mask) - _data._sharedmask = False + _data._mask = np.logical_or(mask, _data._mask) + _data._sharedmask = False # Update fill_value. 
if fill_value is None: @@ -3041,7 +3047,6 @@ def _update_from(self, obj): '_basedict': _optinfo} self.__dict__.update(_dict) self.__dict__.update(_optinfo) - return def __array_finalize__(self, obj): """ @@ -3362,11 +3367,10 @@ def _scalar_heuristic(arr, elem): return dout # Just a scalar + elif mout: + return masked else: - if mout: - return masked - else: - return dout + return dout else: # Force dout to MA dout = dout.view(type(self)) @@ -4158,7 +4162,7 @@ def __repr__(self): suffix=',') if self._fill_value is None: - self.fill_value # initialize fill_value + self.fill_value # initialize fill_value # noqa: B018 if (self._fill_value.dtype.kind in ("S", "U") and self.dtype.kind == self._fill_value.dtype.kind): @@ -4416,9 +4420,8 @@ def __iadd__(self, other): if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m - else: - if m is not nomask: - self._mask += m + elif m is not nomask: + self._mask += m other_data = getdata(other) other_data = np.where(self._mask, other_data.dtype.type(0), other_data) self._data.__iadd__(other_data) @@ -6792,16 +6795,17 @@ def __repr__(self): return object.__repr__(self) def __format__(self, format_spec): - # Replace ndarray.__format__ with the default, which supports no format characters. - # Supporting format characters is unwise here, because we do not know what type - # the user was expecting - better to not guess. + # Replace ndarray.__format__ with the default, which supports no + # format characters. + # Supporting format characters is unwise here, because we do not know + # what type the user was expecting - better to not guess. 
try: return object.__format__(self, format_spec) except TypeError: # 2020-03-23, NumPy 1.19.0 warnings.warn( - "Format strings passed to MaskedConstant are ignored, but in future may " - "error or produce different behavior", + "Format strings passed to MaskedConstant are ignored," + " but in future may error or produce different behavior", FutureWarning, stacklevel=2 ) return object.__format__(self, "") @@ -6956,10 +6960,11 @@ def reduce(self, target, axis=np._NoValue): m = getmask(target) if axis is np._NoValue and target.ndim > 1: + name = self.__name__ # 2017-05-06, Numpy 1.13.0: warn on axis default warnings.warn( - f"In the future the default for ma.{self.__name__}.reduce will be axis=0, " - f"not the current None, to match np.{self.__name__}.reduce. " + f"In the future the default for ma.{name}.reduce will be axis=0, " + f"not the current None, to match np.{name}.reduce. " "Explicitly pass 0 or None to silence this warning.", MaskedArrayFutureWarning, stacklevel=2) axis = None @@ -7599,7 +7604,6 @@ def putmask(a, mask, values): # , mode='raise'): valmask = getmaskarray(values) np.copyto(a._mask, valmask, where=mask) np.copyto(a._data, valdata, where=mask) - return def transpose(a, axes=None): @@ -8323,9 +8327,9 @@ def correlate(a, v, mode='valid', propagate_mask=True): Refer to the `np.convolve` docstring. Note that the default is 'valid', unlike `convolve`, which uses 'full'. propagate_mask : bool - If True, then a result element is masked if any masked element contributes towards it. - If False, then a result element is only masked if no non-masked element - contribute towards it + If True, then a result element is masked if any masked element contributes + towards it. 
If False, then a result element is only masked if no non-masked + element contribute towards it Returns ------- diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 9641e7f63671..de6db7873faa 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,11 +1,10 @@ # pyright: reportIncompatibleMethodOverride=false # ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 -from collections.abc import Sequence -from typing import Any, Literal, Self, SupportsIndex, TypeAlias, TypeVar, overload - from _typeshed import Incomplete -from typing_extensions import TypeIs, deprecated +from collections.abc import Sequence +from typing import Any, Literal, NoReturn, Self, SupportsIndex, TypeAlias, overload +from typing_extensions import TypeIs, TypeVar import numpy as np from numpy import ( @@ -17,22 +16,54 @@ from numpy import ( amax, amin, bool_, + bytes_, + character, + complex128, + complexfloating, + datetime64, dtype, + dtypes, expand_dims, + float16, + float32, float64, + floating, generic, + inexact, int_, + integer, intp, ndarray, + number, + object_, + signedinteger, + str_, + timedelta64, + unsignedinteger, ) from numpy._globals import _NoValueType from numpy._typing import ( ArrayLike, NDArray, + _32Bit, + _64Bit, + _AnyShape, _ArrayLike, _ArrayLikeBool_co, + _ArrayLikeBytes_co, + _ArrayLikeComplex128_co, + _ArrayLikeComplex_co, + _ArrayLikeDT64_co, + _ArrayLikeFloat64_co, + _ArrayLikeFloat_co, _ArrayLikeInt, _ArrayLikeInt_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeStr_co, + _ArrayLikeString_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, _DTypeLikeBool, _IntLike_co, _ScalarLike_co, @@ -221,15 +252,25 @@ __all__ = [ "zeros_like", ] -_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) -_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) _DTypeT = TypeVar("_DTypeT", bound=dtype) 
-_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, covariant=True) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) _ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) _ScalarT = TypeVar("_ScalarT", bound=generic) _ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number) # A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` -_MaskedArray: TypeAlias = MaskedArray[_Shape, dtype[_ScalarT]] +_MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] + +_MaskedArrayUInt_co: TypeAlias = _MaskedArray[unsignedinteger | np.bool] +_MaskedArrayInt_co: TypeAlias = _MaskedArray[integer | np.bool] +_MaskedArrayComplex_co: TypeAlias = _MaskedArray[inexact | integer | np.bool] +_MaskedArrayTD64_co: TypeAlias = _MaskedArray[timedelta64 | integer | np.bool] +_MaskedArrayFloat64_co: TypeAlias = _MaskedArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_MaskedArrayComplex128_co: TypeAlias = _MaskedArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_MaskedArrayFloat_co: TypeAlias = _MaskedArray[floating | integer | np.bool] + _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] MaskType = bool_ @@ -444,10 +485,179 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] - def __add__(self, other): ... - def __radd__(self, other): ... - def __sub__(self, other): ... - def __rsub__(self, other): ... + + # Keep in sync with `ndarray.__add__` + @overload + def __add__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... 
+ @overload + def __add__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __add__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __add__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __add__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __add__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... 
+ @overload + def __add__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __add__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __add__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __add__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __add__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __add__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__radd__` + @overload # signature equivalent to __add__ + def __radd__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __radd__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> _MaskedArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __radd__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __radd__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... 
# type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __radd__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __radd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> _MaskedArray[bytes_]: ... + @overload + def __radd__(self: _MaskedArray[str_], other: _ArrayLikeStr_co, /) -> _MaskedArray[str_]: ... + @overload + def __radd__( + self: MaskedArray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_AnyShape, dtypes.StringDType]: ... + @overload + def __radd__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __radd__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + # Keep in sync with `ndarray.__sub__` + @overload + def __sub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... 
+ @overload + def __sub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __sub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __sub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeTD64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __sub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __sub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __sub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
+ + # Keep in sync with `ndarray.__rsub__` + @overload + def __rsub__(self: _MaskedArray[_NumberT], other: int | np.bool, /) -> MaskedArray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: _MaskedArray[_NumberT], other: _ArrayLikeBool_co, /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __rsub__(self: _MaskedArray[np.bool], other: _ArrayLike[_NumberT], /) -> _MaskedArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[float64], other: _ArrayLikeFloat64_co, /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> _MaskedArray[float64]: ... + @overload + def __rsub__(self: _MaskedArray[complex128], other: _ArrayLikeComplex128_co, /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> _MaskedArray[complex128]: ... + @overload + def __rsub__(self: _MaskedArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _MaskedArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayInt_co, other: _ArrayLikeInt_co, /) -> _MaskedArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _MaskedArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayComplex_co, other: _ArrayLikeComplex_co, /) -> _MaskedArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArray[number], other: _ArrayLikeNumber_co, /) -> _MaskedArray[number]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeTD64_co, /) -> _MaskedArray[timedelta64]: ... 
+ @overload + def __rsub__(self: _MaskedArrayTD64_co, other: _ArrayLikeDT64_co, /) -> _MaskedArray[datetime64]: ... + @overload + def __rsub__(self: _MaskedArray[datetime64], other: _ArrayLikeDT64_co, /) -> _MaskedArray[timedelta64]: ... + @overload + def __rsub__(self: _MaskedArray[object_], other: Any, /) -> Any: ... + @overload + def __rsub__(self: _MaskedArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __mul__(self, other): ... def __rmul__(self, other): ... def __truediv__(self, other): ... @@ -456,12 +666,126 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rfloordiv__(self, other): ... def __pow__(self, other, mod: None = None, /): ... def __rpow__(self, other, mod: None = None, /): ... - def __iadd__(self, other): ... - def __isub__(self, other): ... - def __imul__(self, other): ... - def __ifloordiv__(self, other): ... - def __itruediv__(self, other): ... - def __ipow__(self, other): ... + + # Keep in sync with `ndarray.__iadd__` + @overload + def __iadd__( + self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: MaskedArray[Any, dtype[str_] | dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... 
+ @overload + def __iadd__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__isub__` + @overload + def __isub__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__imul__` + @overload + def __imul__( + self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: MaskedArray[Any, dtype[integer] | dtype[character] | dtypes.StringDType], other: _ArrayLikeInt_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__ifloordiv__` + @overload + def __ifloordiv__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ifloordiv__( + self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... 
+ @overload + def __ifloordiv__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__itruediv__` + @overload + def __itruediv__( + self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __itruediv__( + self: _MaskedArray[complexfloating], + other: _ArrayLikeComplex_co, + /, + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __itruediv__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__ipow__` + @overload + def __ipow__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__( + self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # @property # type: ignore[misc] def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... get_imag: Any @@ -874,7 +1198,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): self, repeats: _ArrayLikeInt_co, axis: SupportsIndex, - ) -> MaskedArray[_Shape, _DTypeT_co]: ... + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... squeeze: Any @@ -883,14 +1207,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): axis1: SupportsIndex, axis2: SupportsIndex, / - ) -> MaskedArray[_Shape, _DTypeT_co]: ... + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... # def toflex(self) -> Incomplete: ... def torecords(self) -> Incomplete: ... def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... - @deprecated("tostring() is deprecated. 
Use tobytes() instead.") - def tostring(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... @@ -902,7 +1224,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @property def dtype(self) -> _DTypeT_co: ... @dtype.setter - def dtype(self: MaskedArray[Any, _DTypeT], dtype: _DTypeT, /) -> None: ... + def dtype(self: MaskedArray[_AnyShape, _DTypeT], dtype: _DTypeT, /) -> None: ... class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __new__( @@ -927,7 +1249,7 @@ isarray = isMaskedArray isMA = isMaskedArray # 0D float64 array -class MaskedConstant(MaskedArray[Any, dtype[float64]]): +class MaskedConstant(MaskedArray[_AnyShape, dtype[float64]]): def __new__(cls): ... __class__: Any def __array_finalize__(self, obj): ... diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 22151b95e27d..094c1e26b191 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -5,7 +5,6 @@ :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ __all__ = [ @@ -23,18 +22,37 @@ import itertools import warnings -from . 
import core as ma -from .core import ( - MaskedArray, MAError, add, array, asarray, concatenate, filled, count, - getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or, - nomask, ones, sort, zeros, getdata, get_masked_subclass, dot - ) - import numpy as np -from numpy import ndarray, array as nxarray -from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple +from numpy import array as nxarray +from numpy import ndarray from numpy.lib._function_base_impl import _ureduce from numpy.lib._index_tricks_impl import AxisConcatenator +from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple + +from . import core as ma +from .core import ( # noqa: F401 + MAError, + MaskedArray, + add, + array, + asarray, + concatenate, + count, + dot, + filled, + get_masked_subclass, + getdata, + getmask, + getmaskarray, + make_mask_descr, + mask_or, + masked, + masked_array, + nomask, + ones, + sort, + zeros, +) def issequence(seq): diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index afb3fc95c4c9..835f3ce5b772 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -18,7 +18,6 @@ import numpy as np import numpy.ma as ma - _byteorderconv = np._core.records._byteorderconv @@ -117,7 +116,8 @@ def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, elif nm == nd: mask = np.reshape(mask, self.shape) else: - msg = f"Mask and data not compatible: data size is {nd}, mask size is {nm}." 
+ msg = (f"Mask and data not compatible: data size is {nd}," + " mask size is {nm}.") raise ma.MAError(msg) if not keep_mask: self.__setmask__(mask) @@ -149,7 +149,6 @@ def __array_finalize__(self, obj): self._update_from(obj) if _dict['_baseclass'] == np.ndarray: _dict['_baseclass'] = np.recarray - return @property def _data(self): diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index 6e608c8cbca1..cae687aa7d1a 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -1,6 +1,7 @@ from typing import Any, TypeVar from numpy import dtype + from . import MaskedArray __all__ = [ diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index b3d42c363335..091ba6c99fff 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -1,4 +1,3 @@ -# pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102 """Tests suite for MaskedArray & subclassing. :author: Pierre Gerard-Marchant @@ -6,50 +5,148 @@ """ __author__ = "Pierre GF Gerard-Marchant" -import sys -import warnings import copy -import operator import itertools -import textwrap +import operator import pickle +import sys +import textwrap +import warnings from functools import reduce import pytest import numpy as np -import numpy.ma.core import numpy._core.fromnumeric as fromnumeric import numpy._core.umath as umath -from numpy.exceptions import AxisError -from numpy.testing import ( - assert_raises, assert_warns, suppress_warnings, IS_WASM, temppath - ) -from numpy.testing._private.utils import requires_memory +import numpy.ma.core from numpy import ndarray from numpy._utils import asbytes -from numpy.ma.testutils import ( - assert_, assert_array_equal, assert_equal, assert_almost_equal, - assert_equal_records, fail_if_equal, assert_not_equal, - assert_mask_equal - ) +from numpy.exceptions import AxisError from numpy.ma.core import ( - MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all, - allclose, allequal, alltrue, angle, anom, arange, arccos, 
arccosh, arctan2, - arcsin, arctan, argsort, array, asarray, choose, concatenate, - conjugate, cos, cosh, count, default_fill_value, diag, divide, doc_note, - empty, empty_like, equal, exp, flatten_mask, filled, fix_invalid, - flatten_structured_array, fromflex, getmask, getmaskarray, greater, - greater_equal, identity, inner, isMaskedArray, less, less_equal, log, - log10, make_mask, make_mask_descr, mask_or, masked, masked_array, - masked_equal, masked_greater, masked_greater_equal, masked_inside, - masked_less, masked_less_equal, masked_not_equal, masked_outside, - masked_print_option, masked_values, masked_where, max, maximum, - maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply, - mvoid, nomask, not_equal, ones, ones_like, outer, power, product, put, - putmask, ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, - sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, zeros_like, - ) + MAError, + MaskedArray, + MaskError, + MaskType, + abs, + absolute, + add, + all, + allclose, + allequal, + alltrue, + angle, + anom, + arange, + arccos, + arccosh, + arcsin, + arctan, + arctan2, + argsort, + array, + asarray, + choose, + concatenate, + conjugate, + cos, + cosh, + count, + default_fill_value, + diag, + divide, + empty, + empty_like, + equal, + exp, + filled, + fix_invalid, + flatten_mask, + flatten_structured_array, + fromflex, + getmask, + getmaskarray, + greater, + greater_equal, + identity, + inner, + isMaskedArray, + less, + less_equal, + log, + log10, + make_mask, + make_mask_descr, + mask_or, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_less, + masked_less_equal, + masked_not_equal, + masked_outside, + masked_print_option, + masked_values, + masked_where, + max, + maximum, + maximum_fill_value, + min, + minimum, + minimum_fill_value, + mod, + multiply, + mvoid, + nomask, + not_equal, + ones, + ones_like, + outer, + power, + product, + put, + putmask, + 
ravel, + repeat, + reshape, + resize, + shape, + sin, + sinh, + sometrue, + sort, + sqrt, + subtract, + sum, + take, + tan, + tanh, + transpose, + where, + zeros, + zeros_like, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_equal_records, + assert_mask_equal, + assert_not_equal, + fail_if_equal, +) +from numpy.testing import ( + IS_WASM, + assert_raises, + assert_warns, + suppress_warnings, + temppath, +) +from numpy.testing._private.utils import requires_memory pi = np.pi diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py index 40c8418f5c18..8cc8b9c72bb9 100644 --- a/numpy/ma/tests/test_deprecations.py +++ b/numpy/ma/tests/test_deprecations.py @@ -1,13 +1,16 @@ """Test deprecation and future warnings. """ +import io +import textwrap + import pytest + import numpy as np -from numpy.testing import assert_warns -from numpy.ma.testutils import assert_equal from numpy.ma.core import MaskedArrayFutureWarning -import io -import textwrap +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_warns + class TestArgsort: """ gh-8701 """ diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index d4eeec59723e..3d10e839cbc9 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -1,36 +1,74 @@ -# pylint: disable-msg=W0611, W0612, W0511 """Tests suite for MaskedArray. 
Adapted from the original test_ma by Pierre Gerard-Marchant :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ -import warnings import itertools +import warnings + import pytest import numpy as np from numpy._core.numeric import normalize_axis_tuple -from numpy.testing import ( - assert_warns, suppress_warnings - ) -from numpy.ma.testutils import ( - assert_, assert_array_equal, assert_equal, assert_almost_equal - ) from numpy.ma.core import ( - array, arange, masked, MaskedArray, masked_array, getmaskarray, shape, - nomask, ones, zeros, count - ) + MaskedArray, + arange, + array, + count, + getmaskarray, + masked, + masked_array, + nomask, + ones, + shape, + zeros, +) from numpy.ma.extras import ( - atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef, - median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d, - ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols, - mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous, - notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin, - diagflat, ndenumerate, stack, vstack, _covhelper - ) + _covhelper, + apply_along_axis, + apply_over_axes, + atleast_1d, + atleast_2d, + atleast_3d, + average, + clump_masked, + clump_unmasked, + compress_nd, + compress_rowcols, + corrcoef, + cov, + diagflat, + dot, + ediff1d, + flatnotmasked_contiguous, + in1d, + intersect1d, + isin, + mask_rowcols, + masked_all, + masked_all_like, + median, + mr_, + ndenumerate, + notmasked_contiguous, + notmasked_edges, + polyfit, + setdiff1d, + setxor1d, + stack, + union1d, + unique, + vstack, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, +) +from numpy.testing import assert_warns, suppress_warnings class TestGeneric: diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index 
b73d32796772..0da915101511 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -1,4 +1,3 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 """Tests suite for mrecords. :author: Pierre Gerard-Marchant @@ -9,19 +8,24 @@ import numpy as np import numpy.ma as ma +from numpy._core.records import fromarrays as recfromarrays +from numpy._core.records import fromrecords as recfromrecords +from numpy._core.records import recarray from numpy.ma import masked, nomask -from numpy.testing import temppath -from numpy._core.records import ( - recarray, fromrecords as recfromrecords, fromarrays as recfromarrays - ) from numpy.ma.mrecords import ( - MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords, - addfield - ) + MaskedRecords, + addfield, + fromarrays, + fromrecords, + fromtextfile, + mrecarray, +) from numpy.ma.testutils import ( - assert_, assert_equal, + assert_, + assert_equal, assert_equal_records, - ) +) +from numpy.testing import temppath class TestMRecords: diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index e21dd39768e1..30c3311798fc 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -1,27 +1,93 @@ -from functools import reduce import pickle +from functools import reduce import pytest import numpy as np -import numpy._core.umath as umath import numpy._core.fromnumeric as fromnumeric -from numpy.testing import ( - assert_, assert_raises, assert_equal, - ) +import numpy._core.umath as umath from numpy.ma import ( - MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue, - arange, arccos, arcsin, arctan, arctan2, array, average, choose, - concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled, - getmask, greater, greater_equal, inner, isMaskedArray, less, - less_equal, log, log10, make_mask, masked, masked_array, masked_equal, - masked_greater, masked_greater_equal, masked_inside, masked_less, - masked_less_equal, masked_not_equal, 
masked_outside, - masked_print_option, masked_values, masked_where, maximum, minimum, - multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel, - repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum, - take, tan, tanh, transpose, where, zeros, - ) + MaskedArray, + MaskType, + absolute, + add, + all, + allclose, + allequal, + alltrue, + arange, + arccos, + arcsin, + arctan, + arctan2, + array, + average, + choose, + concatenate, + conjugate, + cos, + cosh, + count, + divide, + equal, + exp, + filled, + getmask, + greater, + greater_equal, + inner, + isMaskedArray, + less, + less_equal, + log, + log10, + make_mask, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_less, + masked_less_equal, + masked_not_equal, + masked_outside, + masked_print_option, + masked_values, + masked_where, + maximum, + minimum, + multiply, + nomask, + nonzero, + not_equal, + ones, + outer, + product, + put, + ravel, + repeat, + resize, + shape, + sin, + sinh, + sometrue, + sort, + sqrt, + subtract, + sum, + take, + tan, + tanh, + transpose, + where, + zeros, +) +from numpy.testing import ( + assert_, + assert_equal, + assert_raises, +) pi = np.pi diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py index 372cf8bbb340..025387ba454c 100644 --- a/numpy/ma/tests/test_regression.py +++ b/numpy/ma/tests/test_regression.py @@ -1,7 +1,10 @@ import numpy as np from numpy.testing import ( - assert_, assert_array_equal, assert_allclose, suppress_warnings - ) + assert_, + assert_allclose, + assert_array_equal, + suppress_warnings, +) class TestRegression: @@ -17,7 +20,7 @@ def test_masked_array(self): def test_mem_masked_where(self): # Ticket #62 - from numpy.ma import masked_where, MaskType + from numpy.ma import MaskType, masked_where a = np.zeros((1, 1)) b = np.zeros(a.shape, MaskType) c = masked_where(b, a) diff --git a/numpy/ma/tests/test_subclassing.py 
b/numpy/ma/tests/test_subclassing.py index 855efd15fc97..3364e563097e 100644 --- a/numpy/ma/tests/test_subclassing.py +++ b/numpy/ma/tests/test_subclassing.py @@ -1,19 +1,28 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 """Tests suite for MaskedArray & subclassing. :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ import numpy as np from numpy.lib.mixins import NDArrayOperatorsMixin -from numpy.testing import assert_, assert_raises -from numpy.ma.testutils import assert_equal from numpy.ma.core import ( - array, arange, masked, MaskedArray, masked_array, log, add, hypot, - divide, asarray, asanyarray, nomask - ) + MaskedArray, + add, + arange, + array, + asanyarray, + asarray, + divide, + hypot, + log, + masked, + masked_array, + nomask, +) +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises + # from numpy.ma.core import ( def assert_startswith(a, b): @@ -31,7 +40,6 @@ def __new__(cls, arr, info={}): def __array_finalize__(self, obj): super().__array_finalize__(obj) self.info = getattr(obj, 'info', {}).copy() - return def __add__(self, other): result = super().__add__(other) diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py index ce597b3d5e99..bffcc34b759c 100644 --- a/numpy/ma/testutils.py +++ b/numpy/ma/testutils.py @@ -2,20 +2,23 @@ :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu -:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $ """ import operator import numpy as np -from numpy import ndarray import numpy._core.umath as umath import numpy.testing -from numpy.testing import ( - assert_, assert_allclose, assert_array_almost_equal_nulp, - assert_raises, build_err_msg - ) -from .core import mask_or, getmask, masked_array, nomask, masked, filled +from numpy import ndarray +from numpy.testing import ( # noqa: F401 + assert_, + assert_allclose, + 
assert_array_almost_equal_nulp, + assert_raises, + build_err_msg, +) + +from .core import filled, getmask, mask_or, masked, masked_array, nomask __all__masked = [ 'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal', @@ -29,7 +32,8 @@ # have mistakenly included them from this file. SciPy is one. That is # unfortunate, as some of these functions are not intended to work with # masked arrays. But there was no way to tell before. -from unittest import TestCase +from unittest import TestCase # noqa: F401 + __some__from_testing = [ 'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp', 'assert_raises' @@ -91,7 +95,6 @@ def _assert_equal_on_sequences(actual, desired, err_msg=''): assert_equal(len(actual), len(desired), err_msg) for k in range(len(desired)): assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}') - return def assert_equal_records(a, b): @@ -106,7 +109,6 @@ def assert_equal_records(a, b): (af, bf) = (operator.getitem(a, f), operator.getitem(b, f)) if not (af is masked) and not (bf is masked): assert_equal(operator.getitem(a, f), operator.getitem(b, f)) - return def assert_equal(actual, desired, err_msg=''): diff --git a/numpy/matlib.py b/numpy/matlib.py index 4b11015046e8..f27d503cdbca 100644 --- a/numpy/matlib.py +++ b/numpy/matlib.py @@ -10,11 +10,12 @@ PendingDeprecationWarning, stacklevel=2) import numpy as np -from numpy.matrixlib.defmatrix import matrix, asmatrix + # Matlib.py contains all functions in the numpy namespace with a few # replacements. See doc/source/reference/routines.matlib.rst for details. # Need * as we're copying the numpy namespace. 
from numpy import * # noqa: F403 +from numpy.matrixlib.defmatrix import asmatrix, matrix __version__ = np.__version__ diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index c6a10c6327ef..baeadc078028 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -2,9 +2,7 @@ from typing import Any, Literal, TypeAlias, TypeVar, overload import numpy as np import numpy.typing as npt - -# ruff: noqa: F401 -from numpy import ( +from numpy import ( # noqa: F401 False_, ScalarType, True_, @@ -515,8 +513,6 @@ _Order: TypeAlias = Literal["C", "F"] ### -# ruff: noqa: F811 - # @overload def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... diff --git a/numpy/matrixlib/__init__.py b/numpy/matrixlib/__init__.py index 8a7597d30387..1ff5cb58cc96 100644 --- a/numpy/matrixlib/__init__.py +++ b/numpy/matrixlib/__init__.py @@ -7,5 +7,6 @@ __all__ = defmatrix.__all__ from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi index e8ec8b248866..56ae8bf4c84b 100644 --- a/numpy/matrixlib/__init__.pyi +++ b/numpy/matrixlib/__init__.pyi @@ -1,4 +1,5 @@ from numpy import matrix -from .defmatrix import bmat, asmatrix + +from .defmatrix import asmatrix, bmat __all__ = ["matrix", "bmat", "asmatrix"] diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 09f10fa3be6d..39b9a935500e 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -1,12 +1,13 @@ __all__ = ['matrix', 'bmat', 'asmatrix'] +import ast import sys import warnings -import ast -from numpy._utils import set_module import numpy._core.numeric as N from numpy._core.numeric import concatenate, isscalar +from numpy._utils import set_module + # While not in __all__, matrix_power used to be defined here, so we import # it for backward compatibility. 
from numpy.linalg import matrix_power diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py index c68dfcff4107..ce23933ab7f7 100644 --- a/numpy/matrixlib/tests/test_defmatrix.py +++ b/numpy/matrixlib/tests/test_defmatrix.py @@ -1,12 +1,17 @@ import collections.abc import numpy as np -from numpy import matrix, asmatrix, bmat -from numpy.testing import ( - assert_, assert_equal, assert_almost_equal, assert_array_equal, - assert_array_almost_equal, assert_raises - ) +from numpy import asmatrix, bmat, matrix from numpy.linalg import matrix_power +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) + class TestCtor: def test_basic(self): diff --git a/numpy/matrixlib/tests/test_interaction.py b/numpy/matrixlib/tests/test_interaction.py index 2af706bef785..87d133a2c586 100644 --- a/numpy/matrixlib/tests/test_interaction.py +++ b/numpy/matrixlib/tests/test_interaction.py @@ -2,15 +2,21 @@ Note that tests with MaskedArray and linalg are done in separate files. 
""" -import pytest - import textwrap import warnings +import pytest + import numpy as np -from numpy.testing import (assert_, assert_equal, assert_raises, - assert_raises_regex, assert_array_equal, - assert_almost_equal, assert_array_almost_equal) +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) def test_fancy_indexing(): diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py index 63eddba4622d..e6df047ee6ca 100644 --- a/numpy/matrixlib/tests/test_masked_matrix.py +++ b/numpy/matrixlib/tests/test_masked_matrix.py @@ -1,13 +1,22 @@ import pickle import numpy as np -from numpy.testing import assert_warns -from numpy.ma.testutils import (assert_, assert_equal, assert_raises, - assert_array_equal) -from numpy.ma.core import (masked_array, masked_values, masked, allequal, - MaskType, getmask, MaskedArray, nomask, - log, add, hypot, divide) +from numpy.ma.core import ( + MaskedArray, + MaskType, + add, + allequal, + divide, + getmask, + hypot, + log, + masked, + masked_array, + masked_values, + nomask, +) from numpy.ma.extras import mr_ +from numpy.ma.testutils import assert_, assert_array_equal, assert_equal, assert_raises class MMatrix(MaskedArray, np.matrix,): @@ -20,7 +29,6 @@ def __new__(cls, data, mask=nomask): def __array_finalize__(self, obj): np.matrix.__array_finalize__(self, obj) MaskedArray.__array_finalize__(self, obj) - return @property def _series(self): diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py index 106c2e38217a..4e639653bda4 100644 --- a/numpy/matrixlib/tests/test_matrix_linalg.py +++ b/numpy/matrixlib/tests/test_matrix_linalg.py @@ -1,12 +1,24 @@ """ Test functions for linalg module using the matrix class.""" import numpy as np - from numpy.linalg.tests.test_linalg import ( - LinalgCase, apply_tag, TestQR as _TestQR, 
LinalgTestCase, - _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base, - SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases, - PinvCases, DetCases, LstsqCases) - + CondCases, + DetCases, + EigCases, + EigvalsCases, + InvCases, + LinalgCase, + LinalgTestCase, + LstsqCases, + PinvCases, + SolveCases, + SVDCases, + _TestNorm2D, + _TestNormDoubleBase, + _TestNormInt64Base, + _TestNormSingleBase, + apply_tag, +) +from numpy.linalg.tests.test_linalg import TestQR as _TestQR CASES = [] diff --git a/numpy/matrixlib/tests/test_multiarray.py b/numpy/matrixlib/tests/test_multiarray.py index 638d0d1534de..2d9d1f8efe41 100644 --- a/numpy/matrixlib/tests/test_multiarray.py +++ b/numpy/matrixlib/tests/test_multiarray.py @@ -1,5 +1,6 @@ import numpy as np -from numpy.testing import assert_, assert_equal, assert_array_equal +from numpy.testing import assert_, assert_array_equal, assert_equal + class TestView: def test_type(self): diff --git a/numpy/matrixlib/tests/test_numeric.py b/numpy/matrixlib/tests/test_numeric.py index 40dd5660153e..f2c259f2fb97 100644 --- a/numpy/matrixlib/tests/test_numeric.py +++ b/numpy/matrixlib/tests/test_numeric.py @@ -1,6 +1,7 @@ import numpy as np from numpy.testing import assert_equal + class TestDot: def test_matscalar(self): b1 = np.matrix(np.ones((3, 3), dtype=complex)) diff --git a/numpy/meson.build b/numpy/meson.build index 7fcafa9c8184..67e4861d7ad6 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -317,7 +317,6 @@ pure_subdirs = [ '_pyinstaller', '_typing', '_utils', - 'compat', 'ctypeslib', 'doc', 'f2py', diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py index 01e682606601..ed1ad5a2fdd3 100644 --- a/numpy/polynomial/__init__.py +++ b/numpy/polynomial/__init__.py @@ -114,14 +114,14 @@ - ``p.truncate(size)`` -- Truncate ``p`` to given size """ -from .polynomial import Polynomial from .chebyshev import Chebyshev -from .legendre import Legendre from .hermite import Hermite from 
.hermite_e import HermiteE from .laguerre import Laguerre +from .legendre import Legendre +from .polynomial import Polynomial -__all__ = [ +__all__ = [ # noqa: F822 "set_default_printstyle", "polynomial", "Polynomial", "chebyshev", "Chebyshev", @@ -182,5 +182,6 @@ def set_default_printstyle(style): from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index c5dccfe16dee..6fb0fb5ec7fa 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -1,12 +1,12 @@ from typing import Final, Literal -from .polynomial import Polynomial +from . import chebyshev, hermite, hermite_e, laguerre, legendre, polynomial from .chebyshev import Chebyshev -from .legendre import Legendre from .hermite import Hermite from .hermite_e import HermiteE from .laguerre import Laguerre -from . import polynomial, chebyshev, legendre, hermite, hermite_e, laguerre +from .legendre import Legendre +from .polynomial import Polynomial __all__ = [ "set_default_printstyle", @@ -21,4 +21,5 @@ __all__ = [ def set_default_printstyle(style: Literal["ascii", "unicode"]) -> None: ... from numpy._pytesttester import PytestTester as _PytestTester + test: Final[_PytestTester] diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index f454a131c31d..f89343340931 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -6,12 +6,13 @@ abc module from the stdlib, hence it is only available for Python >= 2.6. """ -import os import abc import numbers +import os from collections.abc import Callable import numpy as np + from . import polyutils as pu __all__ = ['ABCPolyBase'] @@ -1015,7 +1016,7 @@ class domain in NumPy 1.4 and ``None`` in later versions. 
if domain[0] == domain[1]: domain[0] -= 1 domain[1] += 1 - elif type(domain) is list and len(domain) == 0: + elif isinstance(domain, list) and len(domain) == 0: domain = cls.domain if window is None: @@ -1063,7 +1064,7 @@ def fromroots(cls, roots, domain=[], window=None, symbol='x'): [roots] = pu.as_series([roots], trim=False) if domain is None: domain = pu.getdomain(roots) - elif type(domain) is list and len(domain) == 0: + elif isinstance(domain, list) and len(domain) == 0: domain = cls.domain if window is None: diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index d36e6b64ca20..30c906fa3b4b 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -2,13 +2,27 @@ import abc import decimal import numbers from collections.abc import Iterator, Mapping, Sequence -from typing import Any, ClassVar, Final, Generic, Literal, LiteralString, Self, SupportsIndex, TypeAlias, overload - +from typing import ( + Any, + ClassVar, + Generic, + Literal, + LiteralString, + Self, + SupportsIndex, + TypeAlias, + overload, +) from typing_extensions import TypeIs, TypeVar import numpy as np import numpy.typing as npt -from numpy._typing import _ArrayLikeComplex_co, _ArrayLikeFloat_co, _FloatLike_co, _NumberLike_co +from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _FloatLike_co, + _NumberLike_co, +) from ._polytypes import ( _AnyInt, diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index de2d36142497..241a65be2fa2 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -19,18 +19,18 @@ from typing import ( import numpy as np import numpy.typing as npt from numpy._typing import ( + _ArrayLikeComplex_co, # array-likes _ArrayLikeFloat_co, - _ArrayLikeComplex_co, _ArrayLikeNumber_co, _ArrayLikeObject_co, - _NestedSequence, - _SupportsArray, + _ComplexLike_co, + _FloatLike_co, # scalar-likes _IntLike_co, - _FloatLike_co, - _ComplexLike_co, + 
_NestedSequence, _NumberLike_co, + _SupportsArray, ) _T = TypeVar("_T") diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 4b4c28d37860..58fce6046287 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -438,7 +438,7 @@ def cheb2poly(c): array([-2., -8., 4., 12.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 2c319e978efe..47e1dfc05b4b 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -177,7 +177,7 @@ def herm2poly(c): array([0., 1., 2., 3.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 07db43d0c000..f7d907c1b39d 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,4 +1,5 @@ -from typing import Any, Final, Literal as L, TypeVar +from typing import Any, Final, TypeVar +from typing import Literal as L import numpy as np diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 53caf9151343..d30fc1b5aa14 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -179,7 +179,7 @@ def herme2poly(c): array([0., 1., 2., 3.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index 94ad7248f268..e8013e66b62f 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,4 +1,5 @@ -from typing import Any, Final, Literal as L, TypeVar +from typing import Any, Final, TypeVar +from typing import Literal as L import numpy as np diff --git a/numpy/polynomial/laguerre.py 
b/numpy/polynomial/laguerre.py index ef547f24ede1..38eb5a80b200 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -177,7 +177,7 @@ def lag2poly(c): array([0., 1., 2., 3.]) """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index a2b84f72bab7..6f67257a607c 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -1,4 +1,5 @@ -from typing import Final, Literal as L +from typing import Final +from typing import Literal as L import numpy as np diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index a363a1e1877e..b43bdfa83034 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -191,7 +191,7 @@ def leg2poly(c): """ - from .polynomial import polyadd, polysub, polymulx + from .polynomial import polyadd, polymulx, polysub [c] = pu.as_series([c]) n = len(c) diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index d81f3e6f54a4..35ea2ffd2bf2 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -1,4 +1,5 @@ -from typing import Final, Literal as L +from typing import Final +from typing import Literal as L import numpy as np diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 32b53b757a1c..6ec0dc58a1de 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -82,6 +82,7 @@ import numpy as np import numpy.linalg as la +from numpy._core.overrides import array_function_dispatch from numpy.lib.array_utils import normalize_axis_index from . 
import polyutils as pu @@ -841,7 +842,13 @@ def polyvalfromroots(x, r, tensor=True): raise ValueError("x.ndim must be < r.ndim when tensor == False") return np.prod(x - r, axis=0) +def _polyval2d_dispatcher(x, y, c): + return (x, y, c) +def _polygrid2d_dispatcher(x, y, c): + return (x, y, c) + +@array_function_dispatch(_polyval2d_dispatcher) def polyval2d(x, y, c): """ Evaluate a 2-D polynomial at points (x, y). @@ -893,7 +900,7 @@ def polyval2d(x, y, c): """ return pu._valnd(polyval, c, x, y) - +@array_function_dispatch(_polygrid2d_dispatcher) def polygrid2d(x, y, c): """ Evaluate a 2-D polynomial on the Cartesian product of x and y. diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index 89a8b57185f3..b4c784492b50 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -1,12 +1,12 @@ -from typing import Final, Literal as L +from typing import Final +from typing import Literal as L import numpy as np + from ._polybase import ABCPolyBase from ._polytypes import ( _Array1, _Array2, - _FuncVal2D, - _FuncVal3D, _FuncBinOp, _FuncCompanion, _FuncDer, @@ -18,10 +18,12 @@ from ._polytypes import ( _FuncRoots, _FuncUnOp, _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, _FuncVander, _FuncVander2D, _FuncVander3D, - _FuncValFromRoots, ) from .polyutils import trimcoef as polytrim diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 904ae10fb19c..18dc0a8d1d24 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -18,12 +18,11 @@ mapparms parameters of the linear map between domains. 
""" -import operator import functools +import operator import warnings import numpy as np - from numpy._core.multiarray import dragon4_positional, dragon4_scientific from numpy.exceptions import RankWarning diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 57657808fcaf..c627e16dca1d 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Iterable, Sequence from typing import ( - Any, Final, Literal, SupportsIndex, @@ -12,40 +11,33 @@ from typing import ( import numpy as np import numpy.typing as npt from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, _FloatLike_co, _NumberLike_co, - - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, ) from ._polytypes import ( _AnyInt, - _CoefLike_co, - _Array2, - _Tuple2, - - _FloatSeries, + _ArrayLikeCoef_co, + _CoefArray, + _CoefLike_co, _CoefSeries, - _ComplexSeries, - _ObjectSeries, - _ComplexArray, + _ComplexSeries, _FloatArray, - _CoefArray, - _ObjectArray, - - _SeriesLikeInt_co, - _SeriesLikeFloat_co, - _SeriesLikeComplex_co, - _SeriesLikeCoef_co, - - _ArrayLikeCoef_co, - + _FloatSeries, _FuncBinOp, _FuncValND, _FuncVanderND, + _ObjectArray, + _ObjectSeries, + _SeriesLikeCoef_co, + _SeriesLikeComplex_co, + _SeriesLikeFloat_co, + _SeriesLikeInt_co, + _Tuple2, ) __all__: Final[Sequence[str]] = [ diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 7733ded90412..2cead454631c 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -7,8 +7,11 @@ import numpy.polynomial.chebyshev as cheb from numpy.polynomial.polynomial import polyval from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) def trim(x): diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py index 
dbbbb2011535..d10aafbda866 100644 --- a/numpy/polynomial/tests/test_classes.py +++ b/numpy/polynomial/tests/test_classes.py @@ -7,13 +7,23 @@ from numbers import Number import pytest + import numpy as np +from numpy.exceptions import RankWarning from numpy.polynomial import ( - Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) + Chebyshev, + Hermite, + HermiteE, + Laguerre, + Legendre, + Polynomial, +) from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) -from numpy.exceptions import RankWarning + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) # # fixtures diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 2f17091137b9..8bd3951f4241 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -7,8 +7,11 @@ import numpy.polynomial.hermite as herm from numpy.polynomial.polynomial import polyval from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) H0 = np.array([1]) H1 = np.array([0, 2]) diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index ce55e2098b97..29f34f66380e 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -7,8 +7,11 @@ import numpy.polynomial.hermite_e as herme from numpy.polynomial.polynomial import polyval from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) He0 = np.array([1]) He1 = np.array([0, 1]) diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 1dd1977de684..6793b780416d 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -7,8 +7,11 @@ import numpy.polynomial.laguerre as lag 
from numpy.polynomial.polynomial import polyval from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) L0 = np.array([1]) / 1 L1 = np.array([1, -1]) / 1 diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index ee23b4a2527f..d0ed7060cbe7 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -7,8 +7,11 @@ import numpy.polynomial.legendre as leg from numpy.polynomial.polynomial import polyval from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) L0 = np.array([1]) L1 = np.array([0, 1]) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 3cbc09157946..8bfa3c184cf7 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -1,16 +1,23 @@ """Tests for polynomial module. 
""" -from functools import reduce +import pickle +from copy import deepcopy from fractions import Fraction +from functools import reduce + import numpy as np import numpy.polynomial.polynomial as poly import numpy.polynomial.polyutils as pu -import pickle -from copy import deepcopy from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - assert_array_equal, assert_raises_regex, assert_warns) + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) def trim(x): @@ -547,12 +554,15 @@ def test_polyroots(self): for i in np.logspace(10, 25, num=1000, base=10): tgt = np.array([-1, 1, i]) res = poly.polyroots(poly.polyfromroots(tgt)) - assert_almost_equal(res, tgt, 15 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error - + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. + assert_almost_equal(res, tgt, 15 - int(np.log10(i))) for i in np.logspace(10, 25, num=1000, base=10): tgt = np.array([-1, 1.01, i]) res = poly.polyroots(poly.polyfromroots(tgt)) - assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. 
+ assert_almost_equal(res, tgt, 14 - int(np.log10(i))) def test_polyfit(self): def f(x): @@ -657,3 +667,25 @@ def test_result_type(self): arr = np.polydiv(1, np.float32(1)) assert_equal(arr[0].dtype, np.float64) + +class ArrayFunctionInterceptor: + def __init__(self): + self.called = False + + def __array_function__(self, func, types, args, kwargs): + self.called = True + return "intercepted" + +def test_polyval2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polyval2d(x, y, c) + assert result == "intercepted" + +def test_polygrid2d_array_function_hook(): + x = ArrayFunctionInterceptor() + y = ArrayFunctionInterceptor() + c = ArrayFunctionInterceptor() + result = np.polynomial.polynomial.polygrid2d(x, y, c) + assert result == "intercepted" diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py index e5143ed5c3e4..96e88b9de1fa 100644 --- a/numpy/polynomial/tests/test_polyutils.py +++ b/numpy/polynomial/tests/test_polyutils.py @@ -4,8 +4,11 @@ import numpy as np import numpy.polynomial.polyutils as pu from numpy.testing import ( - assert_almost_equal, assert_raises, assert_equal, assert_, - ) + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) class TestMisc: diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py index b897770c9427..d3735e3b85f6 100644 --- a/numpy/polynomial/tests/test_printing.py +++ b/numpy/polynomial/tests/test_printing.py @@ -1,12 +1,14 @@ -from math import nan, inf -import pytest -from numpy._core import array, arange, printoptions -import numpy.polynomial as poly -from numpy.testing import assert_equal, assert_ +from decimal import Decimal # For testing polynomial printing with object arrays from fractions import Fraction -from decimal import Decimal +from math import inf, nan + +import pytest + +import numpy.polynomial as poly +from 
numpy._core import arange, array, printoptions +from numpy.testing import assert_, assert_equal class TestStrUnicodeSuperSubscripts: diff --git a/numpy/polynomial/tests/test_symbol.py b/numpy/polynomial/tests/test_symbol.py index 3cb500e7af03..3de9e38ced08 100644 --- a/numpy/polynomial/tests/test_symbol.py +++ b/numpy/polynomial/tests/test_symbol.py @@ -3,9 +3,10 @@ """ import pytest + import numpy.polynomial as poly from numpy._core import array -from numpy.testing import assert_equal, assert_raises, assert_ +from numpy.testing import assert_, assert_equal, assert_raises class TestInit: diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py index 2e8f99fe3045..3e21d598a88e 100644 --- a/numpy/random/__init__.py +++ b/numpy/random/__init__.py @@ -177,16 +177,13 @@ ] # add these for module-freeze analysis (like PyInstaller) -from . import _pickle -from . import _common -from . import _bounded_integers - +from . import _bounded_integers, _common, _pickle from ._generator import Generator, default_rng -from .bit_generator import SeedSequence, BitGenerator from ._mt19937 import MT19937 from ._pcg64 import PCG64, PCG64DXSM from ._philox import Philox from ._sfc64 import SFC64 +from .bit_generator import BitGenerator, SeedSequence from .mtrand import * __all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937', @@ -211,5 +208,6 @@ def __RandomState_ctor(): from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi index 8cfa9c0e1369..e9b9fb50ab8c 100644 --- a/numpy/random/__init__.pyi +++ b/numpy/random/__init__.pyi @@ -1,11 +1,9 @@ -from ._generator import Generator -from ._generator import default_rng +from ._generator import Generator, default_rng from ._mt19937 import MT19937 from ._pcg64 import PCG64, PCG64DXSM from ._philox import Philox from ._sfc64 import SFC64 -from .bit_generator import BitGenerator -from .bit_generator import SeedSequence +from 
.bit_generator import BitGenerator, SeedSequence from .mtrand import ( RandomState, beta, diff --git a/numpy/random/_examples/cffi/extending.py b/numpy/random/_examples/cffi/extending.py index 8440d400ea91..ad4c9acbdceb 100644 --- a/numpy/random/_examples/cffi/extending.py +++ b/numpy/random/_examples/cffi/extending.py @@ -2,9 +2,13 @@ Use cffi to access any of the underlying C functions from distributions.h """ import os -import numpy as np + import cffi + +import numpy as np + from .parse import parse_distributions_h + ffi = cffi.FFI() inc_dir = os.path.join(np.get_include(), 'numpy') diff --git a/numpy/random/_examples/numba/extending.py b/numpy/random/_examples/numba/extending.py index e7c6d7e88890..c1d0f4fbd3e3 100644 --- a/numpy/random/_examples/numba/extending.py +++ b/numpy/random/_examples/numba/extending.py @@ -1,8 +1,9 @@ -import numpy as np +from timeit import timeit + import numba as nb +import numpy as np from numpy.random import PCG64 -from timeit import timeit bit_gen = PCG64() next_d = bit_gen.cffi.next_double diff --git a/numpy/random/_examples/numba/extending_distributions.py b/numpy/random/_examples/numba/extending_distributions.py index 7ef0753d71d1..d0462e73ee0b 100644 --- a/numpy/random/_examples/numba/extending_distributions.py +++ b/numpy/random/_examples/numba/extending_distributions.py @@ -27,9 +27,9 @@ import os import numba as nb -import numpy as np from cffi import FFI +import numpy as np from numpy.random import PCG64 ffi = FFI() diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index a65843154063..70b2506da7af 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -1,9 +1,9 @@ from typing import TypedDict, type_check_only from numpy import uint32 -from numpy.typing import NDArray -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray 
@type_check_only class _MT19937Internal(TypedDict): diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index a3a6260fabfd..5dc7bb66321b 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -1,7 +1,7 @@ from typing import TypedDict, type_check_only -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence @type_check_only class _PCG64Internal(TypedDict): diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index 1305a797b09a..d8895bba67cf 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -1,9 +1,9 @@ from typing import TypedDict, type_check_only from numpy import uint64 -from numpy.typing import NDArray -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray @type_check_only class _PhiloxInternal(TypedDict): diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py index 842bd441a502..05f7232e68de 100644 --- a/numpy/random/_pickle.py +++ b/numpy/random/_pickle.py @@ -1,11 +1,10 @@ -from .bit_generator import BitGenerator -from .mtrand import RandomState -from ._philox import Philox -from ._pcg64 import PCG64, PCG64DXSM -from ._sfc64 import SFC64 - from ._generator import Generator from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator +from .mtrand import RandomState BitGenerators = {'MT19937': MT19937, 'PCG64': PCG64, diff --git a/numpy/random/_pickle.pyi b/numpy/random/_pickle.pyi index d4c6e8155ae9..b8b1b7bcf63b 100644 --- a/numpy/random/_pickle.pyi +++ b/numpy/random/_pickle.pyi @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import Final, Literal, TypeVar, TypedDict, overload, type_check_only +from 
typing import Final, Literal, TypedDict, TypeVar, overload, type_check_only from numpy.random._generator import Generator from numpy.random._mt19937 import MT19937 diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi index 2f98ff9f1dda..a6f0d8445f25 100644 --- a/numpy/random/_sfc64.pyi +++ b/numpy/random/_sfc64.pyi @@ -1,8 +1,8 @@ from typing import TypedDict, type_check_only from numpy import uint64 -from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import NDArray, _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence @type_check_only class _SFC64Internal(TypedDict): diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 5963054e5ba5..ee4499dee1f3 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -1,13 +1,29 @@ import abc +from _typeshed import Incomplete from collections.abc import Callable, Mapping, Sequence from threading import Lock -from typing import Any, ClassVar, Literal, NamedTuple, Self, TypeAlias, TypedDict, overload, type_check_only - -from _typeshed import Incomplete +from typing import ( + Any, + ClassVar, + Literal, + NamedTuple, + Self, + TypeAlias, + TypedDict, + overload, + type_check_only, +) from typing_extensions import CapsuleType import numpy as np -from numpy._typing import NDArray, _ArrayLikeInt_co, _DTypeLike, _ShapeLike, _UInt32Codes, _UInt64Codes +from numpy._typing import ( + NDArray, + _ArrayLikeInt_co, + _DTypeLike, + _ShapeLike, + _UInt32Codes, + _UInt64Codes, +) __all__ = ["BitGenerator", "SeedSequence"] diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index 0062fa3ce657..54bb1462fb5f 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -1,6 +1,6 @@ import builtins from collections.abc import Callable -from typing import Any, overload, Literal +from typing import Any, Literal, overload import numpy as np from numpy import ( @@ -12,14 +12,13 @@ from numpy import ( 
int64, int_, long, + uint, uint8, uint16, uint32, uint64, - uint, ulong, ) -from numpy.random.bit_generator import BitGenerator from numpy._typing import ( ArrayLike, NDArray, @@ -41,6 +40,7 @@ from numpy._typing import ( _UIntCodes, _ULongCodes, ) +from numpy.random.bit_generator import BitGenerator class RandomState: _bit_generator: BitGenerator diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index c8a83f3bc40b..6f069e48879f 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -1,17 +1,28 @@ import os -from os.path import join import sys +from os.path import join -import numpy as np -from numpy.testing import (assert_equal, assert_allclose, assert_array_equal, - assert_raises) import pytest +import numpy as np from numpy.random import ( - Generator, MT19937, PCG64, PCG64DXSM, Philox, RandomState, SeedSequence, - SFC64, default_rng + MT19937, + PCG64, + PCG64DXSM, + SFC64, + Generator, + Philox, + RandomState, + SeedSequence, + default_rng, ) from numpy.random._common import interface +from numpy.testing import ( + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) try: import cffi # noqa: F401 @@ -130,9 +141,11 @@ def gauss_from_uint(x, n, bits): def test_seedsequence(): - from numpy.random.bit_generator import (ISeedSequence, - ISpawnableSeedSequence, - SeedlessSeedSequence) + from numpy.random.bit_generator import ( + ISeedSequence, + ISpawnableSeedSequence, + SeedlessSeedSequence, + ) s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6) s1.spawn(10) diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index dad9b10449d6..7a079d6362e8 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -1,15 +1,15 @@ -from importlib.util import spec_from_file_location, module_from_spec import os -import pytest import shutil import subprocess import sys import sysconfig import warnings +from 
importlib.util import module_from_spec, spec_from_file_location -import numpy as np -from numpy.testing import IS_WASM, IS_EDITABLE +import pytest +import numpy as np +from numpy.testing import IS_EDITABLE, IS_WASM try: import cffi @@ -54,7 +54,8 @@ ) @pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") @pytest.mark.skipif(cython is None, reason="requires cython") -@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='Meson unable to find MSVC linker on win-arm64') +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='Meson unable to find MSVC linker on win-arm64') @pytest.mark.slow def test_cython(tmp_path): import glob diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index f3a71a83a5e7..d09cbba4ec39 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1,18 +1,25 @@ +import hashlib import os.path import sys -import hashlib import pytest import numpy as np from numpy.exceptions import AxisError from numpy.linalg import LinAlgError +from numpy.random import MT19937, Generator, RandomState, SeedSequence from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_allclose, - assert_warns, assert_no_warnings, assert_array_equal, - assert_array_almost_equal, suppress_warnings, IS_WASM) - -from numpy.random import Generator, MT19937, SeedSequence, RandomState + IS_WASM, + assert_, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, + assert_warns, + suppress_warnings, +) random = Generator(MT19937()) @@ -2780,8 +2787,8 @@ def test_pickle_preserves_seed_sequence(): @pytest.mark.parametrize("version", [121, 126]) def test_legacy_pickle(version): # Pickling format was changes in 1.22.x and in 2.0.x - import pickle import gzip + import pickle base_path = os.path.split(os.path.abspath(__file__))[0] pkl_file = os.path.join( diff --git 
a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index 01193b3e53e1..abfacb87dbc5 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -1,7 +1,8 @@ -from numpy.testing import (assert_, assert_array_equal) -import numpy as np import pytest -from numpy.random import Generator, MT19937 + +import numpy as np +from numpy.random import MT19937, Generator +from numpy.testing import assert_, assert_array_equal class TestRegression: diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 9a352433ad24..d5981906f6ef 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -1,15 +1,21 @@ +import sys import warnings import pytest import numpy as np -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_warns, - assert_no_warnings, assert_array_equal, assert_array_almost_equal, - suppress_warnings, IS_WASM - ) from numpy import random -import sys +from numpy.testing import ( + IS_WASM, + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, + assert_warns, + suppress_warnings, +) class TestSeed: diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index 58952e129f01..cf4488543c12 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -3,16 +3,22 @@ import sys import warnings -import numpy as np import pytest -from numpy.testing import ( - assert_, assert_raises, assert_equal, assert_warns, - assert_no_warnings, assert_array_equal, assert_array_almost_equal, - suppress_warnings, IS_WASM - ) -from numpy.random import MT19937, PCG64 +import numpy as np from numpy import random +from numpy.random import MT19937, PCG64 +from numpy.testing import ( + IS_WASM, + assert_, + assert_array_almost_equal, + 
assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, + assert_warns, + suppress_warnings, +) INT_FUNCS = {'binomial': (100.0, 0.6), 'geometric': (.5,), diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py index 142cc3cb3a3e..6ccc6180657c 100644 --- a/numpy/random/tests/test_randomstate_regression.py +++ b/numpy/random/tests/test_randomstate_regression.py @@ -2,12 +2,13 @@ import pytest -from numpy.testing import ( - assert_, assert_array_equal, assert_raises, - ) import numpy as np - from numpy import random +from numpy.testing import ( + assert_, + assert_array_equal, + assert_raises, +) class TestRegression: diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py index ccd6ba11cbfc..39b7d8c719ac 100644 --- a/numpy/random/tests/test_regression.py +++ b/numpy/random/tests/test_regression.py @@ -1,9 +1,12 @@ import sys -from numpy.testing import ( - assert_, assert_array_equal, assert_raises, - ) -from numpy import random + import numpy as np +from numpy import random +from numpy.testing import ( + assert_, + assert_array_equal, + assert_raises, +) class TestRegression: diff --git a/numpy/random/tests/test_seed_sequence.py b/numpy/random/tests/test_seed_sequence.py index f08cf80faafa..87ae4ff72139 100644 --- a/numpy/random/tests/test_seed_sequence.py +++ b/numpy/random/tests/test_seed_sequence.py @@ -1,7 +1,6 @@ import numpy as np -from numpy.testing import assert_array_equal, assert_array_compare - from numpy.random import SeedSequence +from numpy.testing import assert_array_compare, assert_array_equal def test_reference_data(): diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py index fcbfd97d61f6..6f07443f79a9 100644 --- a/numpy/random/tests/test_smoke.py +++ b/numpy/random/tests/test_smoke.py @@ -1,10 +1,12 @@ import pickle from functools import partial -import numpy as np import pytest -from numpy.testing import 
assert_equal, assert_, assert_array_equal -from numpy.random import (Generator, MT19937, PCG64, PCG64DXSM, Philox, SFC64) + +import numpy as np +from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Generator, Philox +from numpy.testing import assert_, assert_array_equal, assert_equal + @pytest.fixture(scope='module', params=(np.bool, np.int8, np.int16, np.int32, np.int64, @@ -66,13 +68,12 @@ def comp_state(state1, state2): identical &= comp_state(state1[key], state2[key]) elif type(state1) != type(state2): identical &= type(state1) == type(state2) + elif (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( + state2, (list, tuple, np.ndarray))): + for s1, s2 in zip(state1, state2): + identical &= comp_state(s1, s2) else: - if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( - state2, (list, tuple, np.ndarray))): - for s1, s2 in zip(state1, state2): - identical &= comp_state(s1, s2) - else: - identical &= state1 == state2 + identical &= state1 == state2 return identical diff --git a/numpy/rec/__init__.py b/numpy/rec/__init__.py index 1a439ada8c35..420240c8d4d1 100644 --- a/numpy/rec/__init__.py +++ b/numpy/rec/__init__.py @@ -1,2 +1,2 @@ -from numpy._core.records import __all__, __doc__ from numpy._core.records import * +from numpy._core.records import __all__, __doc__ diff --git a/numpy/rec/__init__.pyi b/numpy/rec/__init__.pyi index 605770f7c9c0..6a78c66ff2c2 100644 --- a/numpy/rec/__init__.pyi +++ b/numpy/rec/__init__.pyi @@ -1,14 +1,15 @@ from numpy._core.records import ( - record, - recarray, + array, find_duplicate, format_parser, fromarrays, + fromfile, fromrecords, fromstring, - fromfile, - array, + recarray, + record, ) + __all__ = [ "record", "recarray", diff --git a/numpy/strings/__init__.py b/numpy/strings/__init__.py index f370ba71f296..561dadcf37d0 100644 --- a/numpy/strings/__init__.py +++ b/numpy/strings/__init__.py @@ -1,2 +1,2 @@ -from numpy._core.strings import __all__, __doc__ from numpy._core.strings import * +from 
numpy._core.strings import __all__, __doc__ diff --git a/numpy/strings/__init__.pyi b/numpy/strings/__init__.pyi index fb03e9c8b5e6..b2fb363531d4 100644 --- a/numpy/strings/__init__.pyi +++ b/numpy/strings/__init__.pyi @@ -1,49 +1,50 @@ from numpy._core.strings import ( + add, + capitalize, + center, + count, + decode, + encode, + endswith, equal, - not_equal, - greater_equal, - less_equal, + expandtabs, + find, greater, - less, - add, - multiply, - mod, - isalpha, + greater_equal, + index, isalnum, - isdigit, - isspace, - isnumeric, + isalpha, isdecimal, + isdigit, islower, - isupper, + isnumeric, + isspace, istitle, - str_len, - find, + isupper, + less, + less_equal, + ljust, + lower, + lstrip, + mod, + multiply, + not_equal, + partition, + replace, rfind, - index, rindex, - count, - startswith, - endswith, - decode, - encode, - expandtabs, - center, - ljust, rjust, - lstrip, + rpartition, rstrip, + slice, + startswith, + str_len, strip, - zfill, - upper, - lower, swapcase, - capitalize, title, - replace, - partition, - rpartition, translate, + upper, + zfill, ) __all__ = [ @@ -92,4 +93,5 @@ __all__ = [ "decode", "encode", "translate", + "slice", ] diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py index 8a34221e4dde..fe0c4f2367f2 100644 --- a/numpy/testing/__init__.py +++ b/numpy/testing/__init__.py @@ -7,16 +7,16 @@ """ from unittest import TestCase -from . import _private -from ._private.utils import * -from ._private.utils import (_assert_valid_refcount, _gen_alignment_data) +from . import _private, overrides from ._private import extbuild -from . 
import overrides +from ._private.utils import * +from ._private.utils import _assert_valid_refcount, _gen_alignment_data __all__ = ( _private.utils.__all__ + ['TestCase', 'overrides'] ) from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/testing/_private/extbuild.py b/numpy/testing/_private/extbuild.py index f81184e9af1e..2a724b73cfc3 100644 --- a/numpy/testing/_private/extbuild.py +++ b/numpy/testing/_private/extbuild.py @@ -233,7 +233,8 @@ def build(cfile, outputfilename, compile_extra, link_extra, cwd=build_dir, ) else: - subprocess.check_call(["meson", "setup", "--vsenv", "..", f'--native-file={os.fspath(native_file_name)}'], + subprocess.check_call(["meson", "setup", "--vsenv", + "..", f'--native-file={os.fspath(native_file_name)}'], cwd=build_dir ) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 0cfd50cb2124..d7ceaeab72cc 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2,34 +2,31 @@ Utility function to facilitate testing. 
""" +import concurrent.futures +import contextlib +import gc +import importlib.metadata +import operator import os -import sys import pathlib import platform +import pprint import re -import gc -import operator +import shutil +import sys +import sysconfig +import threading import warnings from functools import partial, wraps -import shutil -import contextlib +from io import StringIO from tempfile import mkdtemp, mkstemp from unittest.case import SkipTest from warnings import WarningMessage -import pprint -import sysconfig -import concurrent.futures -import threading -import importlib.metadata import numpy as np -from numpy._core import ( - intp, float32, empty, arange, array_repr, ndarray, isnat, array) -from numpy import isfinite, isnan, isinf import numpy.linalg._umath_linalg - -from io import StringIO - +from numpy import isfinite, isinf, isnan +from numpy._core import arange, array, array_repr, empty, float32, intp, isnat, ndarray __all__ = [ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', @@ -69,7 +66,8 @@ class KnownFailureException(Exception): IS_EDITABLE = np_dist.origin.dir_info.editable else: # Backport importlib.metadata.Distribution.origin - import json, types # noqa: E401 + import json # noqa: E401 + import types origin = json.loads( np_dist.read_text('direct_url.json') or '{}', object_hook=lambda data: types.SimpleNamespace(**data), @@ -163,11 +161,12 @@ def memusage(processName="python", instance=0): win32pdh.PDH_FMT_LONG, None) elif sys.platform[:5] == 'linux': - def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'): + def memusage(_proc_pid_stat=None): """ Return virtual memory size in bytes of the running python. 
""" + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' try: with open(_proc_pid_stat) as f: l = f.readline().split(' ') @@ -184,7 +183,7 @@ def memusage(): if sys.platform[:5] == 'linux': - def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): + def jiffies(_proc_pid_stat=None, _load_time=None): """ Return number of jiffies elapsed. @@ -192,6 +191,8 @@ def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): process has been scheduled in user mode. See man 5 proc. """ + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' + _load_time = _load_time or [] import time if not _load_time: _load_time.append(time.time()) @@ -364,8 +365,8 @@ def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', verbose) return - from numpy._core import ndarray, isscalar, signbit - from numpy import iscomplexobj, real, imag + from numpy import imag, iscomplexobj, real + from numpy._core import isscalar, ndarray, signbit if isinstance(actual, ndarray) or isinstance(desired, ndarray): return assert_array_equal(actual, desired, err_msg, verbose, strict=strict) @@ -569,8 +570,8 @@ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): """ __tracebackhide__ = True # Hide traceback for py.test + from numpy import imag, iscomplexobj, real from numpy._core import ndarray - from numpy import iscomplexobj, real, imag # Handle complex numbers: separate into real/imag to handle # nan/inf/negative zero correctly @@ -615,9 +616,8 @@ def _build_err_msg(): if isnan(desired) or isnan(actual): if not (isnan(desired) and isnan(actual)): raise AssertionError(_build_err_msg()) - else: - if not desired == actual: - raise AssertionError(_build_err_msg()) + elif not desired == actual: + raise AssertionError(_build_err_msg()) return except (NotImplementedError, TypeError): pass @@ -715,9 +715,8 @@ def assert_approx_equal(actual, desired, 
significant=7, err_msg='', if isnan(desired) or isnan(actual): if not (isnan(desired) and isnan(actual)): raise AssertionError(msg) - else: - if not desired == actual: - raise AssertionError(msg) + elif not desired == actual: + raise AssertionError(msg) return except (TypeError, NotImplementedError): pass @@ -729,8 +728,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', precision=6, equal_nan=True, equal_inf=True, *, strict=False, names=('ACTUAL', 'DESIRED')): __tracebackhide__ = True # Hide traceback for py.test - from numpy._core import (array2string, isnan, inf, errstate, - all, max, object_) + from numpy._core import all, array2string, errstate, inf, isnan, max, object_ x = np.asanyarray(x) y = np.asanyarray(y) @@ -1135,8 +1133,8 @@ def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', """ __tracebackhide__ = True # Hide traceback for py.test from numpy._core import number, result_type - from numpy._core.numerictypes import issubdtype from numpy._core.fromnumeric import any as npany + from numpy._core.numerictypes import issubdtype def compare(x, y): try: @@ -1382,8 +1380,9 @@ def rundocs(filename=None, raise_on_error=True): >>> np.lib.test(doctests=True) # doctest: +SKIP """ - from numpy.distutils.misc_util import exec_mod_from_location import doctest + + from numpy.distutils.misc_util import exec_mod_from_location if filename is None: f = sys._getframe(1) filename = f.f_globals['__file__'] @@ -1528,7 +1527,6 @@ def decorate_methods(cls, decorator, testmatch=None): continue if testmatch.search(funcname) and not funcname.startswith('_'): setattr(cls, funcname, decorator(function)) - return def measure(code_str, times=1, label=None): @@ -1586,6 +1584,7 @@ def _assert_valid_refcount(op): return True import gc + import numpy as np b = np.arange(100 * 100).reshape(100, 100) @@ -1600,7 +1599,6 @@ def _assert_valid_refcount(op): assert_(sys.getrefcount(i) >= rc) finally: gc.enable() - del d # for pyflakes def 
assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, @@ -1897,9 +1895,8 @@ def _integer_repr(x, vdt, comp): rx = x.view(vdt) if not (rx.size == 1): rx[rx < 0] = comp - rx[rx < 0] - else: - if rx < 0: - rx = comp - rx + elif rx < 0: + rx = comp - rx return rx @@ -2160,7 +2157,7 @@ class clear_and_catch_warnings(warnings.catch_warnings): This makes it possible to trigger any warning afresh inside the context manager without disturbing the state of warnings outside. - For compatibility with Python 3.0, please consider all arguments to be + For compatibility with Python, please consider all arguments to be keyword-only. Parameters diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 4e3b60a0ef70..59a7539b69f1 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -3,6 +3,7 @@ import sys import types import unittest import warnings +from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Sequence from contextlib import _GeneratorContextManager from pathlib import Path @@ -23,10 +24,8 @@ from typing import ( type_check_only, ) from typing import Literal as L -from unittest.case import SkipTest - -from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath from typing_extensions import TypeVar +from unittest.case import SkipTest import numpy as np from numpy._typing import ( diff --git a/numpy/testing/overrides.py b/numpy/testing/overrides.py index 9e61534c3236..61771c4c0b58 100644 --- a/numpy/testing/overrides.py +++ b/numpy/testing/overrides.py @@ -3,9 +3,10 @@ """ -from numpy._core.overrides import ARRAY_FUNCTIONS as _array_functions -from numpy import ufunc as _ufunc import numpy._core.umath as _umath +from numpy import ufunc as _ufunc +from numpy._core.overrides import ARRAY_FUNCTIONS as _array_functions + def get_overridable_numpy_ufuncs(): """List all numpy ufuncs overridable via 
`__array_ufunc__` diff --git a/numpy/testing/overrides.pyi b/numpy/testing/overrides.pyi index 3fefc3f350da..916154c155b1 100644 --- a/numpy/testing/overrides.pyi +++ b/numpy/testing/overrides.pyi @@ -1,6 +1,5 @@ from collections.abc import Callable, Hashable from typing import Any - from typing_extensions import TypeIs import numpy as np diff --git a/numpy/testing/print_coercion_tables.py b/numpy/testing/print_coercion_tables.py index a3f90be96c84..89f0de3932ed 100755 --- a/numpy/testing/print_coercion_tables.py +++ b/numpy/testing/print_coercion_tables.py @@ -2,9 +2,11 @@ """Prints type-coercion tables for the built-in NumPy types """ +from collections import namedtuple + import numpy as np from numpy._core.numerictypes import obj2sctype -from collections import namedtuple + # Generic object that can be added, but doesn't do anything else class GenericObject: diff --git a/numpy/testing/print_coercion_tables.pyi b/numpy/testing/print_coercion_tables.pyi index c859305f2350..f463a18c05e4 100644 --- a/numpy/testing/print_coercion_tables.pyi +++ b/numpy/testing/print_coercion_tables.pyi @@ -1,6 +1,5 @@ from collections.abc import Iterable from typing import ClassVar, Generic, Self - from typing_extensions import TypeVar import numpy as np diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 57c97ad8a2a6..fcf20091ca8e 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1,20 +1,36 @@ -import warnings -import sys -import os import itertools -import pytest -import weakref +import os import re +import sys +import warnings +import weakref + +import pytest import numpy as np import numpy._core._multiarray_umath as ncu from numpy.testing import ( - assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_array_less, build_err_msg, - assert_raises, assert_warns, assert_no_warnings, assert_allclose, - assert_approx_equal, assert_array_almost_equal_nulp, 
assert_array_max_ulp, - clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_, - tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT + HAS_REFCOUNT, + assert_, + assert_allclose, + assert_almost_equal, + assert_approx_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_less, + assert_array_max_ulp, + assert_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_string_equal, + assert_warns, + build_err_msg, + clear_and_catch_warnings, + suppress_warnings, + tempdir, + temppath, ) @@ -1526,7 +1542,7 @@ def assert_warn_len_equal(mod, n_in_context): num_warns = len(mod_warns) if 'version' in mod_warns: - # Python 3 adds a 'version' entry to the registry, + # Python adds a 'version' entry to the registry, # do not count it. num_warns -= 1 diff --git a/numpy/tests/test__all__.py b/numpy/tests/test__all__.py index e44bda3d58ab..2dc81669d9fb 100644 --- a/numpy/tests/test__all__.py +++ b/numpy/tests/test__all__.py @@ -1,5 +1,6 @@ import collections + import numpy as np diff --git a/numpy/tests/test_configtool.py b/numpy/tests/test_configtool.py index a26f911744b6..e0b9bb1b7aff 100644 --- a/numpy/tests/test_configtool.py +++ b/numpy/tests/test_configtool.py @@ -3,15 +3,13 @@ import os import pathlib import subprocess -import sysconfig import pytest + import numpy as np import numpy._core.include import numpy._core.lib.pkgconfig - -from numpy.testing import IS_WASM, IS_INSTALLED, IS_EDITABLE, NUMPY_ROOT - +from numpy.testing import IS_EDITABLE, IS_INSTALLED, IS_WASM, NUMPY_ROOT INCLUDE_DIR = NUMPY_ROOT / '_core' / 'include' PKG_CONFIG_DIR = NUMPY_ROOT / '_core' / 'lib' / 'pkgconfig' diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 5a65f7d99eee..68d31416040b 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -6,8 +6,8 @@ import pytest import numpy as np -from numpy.ctypeslib import ndpointer, load_library, as_array 
-from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal +from numpy.ctypeslib import as_array, load_library, ndpointer +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises try: import ctypes @@ -205,7 +205,7 @@ def test_array(self): assert_array_equal(a, np.array([[1, 2], [3, 4], [5, 6]])) def test_pointer(self): - from ctypes import c_int, cast, POINTER + from ctypes import POINTER, c_int, cast p = cast((c_int * 10)(*range(10)), POINTER(c_int)) @@ -225,7 +225,7 @@ def test_pointer(self): reason="Broken in 3.12.0rc1, see gh-24399", ) def test_struct_array_pointer(self): - from ctypes import c_int16, Structure, pointer + from ctypes import Structure, c_int16, pointer class Struct(Structure): _fields_ = [('a', c_int16)] diff --git a/numpy/tests/test_lazyloading.py b/numpy/tests/test_lazyloading.py index 1298fadc5618..7b0324802611 100644 --- a/numpy/tests/test_lazyloading.py +++ b/numpy/tests/test_lazyloading.py @@ -1,22 +1,23 @@ +import subprocess import sys -from importlib.util import LazyLoader, find_spec, module_from_spec +import textwrap + import pytest +from numpy.testing import IS_WASM + -# Warning raised by _reload_guard() in numpy/__init__.py -@pytest.mark.filterwarnings("ignore:The NumPy module was reloaded") +@pytest.mark.skipif(IS_WASM, reason="can't start subprocess") def test_lazy_load(): # gh-22045. lazyload doesn't import submodule names into the namespace - # muck with sys.modules to test the importing system - old_numpy = sys.modules.pop("numpy") - numpy_modules = {} - for mod_name, mod in list(sys.modules.items()): - if mod_name[:6] == "numpy.": - numpy_modules[mod_name] = mod - sys.modules.pop(mod_name) + # Test within a new process, to ensure that we do not mess with the + # global state during the test run (could lead to cryptic test failures). + # This is generally unsafe, especially, since we also reload the C-modules. 
+ code = textwrap.dedent(r""" + import sys + from importlib.util import LazyLoader, find_spec, module_from_spec - try: # create lazy load of numpy as np spec = find_spec("numpy") module = module_from_spec(spec) @@ -30,8 +31,12 @@ def test_lazy_load(): # test triggering the import of the package np.ndarray - - finally: - if old_numpy: - sys.modules["numpy"] = old_numpy - sys.modules.update(numpy_modules) + """) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout diff --git a/numpy/tests/test_matlib.py b/numpy/tests/test_matlib.py index 7f0619edd893..2aac1f2582a1 100644 --- a/numpy/tests/test_matlib.py +++ b/numpy/tests/test_matlib.py @@ -1,6 +1,7 @@ import numpy as np import numpy.matlib -from numpy.testing import assert_array_equal, assert_ +from numpy.testing import assert_, assert_array_equal + def test_empty(): x = numpy.matlib.empty((2,)) diff --git a/numpy/tests/test_numpy_config.py b/numpy/tests/test_numpy_config.py index 0e225b2bd7b4..f01a279574a5 100644 --- a/numpy/tests/test_numpy_config.py +++ b/numpy/tests/test_numpy_config.py @@ -1,10 +1,12 @@ """ Check the numpy config is valid. 
""" -import numpy as np -import pytest from unittest.mock import patch +import pytest + +import numpy as np + pytestmark = pytest.mark.skipif( not hasattr(np.__config__, "_built_with_meson"), reason="Requires Meson builds", diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 729af9751fa1..6a36358c3a06 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -1,19 +1,19 @@ import functools +import importlib +import inspect +import pkgutil +import subprocess import sys import sysconfig -import subprocess -import pkgutil import types -import importlib -import inspect import warnings -import numpy as np +import pytest + import numpy +import numpy as np from numpy.testing import IS_WASM -import pytest - try: import ctypes except ImportError: @@ -162,8 +162,6 @@ def test_NPY_NO_EXPORT(): PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [ - "compat", - "compat.py3k", "conftest", "core", "core.multiarray", @@ -284,8 +282,6 @@ def is_unexpected(name): SKIP_LIST = ["numpy.distutils.msvc9compiler"] -# suppressing warnings from deprecated modules -@pytest.mark.filterwarnings("ignore:.*np.compat.*:DeprecationWarning") def test_all_modules_are_expected(): """ Test that we don't add anything that looks like a new public module by @@ -563,10 +559,11 @@ def test_functions_single_location(): Test performs BFS search traversing NumPy's public API. It flags any function-like object that is accessible from more that one place. 
""" - from typing import Any from collections.abc import Callable + from typing import Any + from numpy._core._multiarray_umath import ( - _ArrayFunctionDispatcher as dispatched_function + _ArrayFunctionDispatcher as dispatched_function, ) visited_modules: set[types.ModuleType] = {np} @@ -783,8 +780,7 @@ def test___qualname___and___module___attribute(): inspect.ismodule(member) and # it's a module "numpy" in member.__name__ and # inside NumPy not member_name.startswith("_") and # not private - member_name != "tests" and - member_name != "typing" and # 2024-12: type names don't match + member_name not in {"tests", "typing"} and # type names don't match "numpy._core" not in member.__name__ and # outside _core member not in visited_modules # not visited yet ): diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py index 22bff7212e59..3e6ded326941 100644 --- a/numpy/tests/test_reloading.py +++ b/numpy/tests/test_reloading.py @@ -1,18 +1,18 @@ -import sys +import pickle import subprocess +import sys import textwrap from importlib import reload -import pickle import pytest import numpy.exceptions as ex from numpy.testing import ( - assert_raises, - assert_warns, + IS_WASM, assert_, assert_equal, - IS_WASM, + assert_raises, + assert_warns, ) @@ -48,27 +48,34 @@ def test_novalue(): @pytest.mark.skipif(IS_WASM, reason="can't start subprocess") def test_full_reimport(): - """At the time of writing this, it is *not* truly supported, but - apparently enough users rely on it, for it to be an annoying change - when it started failing previously. - """ + # Reimporting numpy like this is not safe due to use of global C state, + # and has unexpected side effects. Test that an ImportError is raised. + # When all extension modules are isolated, this should test that clearing + # sys.modules and reimporting numpy works without error. 
+ # Test within a new process, to ensure that we do not mess with the # global state during the test run (could lead to cryptic test failures). # This is generally unsafe, especially, since we also reload the C-modules. code = textwrap.dedent(r""" import sys - from pytest import warns import numpy as np - for k in list(sys.modules.keys()): - if "numpy" in k: - del sys.modules[k] + for k in [k for k in sys.modules if k.startswith('numpy')]: + del sys.modules[k] - with warns(UserWarning): + try: import numpy as np + except ImportError as err: + if str(err) != "cannot load module more than once per process": + raise SystemExit(f"Unexpected ImportError: {err}") + else: + raise SystemExit("DID NOT RAISE ImportError") """) - p = subprocess.run([sys.executable, '-c', code], capture_output=True) - if p.returncode: - raise AssertionError( - f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}" - ) + p = subprocess.run( + (sys.executable, '-c', code), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding='utf-8', + check=False, + ) + assert p.returncode == 0, p.stdout diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index fa15eb642a81..d8ce95887bce 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -2,14 +2,16 @@ Test that we can run executable scripts that have been installed with numpy. 
""" -import sys import os -import pytest -from os.path import join as pathjoin, isfile, dirname import subprocess +import sys +from os.path import dirname, isfile +from os.path import join as pathjoin + +import pytest import numpy as np -from numpy.testing import assert_equal, IS_WASM +from numpy.testing import IS_WASM, assert_equal is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py index 4cb9669ff589..560ee6143265 100644 --- a/numpy/tests/test_warnings.py +++ b/numpy/tests/test_warnings.py @@ -2,13 +2,15 @@ Tests which scan for certain occurrences in the code, they may not find all of these occurrences but should catch almost all. """ -import pytest - -from pathlib import Path import ast import tokenize +from pathlib import Path + +import pytest + import numpy + class ParseCall(ast.NodeVisitor): def __init__(self): self.ls = [] diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 2c75c348667e..163655bd7662 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -169,7 +169,7 @@ def __dir__() -> list[str]: return __DIR -def __getattr__(name: str): +def __getattr__(name: str) -> object: if name == "NBitBase": import warnings @@ -196,5 +196,6 @@ def __getattr__(name: str): del _docstrings from numpy._pytesttester import PytestTester + test = PytestTester(__name__) del PytestTester diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 5c01f261bb79..dc1e2564fc32 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -99,9 +99,9 @@ def _get_c_intp_name() -> str: from mypy.typeanal import TypeAnalyser import mypy.types - from mypy.plugin import Plugin, AnalyzeTypeContext - from mypy.nodes import MypyFile, ImportFrom, Statement from mypy.build import PRI_MED + from mypy.nodes import ImportFrom, MypyFile, Statement + from mypy.plugin import AnalyzeTypeContext, Plugin except ModuleNotFoundError as e: diff 
--git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index 7cc49b93ba2f..e696083b8614 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -72,6 +72,11 @@ AR_i // AR_LIKE_m # type: ignore[operator] AR_f // AR_LIKE_m # type: ignore[operator] AR_c // AR_LIKE_m # type: ignore[operator] +# regression tests for https://github.com/numpy/numpy/issues/28957 +AR_c // 2 # type: ignore[operator] +AR_c // AR_i # type: ignore[operator] +AR_c // AR_c # type: ignore[operator] + # Array multiplication AR_b *= AR_LIKE_u # type: ignore[arg-type] @@ -80,7 +85,6 @@ AR_b *= AR_LIKE_f # type: ignore[arg-type] AR_b *= AR_LIKE_c # type: ignore[arg-type] AR_b *= AR_LIKE_m # type: ignore[arg-type] -AR_u *= AR_LIKE_i # type: ignore[arg-type] AR_u *= AR_LIKE_f # type: ignore[arg-type] AR_u *= AR_LIKE_c # type: ignore[arg-type] AR_u *= AR_LIKE_m # type: ignore[arg-type] @@ -100,7 +104,6 @@ AR_b **= AR_LIKE_i # type: ignore[misc] AR_b **= AR_LIKE_f # type: ignore[misc] AR_b **= AR_LIKE_c # type: ignore[misc] -AR_u **= AR_LIKE_i # type: ignore[arg-type] AR_u **= AR_LIKE_f # type: ignore[arg-type] AR_u **= AR_LIKE_c # type: ignore[arg-type] diff --git a/numpy/typing/tests/data/fail/chararray.pyi b/numpy/typing/tests/data/fail/chararray.pyi index 54b147e0e6c0..fb52f7349dd1 100644 --- a/numpy/typing/tests/data/fail/chararray.pyi +++ b/numpy/typing/tests/data/fail/chararray.pyi @@ -1,7 +1,8 @@ +from typing import Any import numpy as np -AR_U: np.char.chararray[tuple[int, ...], np.dtype[np.str_]] -AR_S: np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]] +AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +AR_S: np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] AR_S.encode() # type: ignore[misc] AR_U.decode() # type: ignore[misc] diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index afab58f9a71f..51ef26810e21 100644 --- 
a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -7,6 +7,7 @@ A = np.array(True, ndmin=2, dtype=bool) A.setflags(write=False) AR_U: npt.NDArray[np.str_] AR_M: npt.NDArray[np.datetime64] +AR_f4: npt.NDArray[np.float32] a = np.bool(True) @@ -50,9 +51,11 @@ np.argsort(A, order=range(5)) # type: ignore[arg-type] np.argmax(A, axis="bob") # type: ignore[call-overload] np.argmax(A, kind="bob") # type: ignore[call-overload] +np.argmax(A, out=AR_f4) # type: ignore[type-var] np.argmin(A, axis="bob") # type: ignore[call-overload] np.argmin(A, kind="bob") # type: ignore[call-overload] +np.argmin(A, out=AR_f4) # type: ignore[type-var] np.searchsorted(A[0], 0, side="bob") # type: ignore[call-overload] np.searchsorted(A[0], 0, sorter=1.0) # type: ignore[call-overload] diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 2ea53a2956c5..5dc6706ebf81 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -1,133 +1,143 @@ -from typing import Any +from typing import TypeAlias, TypeVar import numpy as np -import numpy.ma import numpy.typing as npt +from numpy._typing import _AnyShape -m: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, np.dtype[_ScalarT]] + +MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] +MAR_b: MaskedArray[np.bool] +MAR_c: MaskedArray[np.complex128] +MAR_td64: MaskedArray[np.timedelta64] AR_b: npt.NDArray[np.bool] -m.shape = (3, 1) # type: ignore[assignment] -m.dtype = np.bool # type: ignore[assignment] - -np.ma.min(m, axis=1.0) # type: ignore[call-overload] -np.ma.min(m, keepdims=1.0) # type: ignore[call-overload] -np.ma.min(m, out=1.0) # type: ignore[call-overload] -np.ma.min(m, fill_value=lambda x: 27) # type: ignore[call-overload] - -m.min(axis=1.0) # type: ignore[call-overload] -m.min(keepdims=1.0) # type: 
ignore[call-overload] -m.min(out=1.0) # type: ignore[call-overload] -m.min(fill_value=lambda x: 27) # type: ignore[call-overload] - -np.ma.max(m, axis=1.0) # type: ignore[call-overload] -np.ma.max(m, keepdims=1.0) # type: ignore[call-overload] -np.ma.max(m, out=1.0) # type: ignore[call-overload] -np.ma.max(m, fill_value=lambda x: 27) # type: ignore[call-overload] - -m.max(axis=1.0) # type: ignore[call-overload] -m.max(keepdims=1.0) # type: ignore[call-overload] -m.max(out=1.0) # type: ignore[call-overload] -m.max(fill_value=lambda x: 27) # type: ignore[call-overload] - -np.ma.ptp(m, axis=1.0) # type: ignore[call-overload] -np.ma.ptp(m, keepdims=1.0) # type: ignore[call-overload] -np.ma.ptp(m, out=1.0) # type: ignore[call-overload] -np.ma.ptp(m, fill_value=lambda x: 27) # type: ignore[call-overload] - -m.ptp(axis=1.0) # type: ignore[call-overload] -m.ptp(keepdims=1.0) # type: ignore[call-overload] -m.ptp(out=1.0) # type: ignore[call-overload] -m.ptp(fill_value=lambda x: 27) # type: ignore[call-overload] - -m.argmin(axis=1.0) # type: ignore[call-overload] -m.argmin(keepdims=1.0) # type: ignore[call-overload] -m.argmin(out=1.0) # type: ignore[call-overload] -m.argmin(fill_value=lambda x: 27) # type: ignore[call-overload] - -np.ma.argmin(m, axis=1.0) # type: ignore[call-overload] -np.ma.argmin(m, axis=(1,)) # type: ignore[call-overload] -np.ma.argmin(m, keepdims=1.0) # type: ignore[call-overload] -np.ma.argmin(m, out=1.0) # type: ignore[call-overload] -np.ma.argmin(m, fill_value=lambda x: 27) # type: ignore[call-overload] - -m.argmax(axis=1.0) # type: ignore[call-overload] -m.argmax(keepdims=1.0) # type: ignore[call-overload] -m.argmax(out=1.0) # type: ignore[call-overload] -m.argmax(fill_value=lambda x: 27) # type: ignore[call-overload] - -np.ma.argmax(m, axis=1.0) # type: ignore[call-overload] -np.ma.argmax(m, axis=(0,)) # type: ignore[call-overload] -np.ma.argmax(m, keepdims=1.0) # type: ignore[call-overload] -np.ma.argmax(m, out=1.0) # type: ignore[call-overload] 
-np.ma.argmax(m, fill_value=lambda x: 27) # type: ignore[call-overload] - -m.all(axis=1.0) # type: ignore[call-overload] -m.all(keepdims=1.0) # type: ignore[call-overload] -m.all(out=1.0) # type: ignore[call-overload] - -m.any(axis=1.0) # type: ignore[call-overload] -m.any(keepdims=1.0) # type: ignore[call-overload] -m.any(out=1.0) # type: ignore[call-overload] - -m.sort(axis=(0,1)) # type: ignore[arg-type] -m.sort(axis=None) # type: ignore[arg-type] -m.sort(kind='cabbage') # type: ignore[arg-type] -m.sort(order=lambda: 'cabbage') # type: ignore[arg-type] -m.sort(endwith='cabbage') # type: ignore[arg-type] -m.sort(fill_value=lambda: 'cabbage') # type: ignore[arg-type] -m.sort(stable='cabbage') # type: ignore[arg-type] -m.sort(stable=True) # type: ignore[arg-type] - -m.take(axis=1.0) # type: ignore[call-overload] -m.take(out=1) # type: ignore[call-overload] -m.take(mode="bob") # type: ignore[call-overload] +MAR_1d_f8.shape = (3, 1) # type: ignore[assignment] +MAR_1d_f8.dtype = np.bool # type: ignore[assignment] + +np.ma.min(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.min(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.max(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.max(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(out=1.0) # type: ignore[call-overload] 
+MAR_1d_f8.max(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.ptp(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.ptp(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmin(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmin(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, axis=(1,)) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmax(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmax(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, axis=(0,)) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.all(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.any(axis=1.0) # type: 
ignore[call-overload] +MAR_1d_f8.any(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.any(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.sort(axis=(0,1)) # type: ignore[arg-type] +MAR_1d_f8.sort(axis=None) # type: ignore[arg-type] +MAR_1d_f8.sort(kind='cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(order=lambda: 'cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(endwith='cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(fill_value=lambda: 'cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(stable='cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(stable=True) # type: ignore[arg-type] + +MAR_1d_f8.take(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.take(out=1) # type: ignore[call-overload] +MAR_1d_f8.take(mode="bob") # type: ignore[call-overload] np.ma.take(None) # type: ignore[call-overload] np.ma.take(axis=1.0) # type: ignore[call-overload] np.ma.take(out=1) # type: ignore[call-overload] np.ma.take(mode="bob") # type: ignore[call-overload] -m.partition(['cabbage']) # type: ignore[arg-type] -m.partition(axis=(0,1)) # type: ignore[arg-type, call-arg] -m.partition(kind='cabbage') # type: ignore[arg-type, call-arg] -m.partition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] -m.partition(AR_b) # type: ignore[arg-type] +MAR_1d_f8.partition(['cabbage']) # type: ignore[arg-type] +MAR_1d_f8.partition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(kind='cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] -m.argpartition(['cabbage']) # type: ignore[arg-type] -m.argpartition(axis=(0,1)) # type: ignore[arg-type, call-arg] -m.argpartition(kind='cabbage') # type: ignore[arg-type, call-arg] -m.argpartition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] -m.argpartition(AR_b) # type: ignore[arg-type] +MAR_1d_f8.argpartition(['cabbage']) # type: ignore[arg-type] 
+MAR_1d_f8.argpartition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(kind='cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] np.ma.ndim(lambda: 'lambda') # type: ignore[arg-type] np.ma.size(AR_b, axis='0') # type: ignore[arg-type] -m >= (lambda x: 'mango') # type: ignore[operator] -m > (lambda x: 'mango') # type: ignore[operator] -m <= (lambda x: 'mango') # type: ignore[operator] -m < (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 >= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 > (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 <= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 < (lambda x: 'mango') # type: ignore[operator] -m.count(axis=0.) # type: ignore[call-overload] +MAR_1d_f8.count(axis=0.) # type: ignore[call-overload] -np.ma.count(m, axis=0.) # type: ignore[call-overload] +np.ma.count(MAR_1d_f8, axis=0.) 
# type: ignore[call-overload] -m.put(4, 999, mode='flip') # type: ignore[arg-type] +MAR_1d_f8.put(4, 999, mode='flip') # type: ignore[arg-type] -np.ma.put(m, 4, 999, mode='flip') # type: ignore[arg-type] +np.ma.put(MAR_1d_f8, 4, 999, mode='flip') # type: ignore[arg-type] np.ma.put([1,1,3], 0, 999) # type: ignore[arg-type] np.ma.compressed(lambda: 'compress me') # type: ignore[call-overload] -np.ma.allequal(m, [1,2,3], fill_value=1.5) # type: ignore[arg-type] +np.ma.allequal(MAR_1d_f8, [1,2,3], fill_value=1.5) # type: ignore[arg-type] + +np.ma.allclose(MAR_1d_f8, [1,2,3], masked_equal=4.5) # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1,2,3], rtol='.4') # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1,2,3], atol='.5') # type: ignore[arg-type] -np.ma.allclose(m, [1,2,3], masked_equal=4.5) # type: ignore[arg-type] -np.ma.allclose(m, [1,2,3], rtol='.4') # type: ignore[arg-type] -np.ma.allclose(m, [1,2,3], atol='.5') # type: ignore[arg-type] +MAR_1d_f8.__setmask__('mask') # type: ignore[arg-type] -m.__setmask__('mask') # type: ignore[arg-type] +MAR_b *= 2 # type: ignore[arg-type] +MAR_c //= 2 # type: ignore[misc] +MAR_td64 **= 2 # type: ignore[misc] -m.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] +MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index 46e7f712ddb6..4aa465ae087b 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -1,10 +1,8 @@ [mypy] -plugins = numpy.typing.mypy_plugin +strict = True enable_error_code = deprecated, ignore-without-code, truthy-bool -strict_bytes = True -warn_unused_ignores = True -implicit_reexport = False disallow_any_unimported = True -disallow_any_generics = True +allow_redefinition_new = True +local_partial_types = True show_absolute_path = True pretty = True diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 
93fda1d291c0..3b2901cf2b51 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any +from typing import Any, cast import numpy as np import numpy.typing as npt import pytest @@ -61,6 +61,7 @@ def __rpow__(self, value: Any) -> Object: AR_b: npt.NDArray[np.bool] = np.array([True]) AR_u: npt.NDArray[np.uint32] = np.array([1], dtype=np.uint32) AR_i: npt.NDArray[np.int64] = np.array([1]) +AR_integer: npt.NDArray[np.integer] = cast(npt.NDArray[np.integer], AR_i) AR_f: npt.NDArray[np.float64] = np.array([1.0]) AR_c: npt.NDArray[np.complex128] = np.array([1j]) AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")]) @@ -252,6 +253,13 @@ def __rpow__(self, value: Any) -> Object: AR_LIKE_m // AR_m +AR_m /= f +AR_m //= f +AR_m /= AR_f +AR_m /= AR_LIKE_f +AR_m //= AR_f +AR_m //= AR_LIKE_f + AR_O // AR_LIKE_b AR_O // AR_LIKE_u AR_O // AR_LIKE_i @@ -275,6 +283,10 @@ def __rpow__(self, value: Any) -> Object: AR_i *= AR_LIKE_u AR_i *= AR_LIKE_i +AR_integer *= AR_LIKE_b +AR_integer *= AR_LIKE_u +AR_integer *= AR_LIKE_i + AR_f *= AR_LIKE_b AR_f *= AR_LIKE_u AR_f *= AR_LIKE_i @@ -307,6 +319,10 @@ def __rpow__(self, value: Any) -> Object: AR_i **= AR_LIKE_u AR_i **= AR_LIKE_i +AR_integer **= AR_LIKE_b +AR_integer **= AR_LIKE_u +AR_integer **= AR_LIKE_i + AR_f **= AR_LIKE_b AR_f **= AR_LIKE_u AR_f **= AR_LIKE_i diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py index 2238618eb67c..c8fa476210e3 100644 --- a/numpy/typing/tests/data/pass/literal.py +++ b/numpy/typing/tests/data/pass/literal.py @@ -17,7 +17,6 @@ CF = frozenset({None, "C", "F"}) order_list: list[tuple[frozenset[str | None], Callable[..., Any]]] = [ - (KACF, partial(np.ndarray, 1)), (KACF, AR.tobytes), (KACF, partial(AR.astype, int)), (KACF, AR.copy), @@ -25,7 +24,8 @@ (KACF, AR.flatten), (KACF, AR.ravel), (KACF, partial(np.array, 1)), - # NOTE: 
__call__ is needed due to mypy 1.11 bugs (#17620, #17631) + # NOTE: __call__ is needed due to mypy bugs (#17620, #17631) + (KACF, partial(np.ndarray.__call__, 1)), (CF, partial(np.zeros.__call__, 1)), (CF, partial(np.ones.__call__, 1)), (CF, partial(np.empty.__call__, 1)), diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index abd1a0103005..b9be2b2e4384 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -1,12 +1,176 @@ -from typing import Any +from typing import Any, TypeAlias, TypeVar, cast import numpy as np -import numpy.ma import numpy.typing as npt +from numpy._typing import _Shape -ar_b: npt.NDArray[np.bool] = np.array([True, False, True]) -m: np.ma.MaskedArray[Any, np.dtype[np.float64]] = np.ma.masked_array([1.5, 2, 3], mask=[True, False, True]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] -m.mask = ar_b -m.mask = np.False_ +# mypy: disable-error-code=no-untyped-call +MAR_b: MaskedArray[np.bool] = np.ma.MaskedArray([True]) +MAR_u: MaskedArray[np.uint32] = np.ma.MaskedArray([1], dtype=np.uint32) +MAR_i: MaskedArray[np.int64] = np.ma.MaskedArray([1]) +MAR_f: MaskedArray[np.float64] = np.ma.MaskedArray([1.0]) +MAR_c: MaskedArray[np.complex128] = np.ma.MaskedArray([1j]) +MAR_td64: MaskedArray[np.timedelta64] = np.ma.MaskedArray([np.timedelta64(1, "D")]) +MAR_M_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) +MAR_S: MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) +MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) +MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], + np.ma.MaskedArray(["a"], dtype="T")) + +AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) + +AR_LIKE_b = [True] +AR_LIKE_u = [np.uint32(1)] +AR_LIKE_i = [1] +AR_LIKE_f = [1.0] +AR_LIKE_c = [1j] +AR_LIKE_m = [np.timedelta64(1, "D")] +AR_LIKE_M = 
[np.datetime64(1, "D")] + +MAR_f.mask = AR_b +MAR_f.mask = np.False_ + +# Inplace addition + +MAR_b += AR_LIKE_b + +MAR_u += AR_LIKE_b +MAR_u += AR_LIKE_u + +MAR_i += AR_LIKE_b +MAR_i += 2 +MAR_i += AR_LIKE_i + +MAR_f += AR_LIKE_b +MAR_f += 2 +MAR_f += AR_LIKE_u +MAR_f += AR_LIKE_i +MAR_f += AR_LIKE_f + +MAR_c += AR_LIKE_b +MAR_c += AR_LIKE_u +MAR_c += AR_LIKE_i +MAR_c += AR_LIKE_f +MAR_c += AR_LIKE_c + +MAR_td64 += AR_LIKE_b +MAR_td64 += AR_LIKE_u +MAR_td64 += AR_LIKE_i +MAR_td64 += AR_LIKE_m +MAR_M_dt64 += AR_LIKE_b +MAR_M_dt64 += AR_LIKE_u +MAR_M_dt64 += AR_LIKE_i +MAR_M_dt64 += AR_LIKE_m + +MAR_S += b'snakes' +MAR_U += 'snakes' +MAR_T += 'snakes' + +# Inplace subtraction + +MAR_u -= AR_LIKE_b +MAR_u -= AR_LIKE_u + +MAR_i -= AR_LIKE_b +MAR_i -= AR_LIKE_i + +MAR_f -= AR_LIKE_b +MAR_f -= AR_LIKE_u +MAR_f -= AR_LIKE_i +MAR_f -= AR_LIKE_f + +MAR_c -= AR_LIKE_b +MAR_c -= AR_LIKE_u +MAR_c -= AR_LIKE_i +MAR_c -= AR_LIKE_f +MAR_c -= AR_LIKE_c + +MAR_td64 -= AR_LIKE_b +MAR_td64 -= AR_LIKE_u +MAR_td64 -= AR_LIKE_i +MAR_td64 -= AR_LIKE_m +MAR_M_dt64 -= AR_LIKE_b +MAR_M_dt64 -= AR_LIKE_u +MAR_M_dt64 -= AR_LIKE_i +MAR_M_dt64 -= AR_LIKE_m + +# Inplace floor division + +MAR_f //= AR_LIKE_b +MAR_f //= 2 +MAR_f //= AR_LIKE_u +MAR_f //= AR_LIKE_i +MAR_f //= AR_LIKE_f + +MAR_td64 //= AR_LIKE_i + +# Inplace true division + +MAR_f /= AR_LIKE_b +MAR_f /= 2 +MAR_f /= AR_LIKE_u +MAR_f /= AR_LIKE_i +MAR_f /= AR_LIKE_f + +MAR_c /= AR_LIKE_b +MAR_c /= AR_LIKE_u +MAR_c /= AR_LIKE_i +MAR_c /= AR_LIKE_f +MAR_c /= AR_LIKE_c + +MAR_td64 /= AR_LIKE_i + +# Inplace multiplication + +MAR_b *= AR_LIKE_b + +MAR_u *= AR_LIKE_b +MAR_u *= AR_LIKE_u + +MAR_i *= AR_LIKE_b +MAR_i *= 2 +MAR_i *= AR_LIKE_i + +MAR_f *= AR_LIKE_b +MAR_f *= 2 +MAR_f *= AR_LIKE_u +MAR_f *= AR_LIKE_i +MAR_f *= AR_LIKE_f + +MAR_c *= AR_LIKE_b +MAR_c *= AR_LIKE_u +MAR_c *= AR_LIKE_i +MAR_c *= AR_LIKE_f +MAR_c *= AR_LIKE_c + +MAR_td64 *= AR_LIKE_b +MAR_td64 *= AR_LIKE_u +MAR_td64 *= AR_LIKE_i +MAR_td64 *= AR_LIKE_f + +MAR_S *= 2 
+MAR_U *= 2 +MAR_T *= 2 + +# Inplace power + +MAR_u **= AR_LIKE_b +MAR_u **= AR_LIKE_u + +MAR_i **= AR_LIKE_b +MAR_i **= AR_LIKE_i + +MAR_f **= AR_LIKE_b +MAR_f **= AR_LIKE_u +MAR_f **= AR_LIKE_i +MAR_f **= AR_LIKE_f + +MAR_c **= AR_LIKE_b +MAR_c **= AR_LIKE_u +MAR_c **= AR_LIKE_i +MAR_c **= AR_LIKE_f +MAR_c **= AR_LIKE_c diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index d995961f0962..bb290cdf12f7 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -15,13 +15,14 @@ import numpy.typing as npt class SubClass(npt.NDArray[np.float64]): ... - +class IntSubClass(npt.NDArray[np.intp]): ... i4 = np.int32(1) A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) B0 = np.empty((), dtype=np.int32).view(SubClass) B1 = np.empty((1,), dtype=np.int32).view(SubClass) B2 = np.empty((1, 1), dtype=np.int32).view(SubClass) +B_int0: IntSubClass = np.empty((), dtype=np.intp).view(IntSubClass) C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32) D = np.ones(3).view(SubClass) @@ -42,12 +43,12 @@ class SubClass(npt.NDArray[np.float64]): ... 
i4.argmax() A.argmax() A.argmax(axis=0) -A.argmax(out=B0) +A.argmax(out=B_int0) i4.argmin() A.argmin() A.argmin(axis=0) -A.argmin(out=B0) +A.argmin(out=B_int0) i4.argsort() A.argsort() diff --git a/numpy/typing/tests/data/pass/recfunctions.py b/numpy/typing/tests/data/pass/recfunctions.py index d6290a23680e..52a3d78a7622 100644 --- a/numpy/typing/tests/data/pass/recfunctions.py +++ b/numpy/typing/tests/data/pass/recfunctions.py @@ -143,7 +143,7 @@ def test_stack_arrays() -> None: zz = np.ones((int(2),), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) assert_type( rfn.stack_arrays((z, zz)), - np.ma.MaskedArray[tuple[int, ...], np.dtype[np.void]], + np.ma.MaskedArray[tuple[Any, ...], np.dtype[np.void]], ) diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index b48226b75637..5dd78a197b8f 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -48,6 +48,9 @@ AR_c: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] +AR_S: npt.NDArray[np.bytes_] +AR_U: npt.NDArray[np.str_] +AR_T: np.ndarray[tuple[Any, ...], np.dtypes.StringDType] AR_floating: npt.NDArray[np.floating] AR_number: npt.NDArray[np.number] AR_Any: npt.NDArray[Any] @@ -673,3 +676,45 @@ assert_type(f / AR_floating, npt.NDArray[np.floating]) assert_type(f // AR_floating, npt.NDArray[np.floating]) assert_type(f % AR_floating, npt.NDArray[np.floating]) assert_type(divmod(f, AR_floating), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +# character-like + +assert_type(AR_S + b"", npt.NDArray[np.bytes_]) +assert_type(AR_S + [b""], npt.NDArray[np.bytes_]) +assert_type([b""] + AR_S, npt.NDArray[np.bytes_]) +assert_type(AR_S + AR_S, npt.NDArray[np.bytes_]) + +assert_type(AR_U + "", npt.NDArray[np.str_]) +assert_type(AR_U + [""], npt.NDArray[np.str_]) +assert_type("" + AR_U, npt.NDArray[np.str_]) +assert_type([""] + 
AR_U, npt.NDArray[np.str_]) +assert_type(AR_U + AR_U, npt.NDArray[np.str_]) + +assert_type(AR_T + "", np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + [""], np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type("" + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type([""] + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_U, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_U + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) + +assert_type(AR_S * i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +# mypy incorrectly infers `AR_LIKE_i * AR_S` as `list[int]` +assert_type(AR_i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) + +assert_type(AR_U * i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +# mypy incorrectly infers `AR_LIKE_i * AR_U` as `list[int]` +assert_type(AR_i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(AR_T * i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(i * AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +# mypy incorrectly infers `AR_LIKE_i * AR_T` as `list[int]` +assert_type(AR_i * AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) diff --git 
a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 2f32579c0816..7b27d57bfe23 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -51,7 +51,7 @@ assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any]) assert_type(np.empty(mixed_shape), npt.NDArray[np.float64]) assert_type(np.concatenate(A), npt.NDArray[np.float64]) -assert_type(np.concatenate([A, A]), npt.NDArray[Any]) +assert_type(np.concatenate([A, A]), Any) # pyright correctly infers this as NDArray[float64] assert_type(np.concatenate([[1], A]), npt.NDArray[Any]) assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any]) assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) @@ -220,22 +220,22 @@ assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) -assert_type(np.vstack([A, A]), npt.NDArray[np.float64]) +assert_type(np.vstack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.vstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.vstack([A, C]), npt.NDArray[Any]) assert_type(np.vstack([C, C]), npt.NDArray[Any]) -assert_type(np.hstack([A, A]), npt.NDArray[np.float64]) +assert_type(np.hstack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) -assert_type(np.stack([A, A]), npt.NDArray[np.float64]) +assert_type(np.stack([A, A]), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.stack([A, C]), npt.NDArray[Any]) assert_type(np.stack([C, C]), npt.NDArray[Any]) -assert_type(np.stack([A, A], axis=0), npt.NDArray[np.float64]) 
+assert_type(np.stack([A, A], axis=0), np.ndarray[Any, Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.stack([A, A], out=B), SubClass[np.float64]) -assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) +assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] assert_type(np.block(C), npt.NDArray[Any]) if sys.version_info >= (3, 12): diff --git a/numpy/typing/tests/data/reveal/arrayterator.pyi b/numpy/typing/tests/data/reveal/arrayterator.pyi index 4484cf785139..470160c24de3 100644 --- a/numpy/typing/tests/data/reveal/arrayterator.pyi +++ b/numpy/typing/tests/data/reveal/arrayterator.pyi @@ -12,7 +12,7 @@ assert_type(ar_iter.buf_size, int | None) assert_type(ar_iter.start, list[int]) assert_type(ar_iter.stop, list[int]) assert_type(ar_iter.step, list[int]) -assert_type(ar_iter.shape, tuple[int, ...]) +assert_type(ar_iter.shape, tuple[Any, ...]) assert_type(ar_iter.flat, Generator[np.int64, None, None]) assert_type(ar_iter.__array__(), npt.NDArray[np.int64]) @@ -20,8 +20,8 @@ assert_type(ar_iter.__array__(), npt.NDArray[np.int64]) for i in ar_iter: assert_type(i, npt.NDArray[np.int64]) -assert_type(ar_iter[0], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]]) -assert_type(ar_iter[...], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]]) -assert_type(ar_iter[:], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]]) -assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]]) -assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]]) +assert_type(ar_iter[0], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[...], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[:], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[..., 0, :], 
np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi index 14c504483d6a..9fdc9f61e893 100644 --- a/numpy/typing/tests/data/reveal/char.pyi +++ b/numpy/typing/tests/data/reveal/char.pyi @@ -4,12 +4,12 @@ import numpy as np import numpy._typing as np_t import numpy.typing as npt +AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] + AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] -AR_T: np.ndarray[np_t._Shape, np.dtypes.StringDType] - -AR_T_alias: TypeAlias = np.ndarray[np_t._Shape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +AR_T: AR_T_alias assert_type(np.char.equal(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.char.equal(AR_S, AR_S), npt.NDArray[np.bool]) @@ -203,16 +203,16 @@ assert_type(np.char.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.char.translate(AR_S, ""), npt.NDArray[np.bytes_]) assert_type(np.char.translate(AR_T, ""), AR_T_alias) -assert_type(np.char.array(AR_U), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(np.char.array(AR_S, order="K"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.array("bob", copy=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.array(1, unicode=False), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.array(1, unicode=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) - -assert_type(np.char.asarray(AR_U), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.asarray("bob"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) 
-assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.asarray(1, unicode=False), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(np.char.asarray(1, unicode=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) +assert_type(np.char.array(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) + +assert_type(np.char.asarray(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray("bob"), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi index b9c40d95569f..b5f4392b75c8 100644 --- a/numpy/typing/tests/data/reveal/chararray.pyi +++ b/numpy/typing/tests/data/reveal/chararray.pyi @@ -1,10 +1,13 @@ -from typing import Any, assert_type +from typing import Any, TypeAlias, assert_type import numpy as np import numpy.typing as npt -AR_U: np.char.chararray[tuple[int, ...], np.dtype[np.str_]] -AR_S: np.char.chararray[tuple[int, 
...], np.dtype[np.bytes_]] +_BytesCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] +_StrCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] + +AR_U: _StrCharArray +AR_S: _BytesCharArray assert_type(AR_U == AR_U, npt.NDArray[np.bool]) assert_type(AR_S == AR_S, npt.NDArray[np.bool]) @@ -24,46 +27,46 @@ assert_type(AR_S > AR_S, npt.NDArray[np.bool]) assert_type(AR_U < AR_U, npt.NDArray[np.bool]) assert_type(AR_S < AR_S, npt.NDArray[np.bool]) -assert_type(AR_U * 5, np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S * [5], np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U * 5, _StrCharArray) +assert_type(AR_S * [5], _BytesCharArray) -assert_type(AR_U % "test", np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S % b"test", np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U % "test", _StrCharArray) +assert_type(AR_S % b"test", _BytesCharArray) -assert_type(AR_U.capitalize(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.capitalize(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.capitalize(), _StrCharArray) +assert_type(AR_S.capitalize(), _BytesCharArray) -assert_type(AR_U.center(5), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.center([2, 3, 4], b"a"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.center(5), _StrCharArray) +assert_type(AR_S.center([2, 3, 4], b"a"), _BytesCharArray) -assert_type(AR_U.encode(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(AR_S.decode(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) +assert_type(AR_U.encode(), _BytesCharArray) +assert_type(AR_S.decode(), _StrCharArray) -assert_type(AR_U.expandtabs(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.expandtabs(tabsize=4), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) 
+assert_type(AR_U.expandtabs(), _StrCharArray) +assert_type(AR_S.expandtabs(tabsize=4), _BytesCharArray) -assert_type(AR_U.join("_"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.join([b"_", b""]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.join("_"), _StrCharArray) +assert_type(AR_S.join([b"_", b""]), _BytesCharArray) -assert_type(AR_U.ljust(5), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(AR_U.rjust(5), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.ljust(5), _StrCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_U.rjust(5), _StrCharArray) +assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) -assert_type(AR_U.lstrip(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.lstrip(chars=b"_"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(AR_U.rstrip(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.rstrip(chars=b"_"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(AR_U.strip(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.strip(chars=b"_"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.lstrip(), _StrCharArray) +assert_type(AR_S.lstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.rstrip(), _StrCharArray) +assert_type(AR_S.rstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.strip(), _StrCharArray) +assert_type(AR_S.strip(chars=b"_"), _BytesCharArray) -assert_type(AR_U.partition("\n"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.partition([b"a", b"b", 
b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) -assert_type(AR_U.rpartition("\n"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.rpartition([b"a", b"b", b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.partition("\n"), _StrCharArray) +assert_type(AR_S.partition([b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_U.rpartition("\n"), _StrCharArray) +assert_type(AR_S.rpartition([b"a", b"b", b"c"]), _BytesCharArray) -assert_type(AR_U.replace("_", "-"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.replace("_", "-"), _StrCharArray) +assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), _BytesCharArray) assert_type(AR_U.split("_"), npt.NDArray[np.object_]) assert_type(AR_S.split(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) @@ -73,17 +76,17 @@ assert_type(AR_S.rsplit(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) assert_type(AR_U.splitlines(), npt.NDArray[np.object_]) assert_type(AR_S.splitlines(keepends=[True, True, False]), npt.NDArray[np.object_]) -assert_type(AR_U.swapcase(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.swapcase(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.swapcase(), _StrCharArray) +assert_type(AR_S.swapcase(), _BytesCharArray) -assert_type(AR_U.title(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.title(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.title(), _StrCharArray) +assert_type(AR_S.title(), _BytesCharArray) -assert_type(AR_U.upper(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.upper(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.upper(), _StrCharArray) +assert_type(AR_S.upper(), _BytesCharArray) -assert_type(AR_U.zfill(5), 
np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(AR_S.zfill([2, 3, 4]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]) +assert_type(AR_U.zfill(5), _StrCharArray) +assert_type(AR_S.zfill([2, 3, 4]), _BytesCharArray) assert_type(AR_U.count("a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(AR_S.count([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) diff --git a/numpy/typing/tests/data/reveal/ctypeslib.pyi b/numpy/typing/tests/data/reveal/ctypeslib.pyi index dee2760d74e9..0564d725cf62 100644 --- a/numpy/typing/tests/data/reveal/ctypeslib.pyi +++ b/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ -1,5 +1,4 @@ import ctypes as ct -import sys from typing import Any, assert_type import numpy as np @@ -74,18 +73,9 @@ assert_type(np.ctypeslib.as_array(AR_ubyte), npt.NDArray[np.ubyte]) assert_type(np.ctypeslib.as_array(1), npt.NDArray[Any]) assert_type(np.ctypeslib.as_array(pointer), npt.NDArray[Any]) -if sys.platform == "win32": - # Mainly on windows int is the same size as long but gets picked first: - assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_int]) - assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_uint]) - assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_uint]) - assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_int]) - assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_int) - assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_uint) -else: - assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long]) - assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_ulong]) - assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong]) - assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long]) - assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long) - assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong) +assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long]) +assert_type(np.ctypeslib.as_ctypes_type(np.ulong), 
type[ct.c_ulong]) +assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong]) +assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long]) +assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long) +assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong) diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index a2c347fd471b..1794c944b3ae 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -14,12 +14,8 @@ dtype_U: np.dtype[np.str_] dtype_V: np.dtype[np.void] dtype_i8: np.dtype[np.int64] -py_int_co: type[int] -py_float_co: type[float] -py_complex_co: type[complex] py_object: type[_PyObjectLike] py_character: type[str | bytes] -py_flexible: type[str | bytes | memoryview] ct_floating: type[ct.c_float | ct.c_double | ct.c_longdouble] ct_number: type[ct.c_uint8 | ct.c_float] @@ -48,19 +44,16 @@ assert_type(np.dtype("str"), np.dtype[np.str_]) # Python types assert_type(np.dtype(bool), np.dtype[np.bool]) -assert_type(np.dtype(py_int_co), np.dtype[np.int_ | np.bool]) assert_type(np.dtype(int), np.dtype[np.int_ | np.bool]) -assert_type(np.dtype(py_float_co), np.dtype[np.float64 | np.int_ | np.bool]) assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool]) -assert_type(np.dtype(py_complex_co), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool]) assert_type(np.dtype(py_object), np.dtype[np.object_]) assert_type(np.dtype(str), np.dtype[np.str_]) assert_type(np.dtype(bytes), np.dtype[np.bytes_]) -assert_type(np.dtype(py_character), np.dtype[np.character]) assert_type(np.dtype(memoryview), np.dtype[np.void]) -assert_type(np.dtype(py_flexible), np.dtype[np.flexible]) +assert_type(np.dtype(py_character), np.dtype[np.character]) +# object types assert_type(np.dtype(list), np.dtype[np.object_]) assert_type(np.dtype(dt.datetime), np.dtype[np.object_]) 
assert_type(np.dtype(dt.timedelta), np.dtype[np.object_]) @@ -75,12 +68,9 @@ assert_type(np.dtype("l"), np.dtype[np.long]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) assert_type(np.dtype(">g"), np.dtype[np.longdouble]) assert_type(np.dtype(cs_integer), np.dtype[np.integer]) -assert_type(np.dtype(cs_number), np.dtype[np.number]) -assert_type(np.dtype(cs_flex), np.dtype[np.flexible]) -assert_type(np.dtype(cs_generic), np.dtype[np.generic]) # ctypes -assert_type(np.dtype(ct.c_double), np.dtype[np.double]) +assert_type(np.dtype(ct.c_double), np.dtype[np.float64]) # see numpy/numpy#29155 assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong]) assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32]) assert_type(np.dtype(ct.c_bool), np.dtype[np.bool]) @@ -90,7 +80,7 @@ assert_type(np.dtype(ct.py_object), np.dtype[np.object_]) # Special case for None assert_type(np.dtype(None), np.dtype[np.float64]) -# Dypes of dtypes +# dtypes of dtypes assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64]) assert_type(np.dtype(dt_inexact), np.dtype[np.inexact]) @@ -99,6 +89,7 @@ assert_type(np.dtype("S8"), np.dtype) # Void assert_type(np.dtype(("U", 10)), np.dtype[np.void]) +assert_type(np.dtype({"formats": (int, "u8"), "names": ("n", "B")}), np.dtype[np.void]) # StringDType assert_type(np.dtype(dt_string), StringDType) @@ -108,7 +99,7 @@ assert_type(np.dtype("|T"), StringDType) # Methods and attributes assert_type(dtype_U.base, np.dtype) -assert_type(dtype_U.subdtype, tuple[np.dtype, tuple[int, ...]] | None) +assert_type(dtype_U.subdtype, tuple[np.dtype, tuple[Any, ...]] | None) assert_type(dtype_U.newbyteorder(), np.dtype[np.str_]) assert_type(dtype_U.type, type[np.str_]) assert_type(dtype_U.name, LiteralString) diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 0827b27a056b..5438e001a13f 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ 
b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -16,15 +16,19 @@ AR_i8: npt.NDArray[np.int64] AR_O: npt.NDArray[np.object_] AR_subclass: NDArraySubclass AR_m: npt.NDArray[np.timedelta64] -AR_0d: np.ndarray[tuple[()], np.dtype] -AR_1d: np.ndarray[tuple[int], np.dtype] -AR_nd: np.ndarray[tuple[int, ...], np.dtype] +AR_0d: np.ndarray[tuple[()]] +AR_1d: np.ndarray[tuple[int]] +AR_nd: np.ndarray b: np.bool f4: np.float32 i8: np.int64 f: float +# integer‑dtype subclass for argmin/argmax +class NDArrayIntSubclass(npt.NDArray[np.intp]): ... +AR_sub_i: NDArrayIntSubclass + assert_type(np.take(b, 0), np.bool) assert_type(np.take(f4, 0), np.float32) assert_type(np.take(f, 0), Any) @@ -89,13 +93,13 @@ assert_type(np.argmax(AR_b), np.intp) assert_type(np.argmax(AR_f4), np.intp) assert_type(np.argmax(AR_b, axis=0), Any) assert_type(np.argmax(AR_f4, axis=0), Any) -assert_type(np.argmax(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.argmax(AR_f4, out=AR_sub_i), NDArrayIntSubclass) assert_type(np.argmin(AR_b), np.intp) assert_type(np.argmin(AR_f4), np.intp) assert_type(np.argmin(AR_b, axis=0), Any) assert_type(np.argmin(AR_f4, axis=0), Any) -assert_type(np.argmin(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.argmin(AR_f4, out=AR_sub_i), NDArrayIntSubclass) assert_type(np.searchsorted(AR_b[0], 0), np.intp) assert_type(np.searchsorted(AR_f4[0], 0), np.intp) @@ -136,9 +140,9 @@ assert_type(np.shape(b), tuple[()]) assert_type(np.shape(f), tuple[()]) assert_type(np.shape([1]), tuple[int]) assert_type(np.shape([[2]]), tuple[int, int]) -assert_type(np.shape([[[3]]]), tuple[int, ...]) -assert_type(np.shape(AR_b), tuple[int, ...]) -assert_type(np.shape(AR_nd), tuple[int, ...]) +assert_type(np.shape([[[3]]]), tuple[Any, ...]) +assert_type(np.shape(AR_b), tuple[Any, ...]) +assert_type(np.shape(AR_nd), tuple[Any, ...]) # these fail on mypy, but it works as expected with pyright/pylance # assert_type(np.shape(AR_0d), tuple[()]) # assert_type(np.shape(AR_1d), 
tuple[int]) diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index b70798f3433a..f6067c3bed6b 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -18,10 +18,10 @@ assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[Any]) -assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[int, ...], np.int64]) -assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[int, ...], np.float64]) -assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[int, ...], np.str_]) -assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[int, ...], Any]) +assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[Any, ...], np.int64]) +assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[Any, ...], np.float64]) +assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[Any, ...], np.str_]) +assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[Any, ...], Any]) assert_type(iter(np.ndenumerate(AR_i8)), np.ndenumerate[np.int64]) assert_type(iter(np.ndenumerate(AR_LIKE_f)), np.ndenumerate[np.float64]) @@ -31,7 +31,7 @@ assert_type(iter(np.ndenumerate(AR_LIKE_O)), np.ndenumerate[Any]) assert_type(np.ndindex(1, 2, 3), np.ndindex) assert_type(np.ndindex((1, 2, 3)), np.ndindex) assert_type(iter(np.ndindex(1, 2, 3)), np.ndindex) -assert_type(next(np.ndindex(1, 2, 3)), tuple[int, ...]) +assert_type(next(np.ndindex(1, 2, 3)), tuple[Any, ...]) assert_type(np.unravel_index([22, 41, 37], (7, 6)), tuple[npt.NDArray[np.intp], ...]) assert_type(np.unravel_index([31, 41, 13], (7, 6), order="F"), tuple[npt.NDArray[np.intp], ...]) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index bb54d6913b34..3ce8d375201b 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ 
b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -20,7 +20,7 @@ AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] AR_b: npt.NDArray[np.bool] AR_U: npt.NDArray[np.str_] -CHAR_AR_U: np.char.chararray[tuple[int, ...], np.dtype[np.str_]] +CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] AR_b_list: list[npt.NDArray[np.bool]] @@ -81,9 +81,9 @@ assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any]) assert_type(np.copy(AR_LIKE_f8), npt.NDArray[Any]) assert_type(np.copy(AR_U), npt.NDArray[np.str_]) -assert_type(np.copy(CHAR_AR_U), npt.NDArray[np.str_]) -assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) -assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]]) +assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright correctly infers `NDArray[str_]` +assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) assert_type(np.gradient(AR_f8, axis=None), Any) assert_type(np.gradient(AR_LIKE_f8, edge_order=2), Any) diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index e80426efc03e..97f833b6a488 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -1,33 +1,46 @@ -from datetime import datetime, timedelta from typing import Any, Literal, TypeAlias, TypeVar, assert_type import numpy as np from numpy import dtype, generic -from numpy._typing import NDArray, _Shape +from numpy._typing import NDArray, _AnyShape _ScalarT = TypeVar("_ScalarT", bound=generic) -MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, dtype[_ScalarT]] +MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]] _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] class MaskedArraySubclass(MaskedArray[np.complex128]): ... 
AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] +AR_u4: NDArray[np.uint32] AR_dt64: NDArray[np.datetime64] AR_td64: NDArray[np.timedelta64] AR_o: NDArray[np.timedelta64] +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_td64: list[np.timedelta64] +AR_LIKE_dt64: list[np.datetime64] +AR_LIKE_o: list[np.object_] +AR_number: NDArray[np.number] + MAR_c16: MaskedArray[np.complex128] MAR_b: MaskedArray[np.bool] MAR_f4: MaskedArray[np.float32] MAR_f8: MaskedArray[np.float64] MAR_i8: MaskedArray[np.int64] +MAR_u4: MaskedArray[np.uint32] MAR_dt64: MaskedArray[np.datetime64] MAR_td64: MaskedArray[np.timedelta64] MAR_o: MaskedArray[np.object_] MAR_s: MaskedArray[np.str_] MAR_byte: MaskedArray[np.bytes_] MAR_V: MaskedArray[np.void] +MAR_floating: MaskedArray[np.floating] +MAR_number: MaskedArray[np.number] MAR_subclass: MaskedArraySubclass @@ -369,3 +382,244 @@ assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32] assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) + +# Masked Array addition + +assert_type(MAR_b + AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b + AR_LIKE_o, Any) + +assert_type(AR_LIKE_u + MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i + MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_b, Any) + +assert_type(MAR_u4 + AR_LIKE_b, 
MaskedArray[np.uint32]) +assert_type(MAR_u4 + AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_u4 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u + MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i + MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_u4, Any) + +assert_type(MAR_i8 + AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 + AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 + AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i + MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f + MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c + MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 + MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_i8, Any) + +assert_type(MAR_f8 + AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 + AR_LIKE_f, MaskedArray[np.float64]) 
+assert_type(MAR_f8 + AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_f + MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c + MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o + MAR_f8, Any) + +assert_type(MAR_c16 + AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c + MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o + MAR_c16, Any) + +assert_type(MAR_td64 + AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 + MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 + MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o + MAR_td64, Any) + +assert_type(MAR_dt64 + AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_i, 
MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 + AR_LIKE_o, Any) + +assert_type(AR_LIKE_o + MAR_dt64, Any) + +assert_type(MAR_o + AR_LIKE_b, Any) +assert_type(MAR_o + AR_LIKE_u, Any) +assert_type(MAR_o + AR_LIKE_i, Any) +assert_type(MAR_o + AR_LIKE_f, Any) +assert_type(MAR_o + AR_LIKE_c, Any) +assert_type(MAR_o + AR_LIKE_td64, Any) +assert_type(MAR_o + AR_LIKE_dt64, Any) +assert_type(MAR_o + AR_LIKE_o, Any) + +assert_type(AR_LIKE_b + MAR_o, Any) +assert_type(AR_LIKE_u + MAR_o, Any) +assert_type(AR_LIKE_i + MAR_o, Any) +assert_type(AR_LIKE_f + MAR_o, Any) +assert_type(AR_LIKE_c + MAR_o, Any) +assert_type(AR_LIKE_td64 + MAR_o, Any) +assert_type(AR_LIKE_dt64 + MAR_o, Any) +assert_type(AR_LIKE_o + MAR_o, Any) + +# Masked Array subtraction +# Keep in sync with numpy/typing/tests/data/reveal/arithmetic.pyi + +assert_type(MAR_number - AR_number, MaskedArray[np.number]) + +assert_type(MAR_b - AR_LIKE_u, MaskedArray[np.uint32]) +assert_type(MAR_b - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_b - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_b - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_b - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_b - AR_LIKE_o, Any) + +assert_type(AR_LIKE_u - MAR_b, MaskedArray[np.uint32]) +assert_type(AR_LIKE_i - MAR_b, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_b, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_b, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_b, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_b, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_b, Any) + +assert_type(MAR_u4 - AR_LIKE_b, MaskedArray[np.uint32]) +assert_type(MAR_u4 - AR_LIKE_u, MaskedArray[np.unsignedinteger]) +assert_type(MAR_u4 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_u4 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_u4 - AR_LIKE_c, 
MaskedArray[np.complexfloating]) +assert_type(MAR_u4 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_u4 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_u4, MaskedArray[np.uint32]) +assert_type(AR_LIKE_u - MAR_u4, MaskedArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - MAR_u4, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_u4, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_u4, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_u4, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_u4, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_u4, Any) + +assert_type(MAR_i8 - AR_LIKE_b, MaskedArray[np.int64]) +assert_type(MAR_i8 - AR_LIKE_u, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_i, MaskedArray[np.signedinteger]) +assert_type(MAR_i8 - AR_LIKE_f, MaskedArray[np.floating]) +assert_type(MAR_i8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_i8 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_i8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_i8, MaskedArray[np.int64]) +assert_type(AR_LIKE_u - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_i - MAR_i8, MaskedArray[np.signedinteger]) +assert_type(AR_LIKE_f - MAR_i8, MaskedArray[np.floating]) +assert_type(AR_LIKE_c - MAR_i8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_td64 - MAR_i8, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_i8, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_i8, Any) + +assert_type(MAR_f8 - AR_LIKE_b, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_u, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_i, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_f, MaskedArray[np.float64]) +assert_type(MAR_f8 - AR_LIKE_c, MaskedArray[np.complexfloating]) +assert_type(MAR_f8 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_u - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_i - MAR_f8, 
MaskedArray[np.float64]) +assert_type(AR_LIKE_f - MAR_f8, MaskedArray[np.float64]) +assert_type(AR_LIKE_c - MAR_f8, MaskedArray[np.complexfloating]) +assert_type(AR_LIKE_o - MAR_f8, Any) + +assert_type(MAR_c16 - AR_LIKE_b, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_u, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_i, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_f, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_c, MaskedArray[np.complex128]) +assert_type(MAR_c16 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_u - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_i - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_f - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_c - MAR_c16, MaskedArray[np.complex128]) +assert_type(AR_LIKE_o - MAR_c16, Any) + +assert_type(MAR_td64 - AR_LIKE_b, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_u, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_i, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_td64, MaskedArray[np.timedelta64]) +assert_type(MAR_td64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_u - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_i - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_td64 - MAR_td64, MaskedArray[np.timedelta64]) +assert_type(AR_LIKE_dt64 - MAR_td64, MaskedArray[np.datetime64]) +assert_type(AR_LIKE_o - MAR_td64, Any) + +assert_type(MAR_dt64 - AR_LIKE_b, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_u, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_i, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_td64, MaskedArray[np.datetime64]) +assert_type(MAR_dt64 - AR_LIKE_dt64, MaskedArray[np.timedelta64]) +assert_type(MAR_dt64 - AR_LIKE_o, Any) + +assert_type(AR_LIKE_dt64 - MAR_dt64, MaskedArray[np.timedelta64]) 
+assert_type(AR_LIKE_o - MAR_dt64, Any) + +assert_type(MAR_o - AR_LIKE_b, Any) +assert_type(MAR_o - AR_LIKE_u, Any) +assert_type(MAR_o - AR_LIKE_i, Any) +assert_type(MAR_o - AR_LIKE_f, Any) +assert_type(MAR_o - AR_LIKE_c, Any) +assert_type(MAR_o - AR_LIKE_td64, Any) +assert_type(MAR_o - AR_LIKE_dt64, Any) +assert_type(MAR_o - AR_LIKE_o, Any) + +assert_type(AR_LIKE_b - MAR_o, Any) +assert_type(AR_LIKE_u - MAR_o, Any) +assert_type(AR_LIKE_i - MAR_o, Any) +assert_type(AR_LIKE_f - MAR_o, Any) +assert_type(AR_LIKE_c - MAR_o, Any) +assert_type(AR_LIKE_td64 - MAR_o, Any) +assert_type(AR_LIKE_dt64 - MAR_o, Any) +assert_type(AR_LIKE_o - MAR_o, Any) diff --git a/numpy/typing/tests/data/reveal/matrix.pyi b/numpy/typing/tests/data/reveal/matrix.pyi index ed3247387baf..1a7285d428cc 100644 --- a/numpy/typing/tests/data/reveal/matrix.pyi +++ b/numpy/typing/tests/data/reveal/matrix.pyi @@ -7,6 +7,7 @@ _Shape2D: TypeAlias = tuple[int, int] mat: np.matrix[_Shape2D, np.dtype[np.int64]] ar_f8: npt.NDArray[np.float64] +ar_ip: npt.NDArray[np.intp] assert_type(mat * 5, np.matrix[_Shape2D, Any]) assert_type(5 * mat, np.matrix[_Shape2D, Any]) @@ -50,8 +51,8 @@ assert_type(mat.any(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.all(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.max(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.min(out=ar_f8), npt.NDArray[np.float64]) -assert_type(mat.argmax(out=ar_f8), npt.NDArray[np.float64]) -assert_type(mat.argmin(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.argmax(out=ar_ip), npt.NDArray[np.intp]) +assert_type(mat.argmin(out=ar_ip), npt.NDArray[np.intp]) assert_type(mat.ptp(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.T, np.matrix[_Shape2D, np.dtype[np.int64]]) diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index e73d6028c718..6ba3fcde632f 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -48,7 
+48,7 @@ assert_type(b_f8.iters, tuple[np.flatiter[Any], ...]) assert_type(b_f8.nd, int) assert_type(b_f8.ndim, int) assert_type(b_f8.numiter, int) -assert_type(b_f8.shape, tuple[int, ...]) +assert_type(b_f8.shape, tuple[Any, ...]) assert_type(b_f8.size, int) assert_type(next(b_i8_f8_f8), tuple[Any, ...]) @@ -58,7 +58,7 @@ assert_type(b_i8_f8_f8.iters, tuple[np.flatiter[Any], ...]) assert_type(b_i8_f8_f8.nd, int) assert_type(b_i8_f8_f8.ndim, int) assert_type(b_i8_f8_f8.numiter, int) -assert_type(b_i8_f8_f8.shape, tuple[int, ...]) +assert_type(b_i8_f8_f8.shape, tuple[Any, ...]) assert_type(b_i8_f8_f8.size, int) assert_type(np.inner(AR_f8, AR_i8), Any) diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index 33229660b6f8..66470b95bf15 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -7,8 +7,7 @@ from numpy._typing import _32Bit, _64Bit T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] -def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: - return a + b +def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: ... 
i8: np.int64 i4: np.int32 diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 682f9db50220..465ce7679b49 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -58,12 +58,12 @@ assert_type(AR_f8.any(out=B), SubClass) assert_type(f8.argmax(), np.intp) assert_type(AR_f8.argmax(), np.intp) assert_type(AR_f8.argmax(axis=0), Any) -assert_type(AR_f8.argmax(out=B), SubClass) +assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.intp]) assert_type(f8.argmin(), np.intp) assert_type(AR_f8.argmin(), np.intp) assert_type(AR_f8.argmin(axis=0), Any) -assert_type(AR_f8.argmin(out=B), SubClass) +assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) assert_type(f8.argsort(), npt.NDArray[Any]) assert_type(AR_f8.argsort(), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi index f55fcd272eae..aacf217e4207 100644 --- a/numpy/typing/tests/data/reveal/rec.pyi +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -1,11 +1,13 @@ import io -from typing import Any, assert_type +from typing import Any, TypeAlias, assert_type import numpy as np import numpy.typing as npt +_RecArray: TypeAlias = np.recarray[tuple[Any, ...], np.dtype[np.record]] + AR_i8: npt.NDArray[np.int64] -REC_AR_V: np.recarray[tuple[int, ...], np.dtype[np.record]] +REC_AR_V: _RecArray AR_LIST: list[npt.NDArray[np.int64]] record: np.record @@ -41,7 +43,7 @@ assert_type( order="K", byteorder="|", ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -50,13 +52,13 @@ assert_type( dtype=[("f8", np.float64), ("i8", np.int64)], strides=(5, 5), ), - np.recarray[Any, np.dtype], + np.recarray, ) -assert_type(np.rec.fromarrays(AR_LIST), np.recarray[Any, np.dtype]) +assert_type(np.rec.fromarrays(AR_LIST), np.recarray) assert_type( np.rec.fromarrays(AR_LIST, dtype=np.int64), - np.recarray[Any, np.dtype], + np.recarray, ) 
assert_type( np.rec.fromarrays( @@ -64,12 +66,12 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( np.rec.fromrecords((1, 1.5)), - np.recarray[Any, np.dtype[np.record]] + _RecArray ) assert_type( @@ -77,7 +79,7 @@ assert_type( [(1, 1.5)], dtype=[("i8", np.int64), ("f8", np.float64)], ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -86,7 +88,7 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -94,7 +96,7 @@ assert_type( b"(1, 1.5)", dtype=[("i8", np.int64), ("f8", np.float64)], ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -103,13 +105,16 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) -assert_type(np.rec.fromfile( - "test_file.txt", - dtype=[("i8", np.int64), ("f8", np.float64)], -), np.recarray[Any, np.dtype]) +assert_type( + np.rec.fromfile( + "test_file.txt", + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + np.recarray, +) assert_type( np.rec.fromfile( @@ -117,14 +122,14 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) -assert_type(np.rec.array(AR_i8), np.recarray[Any, np.dtype[np.int64]]) +assert_type(np.rec.array(AR_i8), np.recarray[tuple[Any, ...], np.dtype[np.int64]]) assert_type( np.rec.array([(1, 1.5)], dtype=[("i8", np.int64), ("f8", np.float64)]), - np.recarray[Any, np.dtype], + np.recarray, ) assert_type( @@ -133,7 +138,7 @@ assert_type( formats=[np.int64, np.float64], names=["i8", "f8"] ), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( @@ -142,7 +147,7 @@ assert_type( dtype=np.float64, shape=(10, 3), ), - np.recarray[Any, np.dtype], + np.recarray, ) assert_type( @@ -152,15 +157,15 @@ assert_type( names=["i8", "f8"], shape=(10, 3), ), - 
np.recarray[Any, np.dtype[np.record]], + _RecArray, ) assert_type( np.rec.array(file_obj, dtype=np.float64), - np.recarray[Any, np.dtype], + np.recarray, ) assert_type( np.rec.array(file_obj, formats=[np.int64, np.float64], names=["i8", "f8"]), - np.recarray[Any, np.dtype[np.record]], + _RecArray, ) diff --git a/numpy/typing/tests/data/reveal/stride_tricks.pyi b/numpy/typing/tests/data/reveal/stride_tricks.pyi index 33804d51d1b8..8fde9b8ae30d 100644 --- a/numpy/typing/tests/data/reveal/stride_tricks.pyi +++ b/numpy/typing/tests/data/reveal/stride_tricks.pyi @@ -20,8 +20,8 @@ assert_type(np.broadcast_to(AR_f8, 5), npt.NDArray[np.float64]) assert_type(np.broadcast_to(AR_LIKE_f, (1, 5)), npt.NDArray[Any]) assert_type(np.broadcast_to(AR_f8, [4, 6], subok=True), npt.NDArray[np.float64]) -assert_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2)), tuple[int, ...]) -assert_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)), tuple[int, ...]) +assert_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2)), tuple[Any, ...]) +assert_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)), tuple[Any, ...]) assert_type(np.broadcast_arrays(AR_f8, AR_f8), tuple[npt.NDArray[Any], ...]) assert_type(np.broadcast_arrays(AR_f8, AR_LIKE_f), tuple[npt.NDArray[Any], ...]) diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index c0102d7342ef..18bd252d5ff9 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -4,12 +4,12 @@ import numpy as np import numpy._typing as np_t import numpy.typing as npt +AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] + AR_U: npt.NDArray[np.str_] AR_S: npt.NDArray[np.bytes_] -AR_T: np.ndarray[np_t._Shape, np.dtypes.StringDType] - -AR_T_alias: TypeAlias = np.ndarray[np_t._Shape, np.dtypes.StringDType] -AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] +AR_T: AR_T_alias 
assert_type(np.strings.equal(AR_U, AR_U), npt.NDArray[np.bool]) assert_type(np.strings.equal(AR_S, AR_S), npt.NDArray[np.bool]) @@ -190,3 +190,7 @@ assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) assert_type(np.strings.translate(AR_T, ""), AR_T_alias) + +assert_type(np.strings.slice(AR_U, 1, 5, 2), npt.NDArray[np.str_]) +assert_type(np.strings.slice(AR_S, 1, 5, 2), npt.NDArray[np.bytes_]) +assert_type(np.strings.slice(AR_T, 1, 5, 2), AR_T_alias) diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py index a249472d5b3f..236952101126 100644 --- a/numpy/typing/tests/test_runtime.py +++ b/numpy/typing/tests/test_runtime.py @@ -1,20 +1,19 @@ """Test the runtime usage of `numpy.typing`.""" -from __future__ import annotations - from typing import ( - get_type_hints, - Union, + Any, NamedTuple, + Union, # pyright: ignore[reportDeprecated] get_args, get_origin, - Any, + get_type_hints, ) import pytest + import numpy as np -import numpy.typing as npt import numpy._typing as _npt +import numpy.typing as npt class TypeTup(NamedTuple): @@ -54,10 +53,7 @@ def test_get_type_hints(name: type, tup: TypeTup) -> None: """Test `typing.get_type_hints`.""" typ = tup.typ - # Explicitly set `__annotations__` in order to circumvent the - # stringification performed by `from __future__ import annotations` - def func(a): pass - func.__annotations__ = {"a": typ, "return": None} + def func(a: typ) -> None: pass out = get_type_hints(func) ref = {"a": typ, "return": type(None)} @@ -69,10 +65,7 @@ def test_get_type_hints_str(name: type, tup: TypeTup) -> None: """Test `typing.get_type_hints` with string-representation of types.""" typ_str, typ = f"npt.{name}", tup.typ - # Explicitly set `__annotations__` in order to circumvent the - # stringification performed by `from __future__ import annotations` - def func(a): pass - 
func.__annotations__ = {"a": typ_str, "return": None} + def func(a: typ_str) -> None: pass out = get_type_hints(func) ref = {"a": typ, "return": type(None)} diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index 2d8c48cb3fb2..ca4cf37fec3b 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -1,5 +1,3 @@ -from __future__ import annotations - import importlib.util import os import re @@ -10,7 +8,6 @@ import pytest - # Only trigger a full `mypy` run if this environment variable is set # Note that these tests tend to take over a minute even on a macOS M1 CPU, # and more than that in CI. @@ -34,6 +31,7 @@ if TYPE_CHECKING: from collections.abc import Iterator + # We need this as annotation, but it's located in a private namespace. # As a compromise, do *not* import it during runtime from _pytest.mark.structures import ParameterSet @@ -118,7 +116,7 @@ def run_mypy() -> None: filename = None -def get_test_cases(*directories: str) -> Iterator[ParameterSet]: +def get_test_cases(*directories: str) -> "Iterator[ParameterSet]": for directory in directories: for root, _, files in os.walk(directory): for fname in files: @@ -137,13 +135,6 @@ def get_test_cases(*directories: str) -> Iterator[ParameterSet]: {}""" -_FAIL_MSG_MISC = """{}:{} - error mismatch: - -Expression: {} -Expected error: {} -Observed error: {} -""" - @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @@ -196,49 +187,6 @@ def test_reveal(path: str) -> None: pytest.fail(reasons, pytrace=False) -LINENO_MAPPING = { - 6: "float96", - 7: "float128", - 8: "complex192", - 9: "complex256", -} - - -@pytest.mark.slow -@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") -def test_extended_precision() -> None: - from numpy.typing.mypy_plugin import _EXTENDED_PRECISION_LIST - - path = os.path.join(MISC_DIR, "extended_precision.pyi") - output_mypy = OUTPUT_MYPY - assert path in output_mypy - - expected_msg = 'Expression 
is of type "Any"' - - with open(path) as f: - expression_list = f.readlines() - - failures = [] - for _msg in output_mypy[path]: - lineno, error_msg = _strip_filename(_msg) - expr = expression_list[lineno - 1].rstrip("\n") - - if LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST: - raise AssertionError(_FAIL_MSG_REVEAL.format(lineno, error_msg)) - if "error" in error_msg or expected_msg not in error_msg: - continue - - if "\n" in error_msg: - error_msg = "\n" + textwrap.indent(error_msg, _FAIL_INDENT) - relpath = os.path.relpath(path) - failure = _FAIL_MSG_MISC.format(relpath, lineno, expr, expected_msg, error_msg) - failures.append(failure) - - if failures: - reasons = _FAIL_SEP.join(failures) - pytest.fail(reasons, pytrace=False) - - @pytest.mark.slow @pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") @pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) diff --git a/pavement.py b/pavement.py index 9b4e4fc81346..369b8703b0ba 100644 --- a/pavement.py +++ b/pavement.py @@ -22,21 +22,20 @@ - fix bdist_mpkg: we build the same source twice -> how to make sure we use the same underlying python for egg install in venv and for bdist_mpkg """ -import os import hashlib +import os import textwrap # The paver package needs to be installed to run tasks import paver -from paver.easy import Bunch, options, task, sh - +from paver.easy import Bunch, options, sh, task #----------------------------------- # Things to be changed for a release #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.3.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.4.0-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index b62d71cbba73..b0e58705ebd1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.3.0.dev0" +version = "2.4.0.dev0" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) 
license = {file = "LICENSE.txt"} @@ -142,14 +142,17 @@ tracker = "https://github.com/numpy/numpy/issues" # build wheels for in CI are controlled in `.github/workflows/wheels.yml` and # `tools/ci/cirrus_wheels.yml`. build-frontend = "build" -skip = "*_i686 *_ppc64le *_s390x *_universal2" +skip = ["*_i686", "*_ppc64le", "*_s390x", "*_universal2"] before-build = "bash {project}/tools/wheels/cibw_before_build.sh {project}" -# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) -config-settings = "setup-args=-Duse-ilp64=true setup-args=-Dallow-noblas=false build-dir=build" before-test = "pip install -r {project}/requirements/test_requirements.txt" test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}" enable = ["cpython-freethreading", "pypy", "cpython-prerelease"] +# The build will use openblas64 everywhere, except on arm64 macOS >=14.0 (uses Accelerate) +[tool.cibuildwheel.config-settings] +setup-args = ["-Duse-ilp64=true", "-Dallow-noblas=false"] +build-dir = "build" + [tool.cibuildwheel.linux] manylinux-x86_64-image = "manylinux_2_28" manylinux-aarch64-image = "manylinux_2_28" @@ -157,7 +160,14 @@ musllinux-x86_64-image = "musllinux_1_2" musllinux-aarch64-image = "musllinux_1_2" [tool.cibuildwheel.pyodide] -config-settings = "build-dir=build setup-args=--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross setup-args=-Dblas=none setup-args=-Dlapack=none" +before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" +# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten +repair-wheel-command = "" +test-command = "python -m pytest --pyargs numpy -m 'not slow'" + +[tool.cibuildwheel.pyodide.config-settings] +build-dir = "build" +setup-args = ["--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross", "-Dblas=none", "-Dlapack=none"] [tool.cibuildwheel.linux.environment] # RUNNER_OS is a GitHub Actions specific env var; define it here so it works on 
Cirrus CI too @@ -178,27 +188,16 @@ repair-wheel-command = [ ] [tool.cibuildwheel.windows] +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=false"], build-dir="build"} +repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" # This does not work, use CIBW_ENVIRONMENT_WINDOWS environment = {PKG_CONFIG_PATH="./.openblas"} -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=false build-dir=build" -repair-wheel-command = "bash -el ./tools/wheels/repair_windows.sh {wheel} {dest_dir}" [[tool.cibuildwheel.overrides]] -select = "*-win32" -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" +select = ["*-win32"] +config-settings = {setup-args = ["--vsenv", "-Dallow-noblas=true"], build-dir="build"} repair-wheel-command = "" -[[tool.cibuildwheel.overrides]] -select = "*-win_arm64" -config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" -repair-wheel-command = "" - -[[tool.cibuildwheel.overrides]] -select = "*pyodide*" -before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" -# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten -repair-wheel-command = "" -test-command = "python -m pytest --pyargs numpy -m 'not slow'" [tool.meson-python] meson = 'vendored-meson/meson/meson.py' diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 5a7be719214a..74c9a51ec111 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,4 @@ spin==0.13 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.29.0.0 +scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' +scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index adf7d86558f0..b6ea06c812c8 100644 --- 
a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,6 @@ spin==0.13 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.29.0.0 -scipy-openblas64==0.3.29.0.0 +scipy-openblas32==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' +scipy-openblas32==0.3.29.265.0 ; sys_platform == 'win32' and platform_machine == 'ARM64' +# Note there is not yet a win-arm64 wheel, so we currently only exclude win-arm64 +scipy-openblas64==0.3.29.0.0 ; sys_platform != 'win32' or platform_machine != 'ARM64' diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index 330f0f7ac8b9..a6eb6e97b5cf 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -17,9 +17,7 @@ pickleshare towncrier toml - -# for doctests, also needs pytz which is in test_requirements -scipy-doctest==1.6.0 +scipy-doctest>=1.8.0 # interactive documentation utilities # see https://github.com/jupyterlite/pyodide-kernel#compatibility diff --git a/requirements/emscripten_test_requirements.txt b/requirements/emscripten_test_requirements.txt index 18cfb219034d..019a69da687a 100644 --- a/requirements/emscripten_test_requirements.txt +++ b/requirements/emscripten_test_requirements.txt @@ -1,4 +1,4 @@ hypothesis==6.81.1 pytest==7.4.0 -pytz==2023.3.post1 +tzdata pytest-xdist diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt index dac35e08fd6e..45319571b561 100644 --- a/requirements/linter_requirements.txt +++ b/requirements/linter_requirements.txt @@ -1,2 +1,3 @@ -ruff==0.8.3 +# keep in sync with `environment.yml` +ruff==0.11.13 GitPython>=3.1.30 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index a2a68f044a50..17260753db4a 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -4,7 +4,6 @@ setuptools==65.5.1 ; python_version < '3.12' setuptools ; python_version >= '3.12' hypothesis==6.104.1 
pytest==7.4.0 -pytz==2023.3.post1 pytest-cov==4.1.0 meson ninja; sys_platform != "emscripten" @@ -13,7 +12,9 @@ pytest-timeout # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.15.0; platform_python_implementation != "PyPy" +mypy==1.16.0; platform_python_implementation != "PyPy" typing_extensions>=4.5.0 # for optional f2py encoding detection charset-normalizer +tzdata + diff --git a/ruff.toml b/ruff.toml index 4224a478d863..7454c6c05e5b 100644 --- a/ruff.toml +++ b/ruff.toml @@ -14,15 +14,24 @@ extend-exclude = [ "numpy/_core/src/common/pythoncapi-compat", ] +line-length = 88 + +[format] +line-ending = "lf" + [lint] preview = true extend-select = [ + "B", "C4", + "ISC", "LOG", "G", "PIE", "TID", "FLY", + "I", + "PD", "E", "W", "PGH", @@ -30,22 +39,94 @@ extend-select = [ "UP", ] ignore = [ - "F", # TODO: enable Pyflakes rules + "B006", # Do not use mutable data structures for argument defaults + "B007", # Loop control variable not used within loop body + "B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()` + "B023", # Function definition does not bind loop variable + "B028", # No explicit `stacklevel` keyword argument found + "B904", # Within an `except` clause distinguish raised exceptions from errors in exception handling + "B905", #`zip()` without an explicit `strict=` parameter "C408", # Unnecessary `dict()` call (rewrite as a literal) + "ISC002", # Implicitly concatenated string literals over multiple lines "PIE790", # Unnecessary `pass` statement + "PD901", # Avoid using the generic variable name `df` for DataFrames "E241", # Multiple spaces after comma "E265", # Block comment should start with `# ` "E266", # Too many leading `#` before block comment "E302", # TODO: Expected 2 blank lines, found 1 "E402", # Module level import not at top of file - "E501", # TODO: Line too long "E712", # Avoid equality comparisons 
to `True` or `False` "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check "E731", # Do not assign a `lambda` expression, use a `def` "E741", # Ambiguous variable name + "F403", # `from ... import *` used; unable to detect undefined names + "F405", # may be undefined, or defined from star imports + "F821", # Undefined name + "F841", # Local variable is assigned to but never used "UP015", # Unnecessary mode argument "UP031", # TODO: Use format specifiers instead of percent format ] [lint.per-file-ignores] -"test*.py" = ["E201", "E714"] +"_tempita.py" = ["B909"] +"bench_*.py" = ["B015", "B018"] +"test*.py" = ["B015", "B018", "E201", "E714"] + +"benchmarks/benchmarks/bench_linalg.py" = ["E501"] +"numpy/_core/tests/test_api.py" = ["E501"] +"numpy/_core/tests/test_arrayprint.py" = ["E501"] +"numpy/_core/tests/test_cpu_dispatcher.py" = ["E501"] +"numpy/_core/tests/test_cpu_features.py" = ["E501"] +"numpy/_core/tests/test_datetime.py" = ["E501"] +"numpy/_core/tests/test_dtype.py" = ["E501"] +"numpy/_core/tests/test_defchararray.py" = ["E501"] +"numpy/_core/tests/test_einsum.py" = ["E501"] +"numpy/_core/tests/test_multiarray.py" = ["E501"] +"numpy/_core/tests/test_multithreading.py" = ["E501"] +"numpy/_core/tests/test_nditer*py" = ["E501"] +"numpy/_core/tests/test_ufunc*py" = ["E501"] +"numpy/_core/tests/test_umath*py" = ["E501"] +"numpy/_core/tests/test_numeric*.py" = ["E501"] +"numpy/_core/tests/test_regression.py" = ["E501"] +"numpy/_core/tests/test_shape_base.py" = ["E501"] +"numpy/_core/tests/test_simd*.py" = ["E501"] +"numpy/_core/tests/test_strings.py" = ["E501"] +"numpy/_core/_add_newdocs.py" = ["E501"] +"numpy/_core/_add_newdocs_scalars.py" = ["E501"] +"numpy/_core/code_generators/generate_umath.py" = ["E501"] +"numpy/lib/tests/test_function_base.py" = ["E501"] +"numpy/lib/tests/test_format.py" = ["E501"] +"numpy/lib/tests/test_io.py" = ["E501"] +"numpy/lib/tests/test_polynomial.py" = ["E501"] 
+"numpy/linalg/tests/test_linalg.py" = ["E501"] +"numpy/tests/test_configtool.py" = ["E501"] +"numpy/f2py/*py" = ["E501"] +# for typing related files we follow https://typing.python.org/en/latest/guides/writing_stubs.html#maximum-line-length +"numpy/_typing/_array_like.py" = ["E501"] +"numpy/_typing/_dtype_like.py" = ["E501"] +"numpy*pyi" = ["E501"] + +"__init__.py" = ["F401", "F403", "F405"] +"__init__.pyi" = ["F401"] +"numpy/_core/defchararray.py" = ["F403", "F405"] +"numpy/_core/multiarray.py" = ["F405"] +"numpy/_core/numeric.py" = ["F403", "F405"] +"numpy/_core/umath.py" = ["F401", "F403", "F405"] +"numpy/f2py/capi_maps.py" = ["F403", "F405"] +"numpy/f2py/crackfortran.py" = ["F403", "F405"] +"numpy/f2py/f90mod_rules.py" = ["F403", "F405"] +"numpy/ma/core.pyi" = ["F403", "F405"] +"numpy/matlib.py" = ["F405"] +"numpy/matlib.pyi" = ["F811"] + +[lint.flake8-builtins] +builtins-allowed-modules = ["random", "typing"] + +[lint.flake8-import-conventions.extend-aliases] +"numpy" = "np" +"numpy.typing" = "npt" + +[lint.isort] +# these are treated as stdlib within .pyi stubs +extra-standard-library = ["_typeshed", "typing_extensions"] +known-first-party = ["numpy"] diff --git a/tools/c_coverage/c_coverage_report.py b/tools/c_coverage/c_coverage_report.py index 0d65bf3af075..917b977dc195 100755 --- a/tools/c_coverage/c_coverage_report.py +++ b/tools/c_coverage/c_coverage_report.py @@ -7,15 +7,15 @@ import os import re import sys -from xml.sax.saxutils import quoteattr, escape +from xml.sax.saxutils import escape, quoteattr try: import pygments if tuple(int(x) for x in pygments.__version__.split('.')) < (0, 11): raise ImportError from pygments import highlight - from pygments.lexers import CLexer from pygments.formatters import HtmlFormatter + from pygments.lexers import CLexer has_pygments = True except ImportError: print("This script requires pygments 0.11 or greater to generate HTML") diff --git a/tools/changelog.py b/tools/changelog.py index f1f42f9333d0..6013d70adfbc 
100755 --- a/tools/changelog.py +++ b/tools/changelog.py @@ -35,6 +35,7 @@ """ import os import re + from git import Repo from github import Github diff --git a/tools/check_installed_files.py b/tools/check_installed_files.py index 0783583cd928..61bc49197d79 100644 --- a/tools/check_installed_files.py +++ b/tools/check_installed_files.py @@ -18,11 +18,10 @@ """ -import os import glob -import sys import json - +import os +import sys CUR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__))) ROOT_DIR = os.path.dirname(CUR_DIR) diff --git a/tools/check_openblas_version.py b/tools/check_openblas_version.py index b51e68047fd4..9aa0b265dea5 100644 --- a/tools/check_openblas_version.py +++ b/tools/check_openblas_version.py @@ -6,10 +6,11 @@ example: check_openblas_version.py 0.3.26 """ -import numpy import pprint import sys +import numpy + version = sys.argv[1] deps = numpy.show_config('dicts')['Build Dependencies'] assert "blas" in deps diff --git a/tools/ci/array-api-xfails.txt b/tools/ci/array-api-xfails.txt index 98c3895ced06..8370099015c5 100644 --- a/tools/ci/array-api-xfails.txt +++ b/tools/ci/array-api-xfails.txt @@ -1,5 +1,20 @@ # finfo return type misalignment array_api_tests/test_data_type_functions.py::test_finfo[float32] +array_api_tests/test_data_type_functions.py::test_finfo[complex64] + +# finfo: data type not inexact +array_api_tests/test_data_type_functions.py::test_finfo[float64] +array_api_tests/test_data_type_functions.py::test_finfo[complex128] + +# iinfo: Invalid integer data type 'O' +array_api_tests/test_data_type_functions.py::test_iinfo[int8] +array_api_tests/test_data_type_functions.py::test_iinfo[uint8] +array_api_tests/test_data_type_functions.py::test_iinfo[int16] +array_api_tests/test_data_type_functions.py::test_iinfo[uint16] +array_api_tests/test_data_type_functions.py::test_iinfo[int32] +array_api_tests/test_data_type_functions.py::test_iinfo[uint32] +array_api_tests/test_data_type_functions.py::test_iinfo[int64] 
+array_api_tests/test_data_type_functions.py::test_iinfo[uint64] # 'shape' arg is present. 'newshape' is retained for backward compat. array_api_tests/test_signatures.py::test_func_signature[reshape] diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index b531e953daee..6d02411df2e9 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -11,7 +11,7 @@ macosx_arm64_task: CIRRUS_CLONE_SUBMODULES: true macos_instance: matrix: - image: ghcr.io/cirruslabs/macos-monterey-xcode + image: ghcr.io/cirruslabs/macos-runner:sonoma matrix: - env: diff --git a/tools/ci/push_docs_to_repo.py b/tools/ci/push_docs_to_repo.py index 9ea33da6ddc3..801454792304 100755 --- a/tools/ci/push_docs_to_repo.py +++ b/tools/ci/push_docs_to_repo.py @@ -1,12 +1,11 @@ #!/usr/bin/env python3 import argparse -import subprocess -import tempfile import os -import sys import shutil - +import subprocess +import sys +import tempfile parser = argparse.ArgumentParser( description='Upload files to a remote repo, replacing existing content' diff --git a/tools/ci/test_all_newsfragments_used.py b/tools/ci/test_all_newsfragments_used.py index 1df58791ad82..25b5103cb153 100755 --- a/tools/ci/test_all_newsfragments_used.py +++ b/tools/ci/test_all_newsfragments_used.py @@ -1,8 +1,10 @@ #!/usr/bin/env python3 +import os import sys + import toml -import os + def main(): path = toml.load("pyproject.toml")["tool"]["towncrier"]["directory"] diff --git a/tools/download-wheels.py b/tools/download-wheels.py index 598075f0b03c..38a8360f0437 100644 --- a/tools/download-wheels.py +++ b/tools/download-wheels.py @@ -23,18 +23,26 @@ $ python tools/download-wheels.py 1.19.0 -w ~/wheelhouse """ +import argparse import os import re import shutil -import argparse import urllib3 from bs4 import BeautifulSoup -__version__ = "0.1" +__version__ = "0.2" # Edit these for other projects. 
-STAGING_URL = "https://anaconda.org/multibuild-wheels-staging/numpy" + +# The first URL is used to get the file names as it avoids the need for paging +# when the number of files exceeds the page length. Note that files/page is not +# stable and can change when the page layout changes. The second URL is used to +# retrieve the files themselves. This workaround is copied from SciPy. +NAMES_URL = "https://pypi.anaconda.org/multibuild-wheels-staging/simple/numpy/" +FILES_URL = "https://anaconda.org/multibuild-wheels-staging/numpy" + +# Name prefix of the files to download. PREFIX = "numpy" # Name endings of the files to download. @@ -56,17 +64,12 @@ def get_wheel_names(version): The release version. For instance, "1.18.3". """ - ret = [] http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED") tmpl = re.compile(rf"^.*{PREFIX}-{version}{SUFFIX}") - # TODO: generalize this by searching for `showing 1 of N` and - # looping over N pages, starting from 1 - for i in range(1, 3): - index_url = f"{STAGING_URL}/files?page={i}" - index_html = http.request("GET", index_url) - soup = BeautifulSoup(index_html.data, "html.parser") - ret += soup.find_all(string=tmpl) - return ret + index_url = f"{NAMES_URL}" + index_html = http.request('GET', index_url) + soup = BeautifulSoup(index_html.data, 'html.parser') + return sorted(soup.find_all(string=tmpl)) def download_wheels(version, wheelhouse, test=False): @@ -87,7 +90,7 @@ def download_wheels(version, wheelhouse, test=False): wheel_names = get_wheel_names(version) for i, wheel_name in enumerate(wheel_names): - wheel_url = f"{STAGING_URL}/{version}/download/{wheel_name}" + wheel_url = f"{FILES_URL}/{version}/download/{wheel_name}" wheel_path = os.path.join(wheelhouse, wheel_name) with open(wheel_path, "wb") as f: with http.request("GET", wheel_url, preload_content=False,) as r: diff --git a/tools/functions_missing_types.py b/tools/functions_missing_types.py index dc161621b1b0..8149a0106575 100755 --- a/tools/functions_missing_types.py 
+++ b/tools/functions_missing_types.py @@ -82,7 +82,6 @@ def visit_FunctionDef(self, node): def visit_ClassDef(self, node): if not node.name.startswith("_"): self.attributes.add(node.name) - return def visit_AnnAssign(self, node): self.attributes.add(node.target.id) diff --git a/tools/linter.py b/tools/linter.py index 6cf7d450f529..1ce9ca763343 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -1,9 +1,8 @@ import os -import sys import subprocess +import sys from argparse import ArgumentParser - CWD = os.path.abspath(os.path.dirname(__file__)) diff --git a/tools/refguide_check.py b/tools/refguide_check.py index 41cf3ddfefa5..da881574215f 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -33,17 +33,17 @@ import re import sys import warnings -import docutils.core from argparse import ArgumentParser +import docutils.core from docutils.parsers.rst import directives - sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext')) from numpydoc.docscrape_sphinx import get_doc_object # Enable specific Sphinx directives -from sphinx.directives.other import SeeAlso, Only +from sphinx.directives.other import Only, SeeAlso + directives.register_directive('seealso', SeeAlso) directives.register_directive('only', Only) @@ -560,12 +560,13 @@ def main(argv): if not args.module_names: args.module_names = list(PUBLIC_SUBMODULES) + [BASE_MODULE] - module_names = list(args.module_names) - for name in module_names: - if name in OTHER_MODULE_DOCS: - name = OTHER_MODULE_DOCS[name] - if name not in module_names: - module_names.append(name) + module_names = args.module_names + [ + OTHER_MODULE_DOCS[name] + for name in args.module_names + if name in OTHER_MODULE_DOCS + ] + # remove duplicates while maintaining order + module_names = list(dict.fromkeys(module_names)) dots = True success = True diff --git a/tools/swig/test/setup.py b/tools/swig/test/setup.py index f792b03a0933..98ba239942bd 100755 --- a/tools/swig/test/setup.py +++ 
b/tools/swig/test/setup.py @@ -1,8 +1,6 @@ #!/usr/bin/env python3 -# System imports from distutils.core import Extension, setup -# Third-party modules - we depend on numpy for everything import numpy # Obtain the numpy include directory. diff --git a/tools/swig/test/testArray.py b/tools/swig/test/testArray.py index c4740ecc6e4d..a8528207c167 100755 --- a/tools/swig/test/testArray.py +++ b/tools/swig/test/testArray.py @@ -1,10 +1,9 @@ #!/usr/bin/env python3 -# System imports import sys import unittest -# Import NumPy import numpy as np + major, minor = [int(d) for d in np.__version__.split(".")[:2]] if major == 0: BadListError = TypeError diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py index de98a52016b1..a9310e20a897 100755 --- a/tools/swig/test/testFarray.py +++ b/tools/swig/test/testFarray.py @@ -1,12 +1,11 @@ #!/usr/bin/env python3 -# System imports -from distutils.util import get_platform import os import sys import unittest +from distutils.util import get_platform -# Import NumPy import numpy as np + major, minor = [int(d) for d in np.__version__.split(".")[:2]] if major == 0: BadListError = TypeError diff --git a/tools/swig/test/testFlat.py b/tools/swig/test/testFlat.py index 4a25037a6473..ce6f74819e86 100755 --- a/tools/swig/test/testFlat.py +++ b/tools/swig/test/testFlat.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -# System imports +import struct import sys import unittest -import struct - -# Import NumPy import numpy as np + major, minor = [int(d) for d in np.__version__.split(".")[:2]] if major == 0: BadListError = TypeError diff --git a/tools/swig/test/testFortran.py b/tools/swig/test/testFortran.py index 155a60a6e980..498732f3118f 100644 --- a/tools/swig/test/testFortran.py +++ b/tools/swig/test/testFortran.py @@ -1,10 +1,9 @@ #!/usr/bin/env python3 -# System imports import sys import unittest -# Import NumPy import numpy as np + major, minor = [int(d) for d in np.__version__.split(".")[:2]] if major == 0: BadListError = 
TypeError diff --git a/tools/swig/test/testMatrix.py b/tools/swig/test/testMatrix.py index 5f6f80a06148..d20312ecc2a0 100755 --- a/tools/swig/test/testMatrix.py +++ b/tools/swig/test/testMatrix.py @@ -1,10 +1,9 @@ #!/usr/bin/env python3 -# System imports import sys import unittest -# Import NumPy import numpy as np + major, minor = [int(d) for d in np.__version__.split(".")[:2]] if major == 0: BadListError = TypeError diff --git a/tools/swig/test/testSuperTensor.py b/tools/swig/test/testSuperTensor.py index 5f185884641e..e0027428e647 100644 --- a/tools/swig/test/testSuperTensor.py +++ b/tools/swig/test/testSuperTensor.py @@ -1,10 +1,9 @@ #!/usr/bin/env python3 -# System imports import sys import unittest -# Import NumPy import numpy as np + major, minor = [int(d) for d in np.__version__.split(".")[:2]] if major == 0: BadListError = TypeError diff --git a/tools/swig/test/testTensor.py b/tools/swig/test/testTensor.py index 7aab009a0958..aa962b0cbcda 100755 --- a/tools/swig/test/testTensor.py +++ b/tools/swig/test/testTensor.py @@ -1,11 +1,10 @@ #!/usr/bin/env python3 -# System imports -from math import sqrt import sys import unittest +from math import sqrt -# Import NumPy import numpy as np + major, minor = [int(d) for d in np.__version__.split(".")[:2]] if major == 0: BadListError = TypeError diff --git a/tools/swig/test/testVector.py b/tools/swig/test/testVector.py index ac40f7a9c729..f0b51715d1d5 100755 --- a/tools/swig/test/testVector.py +++ b/tools/swig/test/testVector.py @@ -1,10 +1,9 @@ #!/usr/bin/env python3 -# System imports import sys import unittest -# Import NumPy import numpy as np + major, minor = [int(d) for d in np.__version__.split(".")[:2]] if major == 0: BadListError = TypeError diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt index 9e2d9053b8a7..db488c6cff47 100644 --- a/tools/wheels/LICENSE_linux.txt +++ b/tools/wheels/LICENSE_linux.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy.libs/libscipy_openblas*.so Description: 
bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy.libs/libgfortran*.so Description: dynamically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. Libgfortran is free software; you can redistribute it and/or modify diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt index 7ef2e381874e..5cea18441b35 100644 --- a/tools/wheels/LICENSE_osx.txt +++ b/tools/wheels/LICENSE_osx.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy/.dylibs/libgfortran*, numpy/.dylibs/libgcc* Description: dynamically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. 
Libgfortran is free software; you can redistribute it and/or modify diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt index c8277e7710a2..aed96845583b 100644 --- a/tools/wheels/LICENSE_win32.txt +++ b/tools/wheels/LICENSE_win32.txt @@ -44,7 +44,7 @@ Name: LAPACK Files: numpy.libs\libscipy_openblas*.dll Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ -License: BSD-3-Clause-Attribution +License: BSD-3-Clause-Open-MPI Copyright (c) 1992-2013 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. @@ -99,7 +99,7 @@ Name: GCC runtime library Files: numpy.libs\libscipy_openblas*.dll Description: statically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran -License: GPL-3.0-with-GCC-exception +License: GPL-3.0-or-later WITH GCC-exception-3.1 Copyright (C) 2002-2017 Free Software Foundation, Inc. Libgfortran is free software; you can redistribute it and/or modify diff --git a/tools/wheels/check_license.py b/tools/wheels/check_license.py index 0a29bdf69fc0..572295b4ca2f 100644 --- a/tools/wheels/check_license.py +++ b/tools/wheels/check_license.py @@ -7,10 +7,10 @@ distribution. 
""" -import sys -import re import argparse import pathlib +import re +import sys def check_text(text): diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 3e1d4498fe7c..e41e5d37316b 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -22,9 +22,6 @@ fi if [[ $(python -c"import sys; print(sys.maxsize)") < $(python -c"import sys; print(2**33)") ]]; then echo "No BLAS used for 32-bit wheels" export INSTALL_OPENBLAS=false -elif [[ $(python -c"import sysconfig; print(sysconfig.get_platform())") == "win-arm64" ]]; then - echo "No BLAS used for ARM64 wheels" - export INSTALL_OPENBLAS=false elif [ -z $INSTALL_OPENBLAS ]; then # the macos_arm64 build might not set this variable export INSTALL_OPENBLAS=true @@ -32,20 +29,33 @@ fi # Install Openblas from scipy-openblas64 if [[ "$INSTALL_OPENBLAS" = "true" ]] ; then - echo PKG_CONFIG_PATH $PKG_CONFIG_PATH + # by default, use scipy-openblas64 + OPENBLAS=openblas64 + # Possible values for RUNNER_ARCH in github are + # X86, X64, ARM, or ARM64 + # TODO: should we detect a missing RUNNER_ARCH and use platform.machine() + # when wheel build is run outside github? 
+ # On 32-bit platforms, use scipy_openblas32 + # On win-arm64 use scipy_openblas32 + if [[ $RUNNER_ARCH == "X86" || $RUNNER_ARCH == "ARM" ]] ; then + OPENBLAS=openblas32 + elif [[ $RUNNER_ARCH == "ARM64" && $RUNNER_OS == "Windows" ]] ; then + OPENBLAS=openblas32 + fi + echo PKG_CONFIG_PATH is $PKG_CONFIG_PATH, OPENBLAS is ${OPENBLAS} PKG_CONFIG_PATH=$PROJECT_DIR/.openblas rm -rf $PKG_CONFIG_PATH mkdir -p $PKG_CONFIG_PATH python -m pip install -r requirements/ci_requirements.txt - python -c "import scipy_openblas64; print(scipy_openblas64.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc + python -c "import scipy_${OPENBLAS}; print(scipy_${OPENBLAS}.get_pkg_config())" > $PKG_CONFIG_PATH/scipy-openblas.pc # Copy the shared objects to a path under $PKG_CONFIG_PATH, the build # will point $LD_LIBRARY_PATH there and then auditwheel/delocate-wheel will # pull these into the wheel. Use python to avoid windows/posix problems python <