Initial & Program PY
This commit is contained in:
commit
2777d93573
2852 changed files with 581026 additions and 0 deletions
8
credential.txt
Normal file
8
credential.txt
Normal file
|
|
@@ -0,0 +1,8 @@
|
|||
Félicitations !
|
||||
Nous sommes ravis de vous annoncer que vous avez franchi avec succès la première étape.
|
||||
Pour continuer, veuillez utiliser le mot de passe chiffré en Hill avec la clé IJDK et passer par l'autre service ouvert sur la machine.
|
||||
|
||||
Mot de passe : yzhxvq
|
||||
|
||||
Cordialement,
|
||||
Deby
|
||||
222
env/Lib/site-packages/_distutils_hack/__init__.py
vendored
Normal file
222
env/Lib/site-packages/_distutils_hack/__init__.py
vendored
Normal file
|
|
@@ -0,0 +1,222 @@
|
|||
# don't import any costly modules
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
# True when running under the PyPy interpreter; used below to special-case
# PyPy 3.6's unconditional import of distutils at startup.
is_pypy = '__pypy__' in sys.builtin_module_names
|
||||
|
||||
|
||||
def warn_distutils_present():
    """
    Warn when stdlib ``distutils`` was imported before Setuptools.

    No-op when distutils has not been imported, and on PyPy < 3.7 where the
    interpreter itself imports distutils unconditionally.
    """
    if 'distutils' not in sys.modules:
        return
    if is_pypy and sys.version_info < (3, 7):
        # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
        # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
        return
    import warnings

    message = (
        "Distutils was imported before Setuptools, but importing Setuptools "
        "also replaces the `distutils` module in `sys.modules`. This may lead "
        "to undesirable behaviors or errors. To avoid these issues, avoid "
        "using distutils directly, ensure that setuptools is installed in the "
        "traditional way (e.g. not an editable install), and/or make sure "
        "that setuptools is always imported before distutils."
    )
    warnings.warn(message)
|
||||
|
||||
|
||||
def clear_distutils():
    """
    Remove ``distutils`` and every ``distutils.*`` submodule from
    ``sys.modules``, warning that the replacement is happening.
    No-op when distutils is not loaded.
    """
    if 'distutils' not in sys.modules:
        return
    import warnings

    warnings.warn("Setuptools is replacing distutils.")
    stale = [
        mod_name
        for mod_name in sys.modules
        if mod_name == "distutils" or mod_name.startswith("distutils.")
    ]
    for mod_name in stale:
        del sys.modules[mod_name]
|
||||
|
||||
|
||||
def enabled():
    """
    Report whether the setuptools-local distutils is selected.

    Controlled by the ``SETUPTOOLS_USE_DISTUTILS`` environment variable;
    any value other than ``'local'`` (the default) disables the override.
    """
    choice = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
    return choice == 'local'
|
||||
|
||||
|
||||
def ensure_local_distutils():
    """
    Re-import ``distutils`` so it resolves to ``setuptools._distutils``,
    then verify that the swap actually took effect.
    """
    import importlib

    # Start from a clean slate: drop any already-imported distutils modules
    # so the import below must go through the meta-path finder.
    clear_distutils()

    # With the DistutilsMetaFinder in place,
    # perform an import to cause distutils to be
    # loaded from setuptools._distutils. Ref #2906.
    with shim():
        importlib.import_module('distutils')

    # check that submodules load as expected
    core = importlib.import_module('distutils.core')
    assert '_distutils' in core.__file__, core.__file__
    # the local distutils must not have pulled in setuptools' log shim
    assert 'setuptools._distutils.log' not in sys.modules
|
||||
|
||||
|
||||
def do_override():
    """
    Ensure that the local copy of distutils is preferred over stdlib.

    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
    for more motivation.

    No-op when the ``SETUPTOOLS_USE_DISTUTILS`` override is disabled.
    """
    if not enabled():
        return
    warn_distutils_present()
    ensure_local_distutils()
|
||||
|
||||
|
||||
class _TrivialRe:
    """
    Minimal stand-in for a compiled regex: ``match`` succeeds when every
    stored pattern occurs as a plain substring of the candidate string.
    """

    def __init__(self, *patterns):
        self._patterns = patterns

    def match(self, string):
        """Return True only if each pattern is contained in *string*."""
        for pattern in self._patterns:
            if pattern not in string:
                return False
        return True
|
||||
|
||||
|
||||
class DistutilsMetaFinder:
    """
    ``sys.meta_path`` finder that redirects ``import distutils`` to the copy
    bundled with setuptools (``setuptools._distutils``), while deferring to
    the stdlib version in contexts where that would break things (pip's own
    build, CPython builds, and select CPython test modules).

    Dispatch is name-based: importing module ``X`` looks up a ``spec_for_X``
    method on this instance; absent methods mean "no opinion".
    """

    def find_spec(self, fullname, path, target=None):
        # optimization: only consider top level modules and those
        # found in the CPython test suite.
        if path is not None and not fullname.startswith('test.'):
            return

        # e.g. importing 'distutils' dispatches to self.spec_for_distutils.
        method_name = 'spec_for_{fullname}'.format(**locals())
        method = getattr(self, method_name, lambda: None)
        return method()

    def spec_for_distutils(self):
        # During a CPython build/test run the stdlib distutils must win.
        if self.is_cpython():
            return

        import importlib
        import importlib.abc
        import importlib.util

        try:
            mod = importlib.import_module('setuptools._distutils')
        except Exception:
            # There are a couple of cases where setuptools._distutils
            # may not be present:
            # - An older Setuptools without a local distutils is
            #   taking precedence. Ref #2957.
            # - Path manipulation during sitecustomize removes
            #   setuptools from the path but only after the hook
            #   has been loaded. Ref #2980.
            # In either case, fall back to stdlib behavior.
            return

        # Loader that hands back the already-imported setuptools._distutils
        # module object, renamed to 'distutils'.
        class DistutilsLoader(importlib.abc.Loader):
            def create_module(self, spec):
                mod.__name__ = 'distutils'
                return mod

            def exec_module(self, module):
                pass

        return importlib.util.spec_from_loader(
            'distutils', DistutilsLoader(), origin=mod.__file__
        )

    @staticmethod
    def is_cpython():
        """
        Suppress supplying distutils for CPython (build and tests).
        Ref #2965 and #3007.
        """
        # pybuilddir.txt exists in the cwd only during a CPython build.
        return os.path.isfile('pybuilddir.txt')

    def spec_for_pip(self):
        """
        Ensure stdlib distutils when running under pip.
        See pypa/pip#8761 for rationale.
        """
        if self.pip_imported_during_build():
            return
        clear_distutils()
        # Permanently disable the override on this instance.
        self.spec_for_distutils = lambda: None

    @classmethod
    def pip_imported_during_build(cls):
        """
        Detect if pip is being imported in a build script. Ref #2355.
        """
        import traceback

        return any(
            cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
        )

    @staticmethod
    def frame_file_is_setup(frame):
        """
        Return True if the indicated frame suggests a setup.py file.
        """
        # some frames may not have __file__ (#2940)
        return frame.f_globals.get('__file__', '').endswith('setup.py')

    def spec_for_sensitive_tests(self):
        """
        Ensure stdlib distutils when running select tests under CPython.

        python/cpython#91169
        """
        clear_distutils()
        # Permanently disable the override on this instance.
        self.spec_for_distutils = lambda: None

    # Test modules whose import must NOT be redirected; before 3.10 several
    # suites are sensitive, afterwards only test_distutils itself.
    sensitive_tests = (
        [
            'test.test_distutils',
            'test.test_peg_generator',
            'test.test_importlib',
        ]
        if sys.version_info < (3, 10)
        else [
            'test.test_distutils',
        ]
    )
|
||||
|
||||
|
||||
# Register a spec_for_<module> handler for every sensitive test module so
# that find_spec's name-based dispatch routes them to the stdlib fallback.
for name in DistutilsMetaFinder.sensitive_tests:
    handler_name = f'spec_for_{name}'
    setattr(DistutilsMetaFinder, handler_name, DistutilsMetaFinder.spec_for_sensitive_tests)
|
||||
|
||||
|
||||
# Singleton finder instance shared by add_shim/insert_shim/remove_shim.
DISTUTILS_FINDER = DistutilsMetaFinder()
|
||||
|
||||
|
||||
def add_shim():
    """Install the finder on ``sys.meta_path`` unless it is already there."""
    if DISTUTILS_FINDER not in sys.meta_path:
        insert_shim()
|
||||
|
||||
|
||||
class shim:
    """Context manager that temporarily installs the distutils finder."""

    def __enter__(self):
        insert_shim()

    def __exit__(self, exc, value, tb):
        remove_shim()
|
||||
|
||||
|
||||
def insert_shim():
    """Prepend ``DISTUTILS_FINDER`` to ``sys.meta_path`` so it takes priority."""
    sys.meta_path.insert(0, DISTUTILS_FINDER)
|
||||
|
||||
|
||||
def remove_shim():
    """Uninstall ``DISTUTILS_FINDER`` from ``sys.meta_path``; no-op if absent."""
    if DISTUTILS_FINDER in sys.meta_path:
        sys.meta_path.remove(DISTUTILS_FINDER)
|
||||
BIN
env/Lib/site-packages/_distutils_hack/__pycache__/__init__.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/_distutils_hack/__pycache__/__init__.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/_distutils_hack/__pycache__/override.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/_distutils_hack/__pycache__/override.cpython-310.pyc
vendored
Normal file
Binary file not shown.
1
env/Lib/site-packages/_distutils_hack/override.py
vendored
Normal file
1
env/Lib/site-packages/_distutils_hack/override.py
vendored
Normal file
|
|
@@ -0,0 +1 @@
|
|||
__import__('_distutils_hack').do_override()
|
||||
1
env/Lib/site-packages/distutils-precedence.pth
vendored
Normal file
1
env/Lib/site-packages/distutils-precedence.pth
vendored
Normal file
|
|
@@ -0,0 +1 @@
|
|||
import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'local') == 'local'; enabled and __import__('_distutils_hack').add_shim();
|
||||
0
env/Lib/site-packages/numpy-2.2.6-cp310-cp310-win_amd64.whl
vendored
Normal file
0
env/Lib/site-packages/numpy-2.2.6-cp310-cp310-win_amd64.whl
vendored
Normal file
2
env/Lib/site-packages/numpy-2.2.6.dist-info/DELVEWHEEL
vendored
Normal file
2
env/Lib/site-packages/numpy-2.2.6.dist-info/DELVEWHEEL
vendored
Normal file
|
|
@@ -0,0 +1,2 @@
|
|||
Version: 1.10.1
|
||||
Arguments: ['C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-7xf_vzet\\cp310-win_amd64\\build\\venv\\Scripts\\delvewheel', 'repair', '--add-path', 'C:/a/numpy/numpy/.openblas/lib', '-w', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-7xf_vzet\\cp310-win_amd64\\repaired_wheel', 'C:\\Users\\runneradmin\\AppData\\Local\\Temp\\cibw-run-7xf_vzet\\cp310-win_amd64\\built_wheel\\numpy-2.2.6-cp310-cp310-win_amd64.whl']
|
||||
1
env/Lib/site-packages/numpy-2.2.6.dist-info/INSTALLER
vendored
Normal file
1
env/Lib/site-packages/numpy-2.2.6.dist-info/INSTALLER
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
pip
|
||||
950
env/Lib/site-packages/numpy-2.2.6.dist-info/LICENSE.txt
vendored
Normal file
950
env/Lib/site-packages/numpy-2.2.6.dist-info/LICENSE.txt
vendored
Normal file
|
|
@@ -0,0 +1,950 @@
|
|||
Copyright (c) 2005-2024, NumPy Developers.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following
|
||||
disclaimer in the documentation and/or other materials provided
|
||||
with the distribution.
|
||||
|
||||
* Neither the name of the NumPy Developers nor the names of any
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
----
|
||||
|
||||
The NumPy repository and source distributions bundle several libraries that are
|
||||
compatibly licensed. We list these here.
|
||||
|
||||
Name: lapack-lite
|
||||
Files: numpy/linalg/lapack_lite/*
|
||||
License: BSD-3-Clause
|
||||
For details, see numpy/linalg/lapack_lite/LICENSE.txt
|
||||
|
||||
Name: dragon4
|
||||
Files: numpy/_core/src/multiarray/dragon4.c
|
||||
License: MIT
|
||||
For license text, see numpy/_core/src/multiarray/dragon4.c
|
||||
|
||||
Name: libdivide
|
||||
Files: numpy/_core/include/numpy/libdivide/*
|
||||
License: Zlib
|
||||
For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt
|
||||
|
||||
|
||||
Note that the following files are vendored in the repository and sdist but not
|
||||
installed in built numpy packages:
|
||||
|
||||
Name: Meson
|
||||
Files: vendored-meson/meson/*
|
||||
License: Apache 2.0
|
||||
For license text, see vendored-meson/meson/COPYING
|
||||
|
||||
Name: spin
|
||||
Files: .spin/cmds.py
|
||||
License: BSD-3
|
||||
For license text, see .spin/LICENSE
|
||||
|
||||
Name: tempita
|
||||
Files: numpy/_build_utils/tempita/*
|
||||
License: MIT
|
||||
For details, see numpy/_build_utils/tempita/LICENCE.txt
|
||||
|
||||
----
|
||||
|
||||
This binary distribution of NumPy also bundles the following software:
|
||||
|
||||
|
||||
Name: OpenBLAS
|
||||
Files: numpy.libs\libscipy_openblas*.dll
|
||||
Description: bundled as a dynamically linked library
|
||||
Availability: https://github.com/OpenMathLib/OpenBLAS/
|
||||
License: BSD-3-Clause
|
||||
Copyright (c) 2011-2014, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
Name: LAPACK
|
||||
Files: numpy.libs\libscipy_openblas*.dll
|
||||
Description: bundled in OpenBLAS
|
||||
Availability: https://github.com/OpenMathLib/OpenBLAS/
|
||||
License: BSD-3-Clause-Attribution
|
||||
Copyright (c) 1992-2013 The University of Tennessee and The University
|
||||
of Tennessee Research Foundation. All rights
|
||||
reserved.
|
||||
Copyright (c) 2000-2013 The University of California Berkeley. All
|
||||
rights reserved.
|
||||
Copyright (c) 2006-2013 The University of Colorado Denver. All rights
|
||||
reserved.
|
||||
|
||||
$COPYRIGHT$
|
||||
|
||||
Additional copyrights may follow
|
||||
|
||||
$HEADER$
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
- Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
- Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer listed
|
||||
in this license in the documentation and/or other materials
|
||||
provided with the distribution.
|
||||
|
||||
- Neither the name of the copyright holders nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
The copyright holders provide no reassurances that the source code
|
||||
provided does not infringe any patent, copyright, or any other
|
||||
intellectual property rights of third parties. The copyright holders
|
||||
disclaim any liability to any recipient for claims brought against
|
||||
recipient by any third party for infringement of that parties
|
||||
intellectual property rights.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
Name: GCC runtime library
|
||||
Files: numpy.libs\libscipy_openblas*.dll
|
||||
Description: statically linked to files compiled with gcc
|
||||
Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
|
||||
License: GPL-3.0-with-GCC-exception
|
||||
Copyright (C) 2002-2017 Free Software Foundation, Inc.
|
||||
|
||||
Libgfortran is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 3, or (at your option)
|
||||
any later version.
|
||||
|
||||
Libgfortran is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
Under Section 7 of GPL version 3, you are granted additional
|
||||
permissions described in the GCC Runtime Library Exception, version
|
||||
3.1, as published by the Free Software Foundation.
|
||||
|
||||
You should have received a copy of the GNU General Public License and
|
||||
a copy of the GCC Runtime Library Exception along with this program;
|
||||
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
|
||||
----
|
||||
|
||||
Full text of license texts referred to above follows (that they are
|
||||
listed below does not necessarily imply the conditions apply to the
|
||||
present binary release):
|
||||
|
||||
----
|
||||
|
||||
GCC RUNTIME LIBRARY EXCEPTION
|
||||
|
||||
Version 3.1, 31 March 2009
|
||||
|
||||
Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
This GCC Runtime Library Exception ("Exception") is an additional
|
||||
permission under section 7 of the GNU General Public License, version
|
||||
3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
|
||||
bears a notice placed by the copyright holder of the file stating that
|
||||
the file is governed by GPLv3 along with this Exception.
|
||||
|
||||
When you use GCC to compile a program, GCC may combine portions of
|
||||
certain GCC header files and runtime libraries with the compiled
|
||||
program. The purpose of this Exception is to allow compilation of
|
||||
non-GPL (including proprietary) programs to use, in this way, the
|
||||
header files and runtime libraries covered by this Exception.
|
||||
|
||||
0. Definitions.
|
||||
|
||||
A file is an "Independent Module" if it either requires the Runtime
|
||||
Library for execution after a Compilation Process, or makes use of an
|
||||
interface provided by the Runtime Library, but is not otherwise based
|
||||
on the Runtime Library.
|
||||
|
||||
"GCC" means a version of the GNU Compiler Collection, with or without
|
||||
modifications, governed by version 3 (or a specified later version) of
|
||||
the GNU General Public License (GPL) with the option of using any
|
||||
subsequent versions published by the FSF.
|
||||
|
||||
"GPL-compatible Software" is software whose conditions of propagation,
|
||||
modification and use would permit combination with GCC in accord with
|
||||
the license of GCC.
|
||||
|
||||
"Target Code" refers to output from any compiler for a real or virtual
|
||||
target processor architecture, in executable form or suitable for
|
||||
input to an assembler, loader, linker and/or execution
|
||||
phase. Notwithstanding that, Target Code does not include data in any
|
||||
format that is used as a compiler intermediate representation, or used
|
||||
for producing a compiler intermediate representation.
|
||||
|
||||
The "Compilation Process" transforms code entirely represented in
|
||||
non-intermediate languages designed for human-written code, and/or in
|
||||
Java Virtual Machine byte code, into Target Code. Thus, for example,
|
||||
use of source code generators and preprocessors need not be considered
|
||||
part of the Compilation Process, since the Compilation Process can be
|
||||
understood as starting with the output of the generators or
|
||||
preprocessors.
|
||||
|
||||
A Compilation Process is "Eligible" if it is done using GCC, alone or
|
||||
with other GPL-compatible software, or if it is done without using any
|
||||
work based on GCC. For example, using non-GPL-compatible Software to
|
||||
optimize any GCC intermediate representations would not qualify as an
|
||||
Eligible Compilation Process.
|
||||
|
||||
1. Grant of Additional Permission.
|
||||
|
||||
You have permission to propagate a work of Target Code formed by
|
||||
combining the Runtime Library with Independent Modules, even if such
|
||||
propagation would otherwise violate the terms of GPLv3, provided that
|
||||
all Target Code was generated by Eligible Compilation Processes. You
|
||||
may then convey such a combination under terms of your choice,
|
||||
consistent with the licensing of the Independent Modules.
|
||||
|
||||
2. No Weakening of GCC Copyleft.
|
||||
|
||||
The availability of this Exception does not imply any general
|
||||
presumption that third-party software is unaffected by the copyleft
|
||||
requirements of the license of GCC.
|
||||
|
||||
----
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
||||
|
||||
1071
env/Lib/site-packages/numpy-2.2.6.dist-info/METADATA
vendored
Normal file
1071
env/Lib/site-packages/numpy-2.2.6.dist-info/METADATA
vendored
Normal file
File diff suppressed because it is too large
Load diff
1519
env/Lib/site-packages/numpy-2.2.6.dist-info/RECORD
vendored
Normal file
1519
env/Lib/site-packages/numpy-2.2.6.dist-info/RECORD
vendored
Normal file
File diff suppressed because it is too large
Load diff
0
env/Lib/site-packages/numpy-2.2.6.dist-info/REQUESTED
vendored
Normal file
0
env/Lib/site-packages/numpy-2.2.6.dist-info/REQUESTED
vendored
Normal file
4
env/Lib/site-packages/numpy-2.2.6.dist-info/WHEEL
vendored
Normal file
4
env/Lib/site-packages/numpy-2.2.6.dist-info/WHEEL
vendored
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
Wheel-Version: 1.0
|
||||
Generator: meson
|
||||
Root-Is-Purelib: false
|
||||
Tag: cp310-cp310-win_amd64
|
||||
10
env/Lib/site-packages/numpy-2.2.6.dist-info/entry_points.txt
vendored
Normal file
10
env/Lib/site-packages/numpy-2.2.6.dist-info/entry_points.txt
vendored
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
[array_api]
|
||||
numpy = numpy
|
||||
|
||||
[pyinstaller40]
|
||||
hook-dirs = numpy:_pyinstaller_hooks_dir
|
||||
|
||||
[console_scripts]
|
||||
f2py = numpy.f2py.f2py2e:main
|
||||
numpy-config = numpy._configtool:main
|
||||
|
||||
BIN
env/Lib/site-packages/numpy.libs/libscipy_openblas64_-13e2df515630b4a41f92893938845698.dll
vendored
Normal file
BIN
env/Lib/site-packages/numpy.libs/libscipy_openblas64_-13e2df515630b4a41f92893938845698.dll
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy.libs/msvcp140-263139962577ecda4cd9469ca360a746.dll
vendored
Normal file
BIN
env/Lib/site-packages/numpy.libs/msvcp140-263139962577ecda4cd9469ca360a746.dll
vendored
Normal file
Binary file not shown.
170
env/Lib/site-packages/numpy/__config__.py
vendored
Normal file
170
env/Lib/site-packages/numpy/__config__.py
vendored
Normal file
|
|
@ -0,0 +1,170 @@
|
|||
# This file is generated by numpy's build process
|
||||
# It contains system_info results at the time of building this package.
|
||||
from enum import Enum
|
||||
from numpy._core._multiarray_umath import (
|
||||
__cpu_features__,
|
||||
__cpu_baseline__,
|
||||
__cpu_dispatch__,
|
||||
)
|
||||
|
||||
__all__ = ["show_config"]
|
||||
_built_with_meson = True
|
||||
|
||||
|
||||
class DisplayModes(Enum):
|
||||
stdout = "stdout"
|
||||
dicts = "dicts"
|
||||
|
||||
|
||||
def _cleanup(d):
|
||||
"""
|
||||
Removes empty values in a `dict` recursively
|
||||
This ensures we remove values that Meson could not provide to CONFIG
|
||||
"""
|
||||
if isinstance(d, dict):
|
||||
return {k: _cleanup(v) for k, v in d.items() if v and _cleanup(v)}
|
||||
else:
|
||||
return d
|
||||
|
||||
|
||||
CONFIG = _cleanup(
|
||||
{
|
||||
"Compilers": {
|
||||
"c": {
|
||||
"name": "msvc",
|
||||
"linker": r"link",
|
||||
"version": "19.29.30159",
|
||||
"commands": r"cl",
|
||||
"args": r"",
|
||||
"linker args": r"",
|
||||
},
|
||||
"cython": {
|
||||
"name": "cython",
|
||||
"linker": r"cython",
|
||||
"version": "3.1.0",
|
||||
"commands": r"cython",
|
||||
"args": r"",
|
||||
"linker args": r"",
|
||||
},
|
||||
"c++": {
|
||||
"name": "msvc",
|
||||
"linker": r"link",
|
||||
"version": "19.29.30159",
|
||||
"commands": r"cl",
|
||||
"args": r"",
|
||||
"linker args": r"",
|
||||
},
|
||||
},
|
||||
"Machine Information": {
|
||||
"host": {
|
||||
"cpu": "x86_64",
|
||||
"family": "x86_64",
|
||||
"endian": "little",
|
||||
"system": "windows",
|
||||
},
|
||||
"build": {
|
||||
"cpu": "x86_64",
|
||||
"family": "x86_64",
|
||||
"endian": "little",
|
||||
"system": "windows",
|
||||
},
|
||||
"cross-compiled": bool("False".lower().replace("false", "")),
|
||||
},
|
||||
"Build Dependencies": {
|
||||
"blas": {
|
||||
"name": "scipy-openblas",
|
||||
"found": bool("True".lower().replace("false", "")),
|
||||
"version": "0.3.29",
|
||||
"detection method": "pkgconfig",
|
||||
"include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-7xf_vzet/cp310-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include",
|
||||
"lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-7xf_vzet/cp310-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib",
|
||||
"openblas configuration": r"OpenBLAS 0.3.29 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=24",
|
||||
"pc file directory": r"C:/a/numpy/numpy/.openblas",
|
||||
},
|
||||
"lapack": {
|
||||
"name": "scipy-openblas",
|
||||
"found": bool("True".lower().replace("false", "")),
|
||||
"version": "0.3.29",
|
||||
"detection method": "pkgconfig",
|
||||
"include directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-7xf_vzet/cp310-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/include",
|
||||
"lib directory": r"C:/Users/runneradmin/AppData/Local/Temp/cibw-run-7xf_vzet/cp310-win_amd64/build/venv/Lib/site-packages/scipy_openblas64/lib",
|
||||
"openblas configuration": r"OpenBLAS 0.3.29 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=24",
|
||||
"pc file directory": r"C:/a/numpy/numpy/.openblas",
|
||||
},
|
||||
},
|
||||
"Python Information": {
|
||||
"path": r"C:\Users\runneradmin\AppData\Local\Temp\build-env-r6iiznem\Scripts\python.exe",
|
||||
"version": "3.10",
|
||||
},
|
||||
"SIMD Extensions": {
|
||||
"baseline": __cpu_baseline__,
|
||||
"found": [
|
||||
feature for feature in __cpu_dispatch__ if __cpu_features__[feature]
|
||||
],
|
||||
"not found": [
|
||||
feature for feature in __cpu_dispatch__ if not __cpu_features__[feature]
|
||||
],
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def _check_pyyaml():
|
||||
import yaml
|
||||
|
||||
return yaml
|
||||
|
||||
|
||||
def show(mode=DisplayModes.stdout.value):
|
||||
"""
|
||||
Show libraries and system information on which NumPy was built
|
||||
and is being used
|
||||
|
||||
Parameters
|
||||
----------
|
||||
mode : {`'stdout'`, `'dicts'`}, optional.
|
||||
Indicates how to display the config information.
|
||||
`'stdout'` prints to console, `'dicts'` returns a dictionary
|
||||
of the configuration.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : {`dict`, `None`}
|
||||
If mode is `'dicts'`, a dict is returned, else None
|
||||
|
||||
See Also
|
||||
--------
|
||||
get_include : Returns the directory containing NumPy C
|
||||
header files.
|
||||
|
||||
Notes
|
||||
-----
|
||||
1. The `'stdout'` mode will give more readable
|
||||
output if ``pyyaml`` is installed
|
||||
|
||||
"""
|
||||
if mode == DisplayModes.stdout.value:
|
||||
try: # Non-standard library, check import
|
||||
yaml = _check_pyyaml()
|
||||
|
||||
print(yaml.dump(CONFIG))
|
||||
except ModuleNotFoundError:
|
||||
import warnings
|
||||
import json
|
||||
|
||||
warnings.warn("Install `pyyaml` for better output", stacklevel=1)
|
||||
print(json.dumps(CONFIG, indent=2))
|
||||
elif mode == DisplayModes.dicts.value:
|
||||
return CONFIG
|
||||
else:
|
||||
raise AttributeError(
|
||||
f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}"
|
||||
)
|
||||
|
||||
|
||||
def show_config(mode=DisplayModes.stdout.value):
|
||||
return show(mode)
|
||||
|
||||
|
||||
show_config.__doc__ = show.__doc__
|
||||
show_config.__module__ = "numpy"
|
||||
102
env/Lib/site-packages/numpy/__config__.pyi
vendored
Normal file
102
env/Lib/site-packages/numpy/__config__.pyi
vendored
Normal file
|
|
@ -0,0 +1,102 @@
|
|||
from enum import Enum
|
||||
from types import ModuleType
|
||||
from typing import Final, Literal as L, TypedDict, overload, type_check_only
|
||||
from typing_extensions import NotRequired
|
||||
|
||||
_CompilerConfigDictValue = TypedDict(
|
||||
"_CompilerConfigDictValue",
|
||||
{
|
||||
"name": str,
|
||||
"linker": str,
|
||||
"version": str,
|
||||
"commands": str,
|
||||
"args": str,
|
||||
"linker args": str,
|
||||
},
|
||||
)
|
||||
_CompilerConfigDict = TypedDict(
|
||||
"_CompilerConfigDict",
|
||||
{
|
||||
"c": _CompilerConfigDictValue,
|
||||
"cython": _CompilerConfigDictValue,
|
||||
"c++": _CompilerConfigDictValue,
|
||||
},
|
||||
)
|
||||
_MachineInformationDict = TypedDict(
|
||||
"_MachineInformationDict",
|
||||
{
|
||||
"host":_MachineInformationDictValue,
|
||||
"build": _MachineInformationDictValue,
|
||||
"cross-compiled": NotRequired[L[True]],
|
||||
},
|
||||
)
|
||||
|
||||
@type_check_only
|
||||
class _MachineInformationDictValue(TypedDict):
|
||||
cpu: str
|
||||
family: str
|
||||
endian: L["little", "big"]
|
||||
system: str
|
||||
|
||||
_BuildDependenciesDictValue = TypedDict(
|
||||
"_BuildDependenciesDictValue",
|
||||
{
|
||||
"name": str,
|
||||
"found": NotRequired[L[True]],
|
||||
"version": str,
|
||||
"include directory": str,
|
||||
"lib directory": str,
|
||||
"openblas configuration": str,
|
||||
"pc file directory": str,
|
||||
},
|
||||
)
|
||||
|
||||
class _BuildDependenciesDict(TypedDict):
|
||||
blas: _BuildDependenciesDictValue
|
||||
lapack: _BuildDependenciesDictValue
|
||||
|
||||
class _PythonInformationDict(TypedDict):
|
||||
path: str
|
||||
version: str
|
||||
|
||||
_SIMDExtensionsDict = TypedDict(
|
||||
"_SIMDExtensionsDict",
|
||||
{
|
||||
"baseline": list[str],
|
||||
"found": list[str],
|
||||
"not found": list[str],
|
||||
},
|
||||
)
|
||||
|
||||
_ConfigDict = TypedDict(
|
||||
"_ConfigDict",
|
||||
{
|
||||
"Compilers": _CompilerConfigDict,
|
||||
"Machine Information": _MachineInformationDict,
|
||||
"Build Dependencies": _BuildDependenciesDict,
|
||||
"Python Information": _PythonInformationDict,
|
||||
"SIMD Extensions": _SIMDExtensionsDict,
|
||||
},
|
||||
)
|
||||
|
||||
###
|
||||
|
||||
__all__ = ["show_config"]
|
||||
|
||||
CONFIG: Final[_ConfigDict] = ...
|
||||
|
||||
class DisplayModes(Enum):
|
||||
stdout = "stdout"
|
||||
dicts = "dicts"
|
||||
|
||||
def _check_pyyaml() -> ModuleType: ...
|
||||
|
||||
@overload
|
||||
def show(mode: L["stdout"] = "stdout") -> None: ...
|
||||
@overload
|
||||
def show(mode: L["dicts"]) -> _ConfigDict: ...
|
||||
|
||||
@overload
|
||||
def show_config(mode: L["stdout"] = "stdout") -> None: ...
|
||||
@overload
|
||||
def show_config(mode: L["dicts"]) -> _ConfigDict: ...
|
||||
1250
env/Lib/site-packages/numpy/__init__.cython-30.pxd
vendored
Normal file
1250
env/Lib/site-packages/numpy/__init__.cython-30.pxd
vendored
Normal file
File diff suppressed because it is too large
Load diff
1164
env/Lib/site-packages/numpy/__init__.pxd
vendored
Normal file
1164
env/Lib/site-packages/numpy/__init__.pxd
vendored
Normal file
File diff suppressed because it is too large
Load diff
560
env/Lib/site-packages/numpy/__init__.py
vendored
Normal file
560
env/Lib/site-packages/numpy/__init__.py
vendored
Normal file
|
|
@ -0,0 +1,560 @@
|
|||
"""
|
||||
NumPy
|
||||
=====
|
||||
|
||||
Provides
|
||||
1. An array object of arbitrary homogeneous items
|
||||
2. Fast mathematical operations over arrays
|
||||
3. Linear Algebra, Fourier Transforms, Random Number Generation
|
||||
|
||||
How to use the documentation
|
||||
----------------------------
|
||||
Documentation is available in two forms: docstrings provided
|
||||
with the code, and a loose standing reference guide, available from
|
||||
`the NumPy homepage <https://numpy.org>`_.
|
||||
|
||||
We recommend exploring the docstrings using
|
||||
`IPython <https://ipython.org>`_, an advanced Python shell with
|
||||
TAB-completion and introspection capabilities. See below for further
|
||||
instructions.
|
||||
|
||||
The docstring examples assume that `numpy` has been imported as ``np``::
|
||||
|
||||
>>> import numpy as np
|
||||
|
||||
Code snippets are indicated by three greater-than signs::
|
||||
|
||||
>>> x = 42
|
||||
>>> x = x + 1
|
||||
|
||||
Use the built-in ``help`` function to view a function's docstring::
|
||||
|
||||
>>> help(np.sort)
|
||||
... # doctest: +SKIP
|
||||
|
||||
For some objects, ``np.info(obj)`` may provide additional help. This is
|
||||
particularly true if you see the line "Help on ufunc object:" at the top
|
||||
of the help() page. Ufuncs are implemented in C, not Python, for speed.
|
||||
The native Python help() does not know how to view their help, but our
|
||||
np.info() function does.
|
||||
|
||||
Available subpackages
|
||||
---------------------
|
||||
lib
|
||||
Basic functions used by several sub-packages.
|
||||
random
|
||||
Core Random Tools
|
||||
linalg
|
||||
Core Linear Algebra Tools
|
||||
fft
|
||||
Core FFT routines
|
||||
polynomial
|
||||
Polynomial tools
|
||||
testing
|
||||
NumPy testing tools
|
||||
distutils
|
||||
Enhancements to distutils with support for
|
||||
Fortran compilers support and more (for Python <= 3.11)
|
||||
|
||||
Utilities
|
||||
---------
|
||||
test
|
||||
Run numpy unittests
|
||||
show_config
|
||||
Show numpy build configuration
|
||||
__version__
|
||||
NumPy version string
|
||||
|
||||
Viewing documentation using IPython
|
||||
-----------------------------------
|
||||
|
||||
Start IPython and import `numpy` usually under the alias ``np``: `import
|
||||
numpy as np`. Then, directly past or use the ``%cpaste`` magic to paste
|
||||
examples into the shell. To see which functions are available in `numpy`,
|
||||
type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
|
||||
``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
|
||||
down the list. To view the docstring for a function, use
|
||||
``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
|
||||
the source code).
|
||||
|
||||
Copies vs. in-place operation
|
||||
-----------------------------
|
||||
Most of the functions in `numpy` return a copy of the array argument
|
||||
(e.g., `np.sort`). In-place versions of these functions are often
|
||||
available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
|
||||
Exceptions to this rule are documented.
|
||||
|
||||
"""
|
||||
|
||||
|
||||
# start delvewheel patch
|
||||
def _delvewheel_patch_1_10_1():
|
||||
import os
|
||||
if os.path.isdir(libs_dir := os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'numpy.libs'))):
|
||||
os.add_dll_directory(libs_dir)
|
||||
|
||||
|
||||
_delvewheel_patch_1_10_1()
|
||||
del _delvewheel_patch_1_10_1
|
||||
# end delvewheel patch
|
||||
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
from ._globals import _NoValue, _CopyMode
|
||||
from ._expired_attrs_2_0 import __expired_attributes__
|
||||
|
||||
|
||||
# If a version with git hash was stored, use that instead
|
||||
from . import version
|
||||
from .version import __version__
|
||||
|
||||
# We first need to detect if we're being called as part of the numpy setup
|
||||
# procedure itself in a reliable manner.
|
||||
try:
|
||||
__NUMPY_SETUP__
|
||||
except NameError:
|
||||
__NUMPY_SETUP__ = False
|
||||
|
||||
if __NUMPY_SETUP__:
|
||||
sys.stderr.write('Running from numpy source directory.\n')
|
||||
else:
|
||||
# Allow distributors to run custom init code before importing numpy._core
|
||||
from . import _distributor_init
|
||||
|
||||
try:
|
||||
from numpy.__config__ import show_config
|
||||
except ImportError as e:
|
||||
msg = """Error importing numpy: you should not try to import numpy from
|
||||
its source directory; please exit the numpy source tree, and relaunch
|
||||
your python interpreter from there."""
|
||||
raise ImportError(msg) from e
|
||||
|
||||
from . import _core
|
||||
from ._core import (
|
||||
False_, ScalarType, True_,
|
||||
abs, absolute, acos, acosh, add, all, allclose,
|
||||
amax, amin, any, arange, arccos, arccosh, arcsin, arcsinh,
|
||||
arctan, arctan2, arctanh, argmax, argmin, argpartition, argsort,
|
||||
argwhere, around, array, array2string, array_equal, array_equiv,
|
||||
array_repr, array_str, asanyarray, asarray, ascontiguousarray,
|
||||
asfortranarray, asin, asinh, atan, atanh, atan2, astype, atleast_1d,
|
||||
atleast_2d, atleast_3d, base_repr, binary_repr, bitwise_and,
|
||||
bitwise_count, bitwise_invert, bitwise_left_shift, bitwise_not,
|
||||
bitwise_or, bitwise_right_shift, bitwise_xor, block, bool, bool_,
|
||||
broadcast, busday_count, busday_offset, busdaycalendar, byte, bytes_,
|
||||
can_cast, cbrt, cdouble, ceil, character, choose, clip, clongdouble,
|
||||
complex128, complex64, complexfloating, compress, concat, concatenate,
|
||||
conj, conjugate, convolve, copysign, copyto, correlate, cos, cosh,
|
||||
count_nonzero, cross, csingle, cumprod, cumsum, cumulative_prod,
|
||||
cumulative_sum, datetime64, datetime_as_string, datetime_data,
|
||||
deg2rad, degrees, diagonal, divide, divmod, dot, double, dtype, e,
|
||||
einsum, einsum_path, empty, empty_like, equal, errstate, euler_gamma,
|
||||
exp, exp2, expm1, fabs, finfo, flatiter, flatnonzero, flexible,
|
||||
float16, float32, float64, float_power, floating, floor, floor_divide,
|
||||
fmax, fmin, fmod, format_float_positional, format_float_scientific,
|
||||
frexp, from_dlpack, frombuffer, fromfile, fromfunction, fromiter,
|
||||
frompyfunc, fromstring, full, full_like, gcd, generic, geomspace,
|
||||
get_printoptions, getbufsize, geterr, geterrcall, greater,
|
||||
greater_equal, half, heaviside, hstack, hypot, identity, iinfo,
|
||||
indices, inexact, inf, inner, int16, int32, int64, int8, int_, intc,
|
||||
integer, intp, invert, is_busday, isclose, isdtype, isfinite,
|
||||
isfortran, isinf, isnan, isnat, isscalar, issubdtype, lcm, ldexp,
|
||||
left_shift, less, less_equal, lexsort, linspace, little_endian, log,
|
||||
log10, log1p, log2, logaddexp, logaddexp2, logical_and, logical_not,
|
||||
logical_or, logical_xor, logspace, long, longdouble, longlong, matmul,
|
||||
matvec, matrix_transpose, max, maximum, may_share_memory, mean, memmap,
|
||||
min, min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan,
|
||||
ndarray, ndim, nditer, negative, nested_iters, newaxis, nextafter,
|
||||
nonzero, not_equal, number, object_, ones, ones_like, outer, partition,
|
||||
permute_dims, pi, positive, pow, power, printoptions, prod,
|
||||
promote_types, ptp, put, putmask, rad2deg, radians, ravel, recarray,
|
||||
reciprocal, record, remainder, repeat, require, reshape, resize,
|
||||
result_type, right_shift, rint, roll, rollaxis, round, sctypeDict,
|
||||
searchsorted, set_printoptions, setbufsize, seterr, seterrcall, shape,
|
||||
shares_memory, short, sign, signbit, signedinteger, sin, single, sinh,
|
||||
size, sort, spacing, sqrt, square, squeeze, stack, std,
|
||||
str_, subtract, sum, swapaxes, take, tan, tanh, tensordot,
|
||||
timedelta64, trace, transpose, true_divide, trunc, typecodes, ubyte,
|
||||
ufunc, uint, uint16, uint32, uint64, uint8, uintc, uintp, ulong,
|
||||
ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot,
|
||||
vecmat, void, vstack, where, zeros, zeros_like
|
||||
)
|
||||
|
||||
# NOTE: It's still under discussion whether these aliases
|
||||
# should be removed.
|
||||
for ta in ["float96", "float128", "complex192", "complex256"]:
|
||||
try:
|
||||
globals()[ta] = getattr(_core, ta)
|
||||
except AttributeError:
|
||||
pass
|
||||
del ta
|
||||
|
||||
from . import lib
|
||||
from .lib import scimath as emath
|
||||
from .lib._histograms_impl import (
|
||||
histogram, histogram_bin_edges, histogramdd
|
||||
)
|
||||
from .lib._nanfunctions_impl import (
|
||||
nanargmax, nanargmin, nancumprod, nancumsum, nanmax, nanmean,
|
||||
nanmedian, nanmin, nanpercentile, nanprod, nanquantile, nanstd,
|
||||
nansum, nanvar
|
||||
)
|
||||
from .lib._function_base_impl import (
|
||||
select, piecewise, trim_zeros, copy, iterable, percentile, diff,
|
||||
gradient, angle, unwrap, sort_complex, flip, rot90, extract, place,
|
||||
vectorize, asarray_chkfinite, average, bincount, digitize, cov,
|
||||
corrcoef, median, sinc, hamming, hanning, bartlett, blackman,
|
||||
kaiser, trapezoid, trapz, i0, meshgrid, delete, insert, append,
|
||||
interp, quantile
|
||||
)
|
||||
from .lib._twodim_base_impl import (
|
||||
diag, diagflat, eye, fliplr, flipud, tri, triu, tril, vander,
|
||||
histogram2d, mask_indices, tril_indices, tril_indices_from,
|
||||
triu_indices, triu_indices_from
|
||||
)
|
||||
from .lib._shape_base_impl import (
|
||||
apply_over_axes, apply_along_axis, array_split, column_stack, dsplit,
|
||||
dstack, expand_dims, hsplit, kron, put_along_axis, row_stack, split,
|
||||
take_along_axis, tile, vsplit
|
||||
)
|
||||
from .lib._type_check_impl import (
|
||||
iscomplexobj, isrealobj, imag, iscomplex, isreal, nan_to_num, real,
|
||||
real_if_close, typename, mintypecode, common_type
|
||||
)
|
||||
from .lib._arraysetops_impl import (
|
||||
ediff1d, in1d, intersect1d, isin, setdiff1d, setxor1d, union1d,
|
||||
unique, unique_all, unique_counts, unique_inverse, unique_values
|
||||
)
|
||||
from .lib._ufunclike_impl import fix, isneginf, isposinf
|
||||
from .lib._arraypad_impl import pad
|
||||
from .lib._utils_impl import (
|
||||
show_runtime, get_include, info
|
||||
)
|
||||
from .lib._stride_tricks_impl import (
|
||||
broadcast_arrays, broadcast_shapes, broadcast_to
|
||||
)
|
||||
from .lib._polynomial_impl import (
|
||||
poly, polyint, polyder, polyadd, polysub, polymul, polydiv, polyval,
|
||||
polyfit, poly1d, roots
|
||||
)
|
||||
from .lib._npyio_impl import (
|
||||
savetxt, loadtxt, genfromtxt, load, save, savez, packbits,
|
||||
savez_compressed, unpackbits, fromregex
|
||||
)
|
||||
from .lib._index_tricks_impl import (
|
||||
diag_indices_from, diag_indices, fill_diagonal, ndindex, ndenumerate,
|
||||
ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index,
|
||||
index_exp
|
||||
)
|
||||
|
||||
from . import matrixlib as _mat
|
||||
from .matrixlib import (
|
||||
asmatrix, bmat, matrix
|
||||
)
|
||||
|
||||
# public submodules are imported lazily, therefore are accessible from
|
||||
# __getattr__. Note that `distutils` (deprecated) and `array_api`
|
||||
# (experimental label) are not added here, because `from numpy import *`
|
||||
# must not raise any warnings - that's too disruptive.
|
||||
__numpy_submodules__ = {
|
||||
"linalg", "fft", "dtypes", "random", "polynomial", "ma",
|
||||
"exceptions", "lib", "ctypeslib", "testing", "typing",
|
||||
"f2py", "test", "rec", "char", "core", "strings",
|
||||
}
|
||||
|
||||
# We build warning messages for former attributes
|
||||
_msg = (
|
||||
"module 'numpy' has no attribute '{n}'.\n"
|
||||
"`np.{n}` was a deprecated alias for the builtin `{n}`. "
|
||||
"To avoid this error in existing code, use `{n}` by itself. "
|
||||
"Doing this will not modify any behavior and is safe. {extended_msg}\n"
|
||||
"The aliases was originally deprecated in NumPy 1.20; for more "
|
||||
"details and guidance see the original release note at:\n"
|
||||
" https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
|
||||
|
||||
_specific_msg = (
|
||||
"If you specifically wanted the numpy scalar type, use `np.{}` here.")
|
||||
|
||||
_int_extended_msg = (
|
||||
"When replacing `np.{}`, you may wish to use e.g. `np.int64` "
|
||||
"or `np.int32` to specify the precision. If you wish to review "
|
||||
"your current use, check the release note link for "
|
||||
"additional information.")
|
||||
|
||||
_type_info = [
|
||||
("object", ""), # The NumPy scalar only exists by name.
|
||||
("float", _specific_msg.format("float64")),
|
||||
("complex", _specific_msg.format("complex128")),
|
||||
("str", _specific_msg.format("str_")),
|
||||
("int", _int_extended_msg.format("int"))]
|
||||
|
||||
__former_attrs__ = {
|
||||
n: _msg.format(n=n, extended_msg=extended_msg)
|
||||
for n, extended_msg in _type_info
|
||||
}
|
||||
|
||||
|
||||
# Some of these could be defined right away, but most were aliases to
|
||||
# the Python objects and only removed in NumPy 1.24. Defining them should
|
||||
# probably wait for NumPy 1.26 or 2.0.
|
||||
# When defined, these should possibly not be added to `__all__` to avoid
|
||||
# import with `from numpy import *`.
|
||||
__future_scalars__ = {"str", "bytes", "object"}
|
||||
|
||||
__array_api_version__ = "2023.12"
|
||||
|
||||
from ._array_api_info import __array_namespace_info__
|
||||
|
||||
# now that numpy core module is imported, can initialize limits
|
||||
_core.getlimits._register_known_types()
|
||||
|
||||
__all__ = list(
|
||||
__numpy_submodules__ |
|
||||
set(_core.__all__) |
|
||||
set(_mat.__all__) |
|
||||
set(lib._histograms_impl.__all__) |
|
||||
set(lib._nanfunctions_impl.__all__) |
|
||||
set(lib._function_base_impl.__all__) |
|
||||
set(lib._twodim_base_impl.__all__) |
|
||||
set(lib._shape_base_impl.__all__) |
|
||||
set(lib._type_check_impl.__all__) |
|
||||
set(lib._arraysetops_impl.__all__) |
|
||||
set(lib._ufunclike_impl.__all__) |
|
||||
set(lib._arraypad_impl.__all__) |
|
||||
set(lib._utils_impl.__all__) |
|
||||
set(lib._stride_tricks_impl.__all__) |
|
||||
set(lib._polynomial_impl.__all__) |
|
||||
set(lib._npyio_impl.__all__) |
|
||||
set(lib._index_tricks_impl.__all__) |
|
||||
{"emath", "show_config", "__version__", "__array_namespace_info__"}
|
||||
)
|
||||
|
||||
# Filter out Cython harmless warnings
|
||||
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
|
||||
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
|
||||
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
|
||||
|
||||
def __getattr__(attr):
|
||||
# Warn for expired attributes
|
||||
import warnings
|
||||
|
||||
if attr == "linalg":
|
||||
import numpy.linalg as linalg
|
||||
return linalg
|
||||
elif attr == "fft":
|
||||
import numpy.fft as fft
|
||||
return fft
|
||||
elif attr == "dtypes":
|
||||
import numpy.dtypes as dtypes
|
||||
return dtypes
|
||||
elif attr == "random":
|
||||
import numpy.random as random
|
||||
return random
|
||||
elif attr == "polynomial":
|
||||
import numpy.polynomial as polynomial
|
||||
return polynomial
|
||||
elif attr == "ma":
|
||||
import numpy.ma as ma
|
||||
return ma
|
||||
elif attr == "ctypeslib":
|
||||
import numpy.ctypeslib as ctypeslib
|
||||
return ctypeslib
|
||||
elif attr == "exceptions":
|
||||
import numpy.exceptions as exceptions
|
||||
return exceptions
|
||||
elif attr == "testing":
|
||||
import numpy.testing as testing
|
||||
return testing
|
||||
elif attr == "matlib":
|
||||
import numpy.matlib as matlib
|
||||
return matlib
|
||||
elif attr == "f2py":
|
||||
import numpy.f2py as f2py
|
||||
return f2py
|
||||
elif attr == "typing":
|
||||
import numpy.typing as typing
|
||||
return typing
|
||||
elif attr == "rec":
|
||||
import numpy.rec as rec
|
||||
return rec
|
||||
elif attr == "char":
|
||||
import numpy.char as char
|
||||
return char
|
||||
elif attr == "array_api":
|
||||
raise AttributeError("`numpy.array_api` is not available from "
|
||||
"numpy 2.0 onwards", name=None)
|
||||
elif attr == "core":
|
||||
import numpy.core as core
|
||||
return core
|
||||
elif attr == "strings":
|
||||
import numpy.strings as strings
|
||||
return strings
|
||||
elif attr == "distutils":
|
||||
if 'distutils' in __numpy_submodules__:
|
||||
import numpy.distutils as distutils
|
||||
return distutils
|
||||
else:
|
||||
raise AttributeError("`numpy.distutils` is not available from "
|
||||
"Python 3.12 onwards", name=None)
|
||||
|
||||
if attr in __future_scalars__:
|
||||
# And future warnings for those that will change, but also give
|
||||
# the AttributeError
|
||||
warnings.warn(
|
||||
f"In the future `np.{attr}` will be defined as the "
|
||||
"corresponding NumPy scalar.", FutureWarning, stacklevel=2)
|
||||
|
||||
if attr in __former_attrs__:
|
||||
raise AttributeError(__former_attrs__[attr], name=None)
|
||||
|
||||
if attr in __expired_attributes__:
|
||||
raise AttributeError(
|
||||
f"`np.{attr}` was removed in the NumPy 2.0 release. "
|
||||
f"{__expired_attributes__[attr]}",
|
||||
name=None
|
||||
)
|
||||
|
||||
if attr == "chararray":
|
||||
warnings.warn(
|
||||
"`np.chararray` is deprecated and will be removed from "
|
||||
"the main namespace in the future. Use an array with a string "
|
||||
"or bytes dtype instead.", DeprecationWarning, stacklevel=2)
|
||||
import numpy.char as char
|
||||
return char.chararray
|
||||
|
||||
raise AttributeError("module {!r} has no attribute "
|
||||
"{!r}".format(__name__, attr))
|
||||
|
||||
def __dir__():
|
||||
public_symbols = (
|
||||
globals().keys() | __numpy_submodules__
|
||||
)
|
||||
public_symbols -= {
|
||||
"matrixlib", "matlib", "tests", "conftest", "version",
|
||||
"compat", "distutils", "array_api"
|
||||
}
|
||||
return list(public_symbols)
|
||||
|
||||
# Pytest testing
|
||||
from numpy._pytesttester import PytestTester
|
||||
test = PytestTester(__name__)
|
||||
del PytestTester
|
||||
|
||||
def _sanity_check():
|
||||
"""
|
||||
Quick sanity checks for common bugs caused by environment.
|
||||
There are some cases e.g. with wrong BLAS ABI that cause wrong
|
||||
results under specific runtime conditions that are not necessarily
|
||||
achieved during test suite runs, and it is useful to catch those early.
|
||||
|
||||
See https://github.com/numpy/numpy/issues/8577 and other
|
||||
similar bug reports.
|
||||
|
||||
"""
|
||||
try:
|
||||
x = ones(2, dtype=float32)
|
||||
if not abs(x.dot(x) - float32(2.0)) < 1e-5:
|
||||
raise AssertionError
|
||||
except AssertionError:
|
||||
msg = ("The current Numpy installation ({!r}) fails to "
|
||||
"pass simple sanity checks. This can be caused for example "
|
||||
"by incorrect BLAS library being linked in, or by mixing "
|
||||
"package managers (pip, conda, apt, ...). Search closed "
|
||||
"numpy issues for similar problems.")
|
||||
raise RuntimeError(msg.format(__file__)) from None
|
||||
|
||||
_sanity_check()
|
||||
del _sanity_check
|
||||
|
||||
def _mac_os_check():
|
||||
"""
|
||||
Quick Sanity check for Mac OS look for accelerate build bugs.
|
||||
Testing numpy polyfit calls init_dgelsd(LAPACK)
|
||||
"""
|
||||
try:
|
||||
c = array([3., 2., 1.])
|
||||
x = linspace(0, 2, 5)
|
||||
y = polyval(c, x)
|
||||
_ = polyfit(x, y, 2, cov=True)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
if sys.platform == "darwin":
|
||||
from . import exceptions
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
_mac_os_check()
|
||||
# Throw runtime error, if the test failed Check for warning and error_message
|
||||
if len(w) > 0:
|
||||
for _wn in w:
|
||||
if _wn.category is exceptions.RankWarning:
|
||||
# Ignore other warnings, they may not be relevant (see gh-25433).
|
||||
error_message = (
|
||||
f"{_wn.category.__name__}: {_wn.message}"
|
||||
)
|
||||
msg = (
|
||||
"Polyfit sanity test emitted a warning, most likely due "
|
||||
"to using a buggy Accelerate backend."
|
||||
"\nIf you compiled yourself, more information is available at:"
|
||||
"\nhttps://numpy.org/devdocs/building/index.html"
|
||||
"\nOtherwise report this to the vendor "
|
||||
"that provided NumPy.\n\n{}\n".format(error_message))
|
||||
raise RuntimeError(msg)
|
||||
del _wn
|
||||
del w
|
||||
del _mac_os_check
|
||||
|
||||
def hugepage_setup():
|
||||
"""
|
||||
We usually use madvise hugepages support, but on some old kernels it
|
||||
is slow and thus better avoided. Specifically kernel version 4.6
|
||||
had a bug fix which probably fixed this:
|
||||
https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
|
||||
"""
|
||||
use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
|
||||
if sys.platform == "linux" and use_hugepage is None:
|
||||
# If there is an issue with parsing the kernel version,
|
||||
# set use_hugepage to 0. Usage of LooseVersion will handle
|
||||
# the kernel version parsing better, but avoided since it
|
||||
# will increase the import time.
|
||||
# See: #16679 for related discussion.
|
||||
try:
|
||||
use_hugepage = 1
|
||||
kernel_version = os.uname().release.split(".")[:2]
|
||||
kernel_version = tuple(int(v) for v in kernel_version)
|
||||
if kernel_version < (4, 6):
|
||||
use_hugepage = 0
|
||||
except ValueError:
|
||||
use_hugepage = 0
|
||||
elif use_hugepage is None:
|
||||
# This is not Linux, so it should not matter, just enable anyway
|
||||
use_hugepage = 1
|
||||
else:
|
||||
use_hugepage = int(use_hugepage)
|
||||
return use_hugepage
|
||||
|
||||
# Note that this will currently only make a difference on Linux
_core.multiarray._set_madvise_hugepage(hugepage_setup())
del hugepage_setup

# Give a warning if NumPy is reloaded or imported on a sub-interpreter
# We do this from python, since the C-module may not be reloaded and
# it is tidier organized.
_core.multiarray._multiarray_umath._reload_guard()


# TODO: Remove the environment variable entirely now that it is "weak"
if os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak":
    warnings.warn(
        "NPY_PROMOTION_STATE was a temporary feature for NumPy 2.0 "
        "transition and is ignored after NumPy 2.2.",
        UserWarning, stacklevel=2)


def _pyinstaller_hooks_dir():
    """Tell PyInstaller where to find hook-numpy.py."""
    from pathlib import Path
    hooks_path = Path(__file__).with_name("_pyinstaller").resolve()
    return [str(hooks_path)]


# Remove symbols imported for internal use
del os, sys, warnings
|
||||
5419
env/Lib/site-packages/numpy/__init__.pyi
vendored
Normal file
5419
env/Lib/site-packages/numpy/__init__.pyi
vendored
Normal file
File diff suppressed because it is too large
Load diff
BIN
env/Lib/site-packages/numpy/__pycache__/__config__.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/__config__.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/__init__.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/__init__.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/_array_api_info.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/_array_api_info.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/_configtool.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/_configtool.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/_distributor_init.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/_distributor_init.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/_globals.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/_globals.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/_pytesttester.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/_pytesttester.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/conftest.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/conftest.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/ctypeslib.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/ctypeslib.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/dtypes.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/dtypes.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/exceptions.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/exceptions.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/matlib.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/matlib.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/__pycache__/version.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/__pycache__/version.cpython-310.pyc
vendored
Normal file
Binary file not shown.
346
env/Lib/site-packages/numpy/_array_api_info.py
vendored
Normal file
346
env/Lib/site-packages/numpy/_array_api_info.py
vendored
Normal file
|
|
@ -0,0 +1,346 @@
|
|||
"""
|
||||
Array API Inspection namespace
|
||||
|
||||
This is the namespace for inspection functions as defined by the array API
|
||||
standard. See
|
||||
https://data-apis.org/array-api/latest/API_specification/inspection.html for
|
||||
more details.
|
||||
|
||||
"""
|
||||
from numpy._core import (
|
||||
dtype,
|
||||
bool,
|
||||
intp,
|
||||
int8,
|
||||
int16,
|
||||
int32,
|
||||
int64,
|
||||
uint8,
|
||||
uint16,
|
||||
uint32,
|
||||
uint64,
|
||||
float32,
|
||||
float64,
|
||||
complex64,
|
||||
complex128,
|
||||
)
|
||||
|
||||
|
||||
class __array_namespace_info__:
    """
    Get the array API inspection namespace for NumPy.

    The array API inspection namespace defines the following functions:

    - capabilities()
    - default_device()
    - default_dtypes()
    - dtypes()
    - devices()

    See
    https://data-apis.org/array-api/latest/API_specification/inspection.html
    for more details.

    Returns
    -------
    info : ModuleType
        The array API inspection namespace for NumPy.

    Examples
    --------
    >>> info = np.__array_namespace_info__()
    >>> info.default_dtypes()
    {'real floating': numpy.float64,
     'complex floating': numpy.complex128,
     'integral': numpy.int64,
     'indexing': numpy.int64}

    """

    __module__ = 'numpy'

    def _check_device(self, device):
        # NumPy is CPU-only; ``None`` means "the default device" and is
        # therefore also accepted.
        if device not in ["cpu", None]:
            raise ValueError(
                'Device not understood. Only "cpu" is allowed, but received:'
                f' {device}'
            )

    def capabilities(self):
        """
        Return a dictionary of array API library capabilities.

        The resulting dictionary has the following keys:

        - **"boolean indexing"**: boolean indicating whether an array library
          supports boolean indexing. Always ``True`` for NumPy.

        - **"data-dependent shapes"**: boolean indicating whether an array
          library supports data-dependent output shapes. Always ``True`` for
          NumPy.

        See
        https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
        for more details.

        Returns
        -------
        capabilities : dict
            A dictionary of array API library capabilities.

        Examples
        --------
        >>> info = np.__array_namespace_info__()
        >>> info.capabilities()
        {'boolean indexing': True,
         'data-dependent shapes': True}

        """
        return {
            "boolean indexing": True,
            "data-dependent shapes": True,
            # 'max rank' will be part of the 2024.12 standard
            # "max rank": 64,
        }

    def default_device(self):
        """
        The default device used for new NumPy arrays.

        For NumPy, this always returns ``'cpu'``.

        Returns
        -------
        device : str
            The default device used for new NumPy arrays.

        Examples
        --------
        >>> info = np.__array_namespace_info__()
        >>> info.default_device()
        'cpu'

        """
        return "cpu"

    def default_dtypes(self, *, device=None):
        """
        The default data types used for new NumPy arrays.

        For NumPy, this always returns the following dictionary:

        - **"real floating"**: ``numpy.float64``
        - **"complex floating"**: ``numpy.complex128``
        - **"integral"**: ``numpy.intp``
        - **"indexing"**: ``numpy.intp``

        Parameters
        ----------
        device : str, optional
            The device to get the default data types for. For NumPy, only
            ``'cpu'`` is allowed.

        Returns
        -------
        dtypes : dict
            A dictionary describing the default data types used for new NumPy
            arrays.

        Raises
        ------
        ValueError
            If *device* is anything other than ``'cpu'`` or ``None``.

        Examples
        --------
        >>> info = np.__array_namespace_info__()
        >>> info.default_dtypes()
        {'real floating': numpy.float64,
         'complex floating': numpy.complex128,
         'integral': numpy.int64,
         'indexing': numpy.int64}

        """
        self._check_device(device)
        return {
            "real floating": dtype(float64),
            "complex floating": dtype(complex128),
            "integral": dtype(intp),
            "indexing": dtype(intp),
        }

    def dtypes(self, *, device=None, kind=None):
        """
        The array API data types supported by NumPy.

        Note that this function only returns data types that are defined by
        the array API.

        Parameters
        ----------
        device : str, optional
            The device to get the data types for. For NumPy, only ``'cpu'``
            is allowed.
        kind : str or tuple of str, optional
            The kind of data types to return. If ``None``, all data types are
            returned. If a string, only data types of that kind are returned.
            If a tuple, a dictionary containing the union of the given kinds
            is returned. The following kinds are supported:

            - ``'bool'``: boolean data types (i.e., ``bool``).
            - ``'signed integer'``: signed integer data types (i.e., ``int8``,
              ``int16``, ``int32``, ``int64``).
            - ``'unsigned integer'``: unsigned integer data types (i.e.,
              ``uint8``, ``uint16``, ``uint32``, ``uint64``).
            - ``'integral'``: integer data types. Shorthand for ``('signed
              integer', 'unsigned integer')``.
            - ``'real floating'``: real-valued floating-point data types
              (i.e., ``float32``, ``float64``).
            - ``'complex floating'``: complex floating-point data types (i.e.,
              ``complex64``, ``complex128``).
            - ``'numeric'``: numeric data types. Shorthand for ``('integral',
              'real floating', 'complex floating')``.

        Returns
        -------
        dtypes : dict
            A dictionary mapping the names of data types to the corresponding
            NumPy data types.

        Raises
        ------
        ValueError
            If *device* is not ``'cpu'``/``None`` or *kind* is unsupported.

        Examples
        --------
        >>> info = np.__array_namespace_info__()
        >>> info.dtypes(kind='signed integer')
        {'int8': numpy.int8,
         'int16': numpy.int16,
         'int32': numpy.int32,
         'int64': numpy.int64}

        """
        self._check_device(device)

        # Build the kind groups once; every entry is a ``dtype`` instance.
        # FIX: the previous implementation returned the *scalar type* for
        # ``kind="bool"`` (``{"bool": bool}``) while every other kind —
        # including ``kind=None`` — returned ``dtype`` instances; it now
        # consistently returns ``dtype(bool)``.
        bool_dtypes = {"bool": dtype(bool)}
        signed_dtypes = {
            "int8": dtype(int8),
            "int16": dtype(int16),
            "int32": dtype(int32),
            "int64": dtype(int64),
        }
        unsigned_dtypes = {
            "uint8": dtype(uint8),
            "uint16": dtype(uint16),
            "uint32": dtype(uint32),
            "uint64": dtype(uint64),
        }
        real_dtypes = {
            "float32": dtype(float32),
            "float64": dtype(float64),
        }
        complex_dtypes = {
            "complex64": dtype(complex64),
            "complex128": dtype(complex128),
        }

        if kind is None:
            return {
                **bool_dtypes,
                **signed_dtypes,
                **unsigned_dtypes,
                **real_dtypes,
                **complex_dtypes,
            }

        if isinstance(kind, tuple):
            # Union of the requested kinds; recurse per element.
            joined = {}
            for sub_kind in kind:
                joined.update(self.dtypes(kind=sub_kind))
            return joined

        groups = {
            "bool": bool_dtypes,
            "signed integer": signed_dtypes,
            "unsigned integer": unsigned_dtypes,
            "integral": {**signed_dtypes, **unsigned_dtypes},
            "real floating": real_dtypes,
            "complex floating": complex_dtypes,
            "numeric": {
                **signed_dtypes,
                **unsigned_dtypes,
                **real_dtypes,
                **complex_dtypes,
            },
        }
        # The ``isinstance`` guard keeps unhashable ``kind`` values (e.g. a
        # list) on the ValueError path instead of raising TypeError.
        if isinstance(kind, str) and kind in groups:
            return groups[kind]
        raise ValueError(f"unsupported kind: {kind!r}")

    def devices(self):
        """
        The devices supported by NumPy.

        For NumPy, this always returns ``['cpu']``.

        Returns
        -------
        devices : list of str
            The devices supported by NumPy.

        Examples
        --------
        >>> info = np.__array_namespace_info__()
        >>> info.devices()
        ['cpu']

        """
        return ["cpu"]
|
||||
210
env/Lib/site-packages/numpy/_array_api_info.pyi
vendored
Normal file
210
env/Lib/site-packages/numpy/_array_api_info.pyi
vendored
Normal file
|
|
@ -0,0 +1,210 @@
|
|||
from typing import (
|
||||
ClassVar,
|
||||
Literal,
|
||||
TypeAlias,
|
||||
TypedDict,
|
||||
TypeVar,
|
||||
final,
|
||||
overload,
|
||||
type_check_only,
|
||||
)
|
||||
from typing_extensions import Never
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
# NumPy only supports the CPU device; ``None`` selects the default.
_Device: TypeAlias = Literal["cpu"]
_DeviceLike: TypeAlias = None | _Device

# Functional TypedDict syntax is required: the keys contain spaces.
_Capabilities = TypedDict(
    "_Capabilities",
    {
        "boolean indexing": Literal[True],
        "data-dependent shapes": Literal[True],
    },
)

_DefaultDTypes = TypedDict(
    "_DefaultDTypes",
    {
        "real floating": np.dtype[np.float64],
        "complex floating": np.dtype[np.complex128],
        "integral": np.dtype[np.intp],
        "indexing": np.dtype[np.intp],
    },
)


# The ``kind`` strings accepted by ``__array_namespace_info__.dtypes``.
_KindBool: TypeAlias = Literal["bool"]
_KindInt: TypeAlias = Literal["signed integer"]
_KindUInt: TypeAlias = Literal["unsigned integer"]
_KindInteger: TypeAlias = Literal["integral"]
_KindFloat: TypeAlias = Literal["real floating"]
_KindComplex: TypeAlias = Literal["complex floating"]
_KindNumber: TypeAlias = Literal["numeric"]
_Kind: TypeAlias = (
    _KindBool
    | _KindInt
    | _KindUInt
    | _KindInteger
    | _KindFloat
    | _KindComplex
    | _KindNumber
)


_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_T3 = TypeVar("_T3")
# All orderings of 1-, 2- and 3-element kind tuples (a bare value also
# counts for the 1-element case).
_Permute1: TypeAlias = _T1 | tuple[_T1]
_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1]
_Permute3: TypeAlias = (
    tuple[_T1, _T2, _T3] | tuple[_T1, _T3, _T2]
    | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1]
    | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1]
)

@type_check_only
class _DTypesBool(TypedDict):
    bool: np.dtype[np.bool]

@type_check_only
class _DTypesInt(TypedDict):
    int8: np.dtype[np.int8]
    int16: np.dtype[np.int16]
    int32: np.dtype[np.int32]
    int64: np.dtype[np.int64]

@type_check_only
class _DTypesUInt(TypedDict):
    uint8: np.dtype[np.uint8]
    uint16: np.dtype[np.uint16]
    uint32: np.dtype[np.uint32]
    uint64: np.dtype[np.uint64]

@type_check_only
class _DTypesInteger(_DTypesInt, _DTypesUInt): ...

@type_check_only
class _DTypesFloat(TypedDict):
    float32: np.dtype[np.float32]
    float64: np.dtype[np.float64]

@type_check_only
class _DTypesComplex(TypedDict):
    complex64: np.dtype[np.complex64]
    complex128: np.dtype[np.complex128]

@type_check_only
class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): ...

@type_check_only
class _DTypes(_DTypesBool, _DTypesNumber): ...

@type_check_only
class _DTypesUnion(TypedDict, total=False):
    bool: np.dtype[np.bool]
    int8: np.dtype[np.int8]
    int16: np.dtype[np.int16]
    int32: np.dtype[np.int32]
    int64: np.dtype[np.int64]
    uint8: np.dtype[np.uint8]
    uint16: np.dtype[np.uint16]
    uint32: np.dtype[np.uint32]
    uint64: np.dtype[np.uint64]
    float32: np.dtype[np.float32]
    float64: np.dtype[np.float64]
    complex64: np.dtype[np.complex64]
    complex128: np.dtype[np.complex128]

# ``dict[Never, Never]``: the empty dict returned for ``kind=()``.
_EmptyDict: TypeAlias = dict[Never, Never]
|
||||
|
||||
@final
class __array_namespace_info__:
    """Stub for the array API inspection namespace object."""

    __module__: ClassVar[Literal['numpy']]

    def capabilities(self) -> _Capabilities: ...
    def default_device(self) -> _Device: ...
    def default_dtypes(
        self, *, device: _DeviceLike = ...
    ) -> _DefaultDTypes: ...
    def devices(self) -> list[_Device]: ...

    # ``dtypes`` narrows its return type per ``kind``; tuple arguments are
    # matched order-insensitively via the ``_Permute*`` aliases.
    @overload
    def dtypes(
        self, *, device: _DeviceLike = ..., kind: None = ...
    ) -> _DTypes: ...
    @overload
    def dtypes(
        self, *, device: _DeviceLike = ..., kind: _Permute1[_KindBool]
    ) -> _DTypesBool: ...
    @overload
    def dtypes(
        self, *, device: _DeviceLike = ..., kind: _Permute1[_KindInt]
    ) -> _DTypesInt: ...
    @overload
    def dtypes(
        self, *, device: _DeviceLike = ..., kind: _Permute1[_KindUInt]
    ) -> _DTypesUInt: ...
    @overload
    def dtypes(
        self, *, device: _DeviceLike = ..., kind: _Permute1[_KindFloat]
    ) -> _DTypesFloat: ...
    @overload
    def dtypes(
        self, *, device: _DeviceLike = ..., kind: _Permute1[_KindComplex]
    ) -> _DTypesComplex: ...
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: _Permute1[_KindInteger] | _Permute2[_KindInt, _KindUInt],
    ) -> _DTypesInteger: ...
    @overload
    def dtypes(
        self,
        *,
        device: _DeviceLike = ...,
        kind: (
            _Permute1[_KindNumber]
            | _Permute3[_KindInteger, _KindFloat, _KindComplex]
        ),
    ) -> _DTypesNumber: ...
    @overload
    def dtypes(
        self, *, device: _DeviceLike = ..., kind: tuple[()]
    ) -> _EmptyDict: ...
    @overload
    def dtypes(
        self, *, device: _DeviceLike = ..., kind: tuple[_Kind, ...]
    ) -> _DTypesUnion: ...
|
||||
39
env/Lib/site-packages/numpy/_configtool.py
vendored
Normal file
39
env/Lib/site-packages/numpy/_configtool.py
vendored
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
import argparse
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
from .version import __version__
|
||||
from .lib._utils_impl import get_include
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point printing NumPy build/configuration information.

    Supports ``--version``, ``--cflags`` (include path for the NumPy
    headers) and ``--pkgconfigdir`` (location of ``numpy.pc``). With no
    arguments, the help text is printed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--version",
        action="version",
        version=__version__,
        help="Print the version and exit.",
    )
    parser.add_argument(
        "--cflags",
        action="store_true",
        help="Compile flag needed when using the NumPy headers.",
    )
    parser.add_argument(
        "--pkgconfigdir",
        action="store_true",
        help=("Print the pkgconfig directory in which `numpy.pc` is stored "
              "(useful for setting $PKG_CONFIG_PATH)."),
    )
    args = parser.parse_args()

    # Invoked with no arguments at all: show usage instead of silence.
    if not sys.argv[1:]:
        parser.print_help()

    if args.cflags:
        print("-I" + get_include())
    if args.pkgconfigdir:
        pkgconfig_dir = Path(get_include()) / ".." / "lib" / "pkgconfig"
        print(pkgconfig_dir.resolve())


if __name__ == "__main__":
    main()
|
||||
1
env/Lib/site-packages/numpy/_configtool.pyi
vendored
Normal file
1
env/Lib/site-packages/numpy/_configtool.pyi
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
def main() -> None: ...
|
||||
180
env/Lib/site-packages/numpy/_core/__init__.py
vendored
Normal file
180
env/Lib/site-packages/numpy/_core/__init__.py
vendored
Normal file
|
|
@ -0,0 +1,180 @@
|
|||
"""
|
||||
Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
|
||||
|
||||
Please note that this module is private. All functions and objects
|
||||
are available in the main ``numpy`` namespace - use that instead.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from numpy.version import version as __version__
|
||||
|
||||
|
||||
# disables OpenBLAS affinity setting of the main thread that limits
# python threads or processes to one core
# (remember which variables we added so they can be removed again below)
env_added = []
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
    if envkey not in os.environ:
        os.environ[envkey] = '1'
        env_added.append(envkey)

try:
    from . import multiarray
except ImportError as exc:
    import sys

    # Build an actionable error message.  FIX: chain the original
    # exception with ``from exc`` so its traceback is preserved instead
    # of being reduced to the text embedded in the message.
    msg = """

IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!

Importing the numpy C-extensions failed. This error can happen for
many reasons, often due to issues with your setup or how NumPy was
installed.

We have compiled some common reasons and troubleshooting tips at:

    https://numpy.org/devdocs/user/troubleshooting-importerror.html

Please note and check the following:

  * The Python version is: Python%d.%d from "%s"
  * The NumPy version is: "%s"

and make sure that they are the versions you expect.
Please carefully study the documentation linked above for further help.

Original error was: %s
""" % (sys.version_info[0], sys.version_info[1], sys.executable,
       __version__, exc)
    raise ImportError(msg) from exc
finally:
    # Restore the environment and drop the bookkeeping names; ``envkey``
    # is always bound because the for-loop above iterates a non-empty
    # literal list.
    for envkey in env_added:
        del os.environ[envkey]
    del envkey
    del env_added
    del os
|
||||
|
||||
from . import umath

# Check that multiarray,umath are pure python modules wrapping
# _multiarray_umath and not either of the old c-extension modules
if not (hasattr(multiarray, '_multiarray_umath') and
        hasattr(umath, '_multiarray_umath')):
    import sys
    path = sys.modules['numpy'].__path__
    msg = ("Something is wrong with the numpy installation. "
           "While importing we detected an older version of "
           "numpy in {}. One method of fixing this is to repeatedly uninstall "
           "numpy until none is found, then reinstall this version.")
    raise ImportError(msg.format(path))

# Import order matters: later modules depend on the earlier ones.
from . import numerictypes as nt
from .numerictypes import sctypes, sctypeDict
multiarray.set_typeDict(nt.sctypeDict)
from . import numeric
from .numeric import *
from . import fromnumeric
from .fromnumeric import *
from .records import record, recarray
# Note: module name memmap is overwritten by a class with same name
from .memmap import *
from . import function_base
from .function_base import *
from . import _machar
from . import getlimits
from .getlimits import *
from . import shape_base
from .shape_base import *
from . import einsumfunc
from .einsumfunc import *
del nt

from .numeric import absolute as abs

# do this after everything else, to minimize the chance of this misleadingly
# appearing in an import-time traceback
from . import _add_newdocs
from . import _add_newdocs_scalars
# add these for module-freeze analysis (like PyInstaller)
from . import _dtype_ctypes
from . import _internal
from . import _dtype
from . import _methods

# Array API standard aliases for the classic ufunc/function names.
acos = numeric.arccos
acosh = numeric.arccosh
asin = numeric.arcsin
asinh = numeric.arcsinh
atan = numeric.arctan
atanh = numeric.arctanh
atan2 = numeric.arctan2
concat = numeric.concatenate
bitwise_left_shift = numeric.left_shift
bitwise_invert = numeric.invert
bitwise_right_shift = numeric.right_shift
permute_dims = numeric.transpose
pow = numeric.power

__all__ = [
    "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2",
    "bitwise_invert", "bitwise_left_shift", "bitwise_right_shift", "concat",
    "pow", "permute_dims", "memmap", "sctypeDict", "record", "recarray",
]
# Re-export everything the star-imported submodules declare public.
for _submodule in (numeric, function_base, getlimits, shape_base, einsumfunc):
    __all__ += _submodule.__all__
del _submodule
|
||||
|
||||
|
||||
def _ufunc_reduce(func):
    """Pickle helper: reduce a ufunc to its ``__name__``.

    pickle will then locate the module on its own.  Note that pickle also
    supports this name being a ``__qualname__``; adding a ``__qualname__``
    to ufuncs may make sense to allow that more explicitly (Numba has
    ufuncs as attributes).
    See also: https://github.com/dask/distributed/issues/3450
    """
    return func.__name__
|
||||
|
||||
|
||||
def _DType_reconstruct(scalar_type):
    """Unpickle helper: recover a DType class from its scalar type.

    This is a work-around to pickle ``type(np.dtype(np.float64))`` etc.,
    and should eventually be replaced with a better solution, e.g. when
    DTypes become HeapTypes.
    """
    dt = dtype(scalar_type)
    return type(dt)
|
||||
|
||||
|
||||
def _DType_reduce(DType):
    """Pickle helper for DType classes.

    Most DTypes are plain types and can be pickled by name.  User defined
    legacy dtypes (like rational), however, do not end up in the
    ``numpy.dtypes`` module and have no public class at all; those are
    reconstructed from their scalar type instead.
    """
    if DType._legacy and DType.__module__ != "numpy.dtypes":
        return _DType_reconstruct, (DType.type,)
    return DType.__name__
|
||||
|
||||
|
||||
def __getattr__(name):
    """Module-level attribute hook serving the deprecated ``MachAr``."""
    # Deprecated 2022-11-22, NumPy 1.25.
    if name != "MachAr":
        raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")
    import warnings
    warnings.warn(
        "The `np._core.MachAr` is considered private API (NumPy 1.24)",
        DeprecationWarning, stacklevel=2,
    )
    return _machar.MachAr
|
||||
|
||||
|
||||
import copyreg

# Register the pickle helpers defined above for ufuncs and DType classes.
copyreg.pickle(ufunc, _ufunc_reduce)
copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct)

# Unclutter namespace (must keep _*_reconstruct for unpickling)
del copyreg, _ufunc_reduce, _DType_reduce

from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
|
||||
2
env/Lib/site-packages/numpy/_core/__init__.pyi
vendored
Normal file
2
env/Lib/site-packages/numpy/_core/__init__.pyi
vendored
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
# NOTE: The `np._core` namespace is deliberately kept empty due to it
|
||||
# being private
|
||||
BIN
env/Lib/site-packages/numpy/_core/__pycache__/__init__.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/__init__.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_add_newdocs.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_add_newdocs.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_asarray.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_asarray.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_dtype.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_dtype.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_dtype_ctypes.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_exceptions.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_exceptions.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_internal.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_internal.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_machar.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_machar.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_methods.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_methods.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_string_helpers.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_type_aliases.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/_ufunc_config.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/arrayprint.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/arrayprint.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/cversions.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/cversions.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/defchararray.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/defchararray.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/einsumfunc.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/fromnumeric.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/fromnumeric.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/function_base.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/function_base.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/getlimits.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/getlimits.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/memmap.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/memmap.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/multiarray.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/multiarray.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/numeric.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/numeric.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/numerictypes.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/numerictypes.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/overrides.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/overrides.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/printoptions.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/printoptions.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/records.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/records.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/shape_base.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/shape_base.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/strings.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/strings.cpython-310.pyc
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/__pycache__/umath.cpython-310.pyc
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/__pycache__/umath.cpython-310.pyc
vendored
Normal file
Binary file not shown.
6974
env/Lib/site-packages/numpy/_core/_add_newdocs.py
vendored
Normal file
6974
env/Lib/site-packages/numpy/_core/_add_newdocs.py
vendored
Normal file
File diff suppressed because it is too large
Load diff
3
env/Lib/site-packages/numpy/_core/_add_newdocs.pyi
vendored
Normal file
3
env/Lib/site-packages/numpy/_core/_add_newdocs.pyi
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
from .overrides import get_array_function_like_doc as get_array_function_like_doc
|
||||
|
||||
# Stub only: per the annotation it returns a (str, str) pair for the given
# attribute name — see _add_newdocs.py for the implementation.
def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ...
|
||||
389
env/Lib/site-packages/numpy/_core/_add_newdocs_scalars.py
vendored
Normal file
389
env/Lib/site-packages/numpy/_core/_add_newdocs_scalars.py
vendored
Normal file
|
|
@ -0,0 +1,389 @@
|
|||
"""
|
||||
This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
|
||||
our sphinx ``conf.py`` during doc builds, where we want to avoid showing
|
||||
platform-dependent information.
|
||||
"""
|
||||
import sys
|
||||
import os
|
||||
from numpy._core import dtype
|
||||
from numpy._core import numerictypes as _numerictypes
|
||||
from numpy._core.function_base import add_newdoc
|
||||
|
||||
##############################################################################
|
||||
#
|
||||
# Documentation for concrete scalar classes
|
||||
#
|
||||
##############################################################################
|
||||
|
||||
def numeric_type_aliases(aliases):
|
||||
def type_aliases_gen():
|
||||
for alias, doc in aliases:
|
||||
try:
|
||||
alias_type = getattr(_numerictypes, alias)
|
||||
except AttributeError:
|
||||
# The set of aliases that actually exist varies between platforms
|
||||
pass
|
||||
else:
|
||||
yield (alias_type, alias, doc)
|
||||
return list(type_aliases_gen())
|
||||
|
||||
|
||||
# (type, alias, doc) triples for the sized aliases that exist on this
# platform; names this build doesn't provide (e.g. float96/float128) are
# dropped by numeric_type_aliases.
possible_aliases = numeric_type_aliases([
    ('int8', '8-bit signed integer (``-128`` to ``127``)'),
    ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
    ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
    ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
    ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
    ('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
    ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
    ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
    ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
    ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
    ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
    ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
    ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
    ('float96', '96-bit extended-precision floating-point number type'),
    ('float128', '128-bit extended-precision floating-point number type'),
    ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
    ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
    ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
    ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
])
|
||||
|
||||
|
||||
def _get_platform_and_machine():
|
||||
try:
|
||||
system, _, _, _, machine = os.uname()
|
||||
except AttributeError:
|
||||
system = sys.platform
|
||||
if system == 'win32':
|
||||
machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \
|
||||
or os.environ.get('PROCESSOR_ARCHITECTURE', '')
|
||||
else:
|
||||
machine = 'unknown'
|
||||
return system, machine
|
||||
|
||||
|
||||
_system, _machine = _get_platform_and_machine()
|
||||
_doc_alias_string = f":Alias on this platform ({_system} {_machine}):"
|
||||
|
||||
|
||||
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
    """Attach a docstring to the numpy scalar type named *obj*.

    The body *doc* is extended with rST field lists (``:field: value``,
    which render as field lists) recording the character code, the
    canonical name (when *obj* is itself an alias) and platform aliases
    drawn from ``possible_aliases``.
    """
    scalar_type = getattr(_numerictypes, obj)
    char_code = dtype(scalar_type).char

    # Mention the canonical name only when documenting an alias of it.
    if obj == scalar_type.__name__:
        canonical_name_doc = ""
    else:
        canonical_name_doc = f":Canonical name: `numpy.{obj}`\n    "

    # ''.join over an empty iterable is '' — no need for a separate branch.
    alias_doc = ''.join(f":Alias: `numpy.{alias}`\n    "
                        for alias in fixed_aliases)
    alias_doc += ''.join(
        f"{_doc_alias_string} `numpy.{alias}`: {alias_info}.\n    "
        for (alias_type, alias, alias_info) in possible_aliases
        if alias_type is scalar_type)

    docstring = f"""
    {doc.strip()}

    :Character code: ``'{char_code}'``
    {canonical_name_doc}{alias_doc}
    """

    add_newdoc('numpy._core.numerictypes', obj, docstring)
|
||||
|
||||
|
||||
# Docstrings for the concrete scalar classes.  Registered through
# add_newdoc_for_scalar_type, which appends character code / alias fields.
_bool_docstring = (
    """
    Boolean type (True or False), stored as a byte.

    .. warning::

       The :class:`bool` type is not a subclass of the :class:`int_` type
       (the :class:`bool` is not even a number type). This is different
       than Python's default implementation of :class:`bool` as a
       sub-class of :class:`int`.
    """
)

add_newdoc_for_scalar_type('bool', [], _bool_docstring)

add_newdoc_for_scalar_type('bool_', [], _bool_docstring)

add_newdoc_for_scalar_type('byte', [],
    """
    Signed integer type, compatible with C ``char``.
    """)

add_newdoc_for_scalar_type('short', [],
    """
    Signed integer type, compatible with C ``short``.
    """)

add_newdoc_for_scalar_type('intc', [],
    """
    Signed integer type, compatible with C ``int``.
    """)

# TODO: These docs probably need an if to highlight the default rather than
# the C-types (and be correct).
add_newdoc_for_scalar_type('int_', [],
    """
    Default signed integer type, 64bit on 64bit systems and 32bit on 32bit
    systems.
    """)

add_newdoc_for_scalar_type('longlong', [],
    """
    Signed integer type, compatible with C ``long long``.
    """)

add_newdoc_for_scalar_type('ubyte', [],
    """
    Unsigned integer type, compatible with C ``unsigned char``.
    """)

add_newdoc_for_scalar_type('ushort', [],
    """
    Unsigned integer type, compatible with C ``unsigned short``.
    """)

add_newdoc_for_scalar_type('uintc', [],
    """
    Unsigned integer type, compatible with C ``unsigned int``.
    """)

# FIX: previously read "Unsigned signed integer type".
add_newdoc_for_scalar_type('uint', [],
    """
    Unsigned integer type, 64bit on 64bit systems and 32bit on 32bit
    systems.
    """)

# FIX: previously claimed to be "Signed" despite being the C
# ``unsigned long long`` equivalent.
add_newdoc_for_scalar_type('ulonglong', [],
    """
    Unsigned integer type, compatible with C ``unsigned long long``.
    """)

add_newdoc_for_scalar_type('half', [],
    """
    Half-precision floating-point number type.
    """)

add_newdoc_for_scalar_type('single', [],
    """
    Single-precision floating-point number type, compatible with C ``float``.
    """)

add_newdoc_for_scalar_type('double', [],
    """
    Double-precision floating-point number type, compatible with Python
    :class:`float` and C ``double``.
    """)

add_newdoc_for_scalar_type('longdouble', [],
    """
    Extended-precision floating-point number type, compatible with C
    ``long double`` but not necessarily with IEEE 754 quadruple-precision.
    """)

add_newdoc_for_scalar_type('csingle', [],
    """
    Complex number type composed of two single-precision floating-point
    numbers.
    """)

add_newdoc_for_scalar_type('cdouble', [],
    """
    Complex number type composed of two double-precision floating-point
    numbers, compatible with Python :class:`complex`.
    """)

add_newdoc_for_scalar_type('clongdouble', [],
    """
    Complex number type composed of two extended-precision floating-point
    numbers.
    """)

add_newdoc_for_scalar_type('object_', [],
    """
    Any Python object.
    """)
|
||||
|
||||
# Flexible / void / datetime scalar types, which need longer docstrings.
add_newdoc_for_scalar_type('str_', [],
    r"""
    A unicode string.

    This type strips trailing null codepoints.

    >>> s = np.str_("abc\x00")
    >>> s
    'abc'

    Unlike the builtin :class:`str`, this supports the
    :ref:`python:bufferobjects`, exposing its contents as UCS4:

    >>> m = memoryview(np.str_("abc"))
    >>> m.format
    '3w'
    >>> m.tobytes()
    b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
    """)

add_newdoc_for_scalar_type('bytes_', [],
    r"""
    A byte string.

    When used in arrays, this type strips trailing null bytes.
    """)

add_newdoc_for_scalar_type('void', [],
    r"""
    np.void(length_or_data, /, dtype=None)

    Create a new structured or unstructured void scalar.

    Parameters
    ----------
    length_or_data : int, array-like, bytes-like, object
        One of multiple meanings (see notes). The length or
        bytes data of an unstructured void. Or alternatively,
        the data to be stored in the new scalar when `dtype`
        is provided.
        This can be an array-like, in which case an array may
        be returned.
    dtype : dtype, optional
        If provided the dtype of the new scalar. This dtype must
        be "void" dtype (i.e. a structured or unstructured void,
        see also :ref:`defining-structured-types`).

        .. versionadded:: 1.24

    Notes
    -----
    For historical reasons and because void scalars can represent both
    arbitrary byte data and structured dtypes, the void constructor
    has three calling conventions:

    1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
       ``\0`` bytes.  The 5 can be a Python or NumPy integer.
    2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
       The dtype itemsize will match the byte string length, here ``"V10"``.
    3. When a ``dtype=`` is passed the call is roughly the same as an
       array creation.  However, a void scalar rather than array is returned.

    Please see the examples which show all three different conventions.

    Examples
    --------
    >>> np.void(5)
    np.void(b'\x00\x00\x00\x00\x00')
    >>> np.void(b'abcd')
    np.void(b'\x61\x62\x63\x64')
    >>> np.void((3.2, b'eggs'), dtype="d,S5")
    np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
    >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
    np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])

    """)

add_newdoc_for_scalar_type('datetime64', [],
    """
    If created from a 64-bit integer, it represents an offset from
    ``1970-01-01T00:00:00``.
    If created from string, the string can be in ISO 8601 date
    or datetime format.

    When parsing a string to create a datetime object, if the string contains
    a trailing timezone (A 'Z' or a timezone offset), the timezone will be
    dropped and a User Warning is given.

    Datetime64 objects should be considered to be UTC and therefore have an
    offset of +0000.

    >>> np.datetime64(10, 'Y')
    np.datetime64('1980')
    >>> np.datetime64('1980', 'Y')
    np.datetime64('1980')
    >>> np.datetime64(10, 'D')
    np.datetime64('1970-01-11')

    See :ref:`arrays.datetime` for more information.
    """)

add_newdoc_for_scalar_type('timedelta64', [],
    """
    A timedelta stored as a 64-bit integer.

    See :ref:`arrays.datetime` for more information.
    """)
|
||||
|
||||
add_newdoc('numpy._core.numerictypes', "integer", ('is_integer',
|
||||
"""
|
||||
integer.is_integer() -> bool
|
||||
|
||||
Return ``True`` if the number is finite with integral value.
|
||||
|
||||
.. versionadded:: 1.22
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> np.int64(-2).is_integer()
|
||||
True
|
||||
>>> np.uint32(5).is_integer()
|
||||
True
|
||||
"""))
|
||||
|
||||
# TODO: work out how to put this on the base class, np.floating
|
||||
for float_name in ('half', 'single', 'double', 'longdouble'):
|
||||
add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio',
|
||||
"""
|
||||
{ftype}.as_integer_ratio() -> (int, int)
|
||||
|
||||
Return a pair of integers, whose ratio is exactly equal to the original
|
||||
floating point number, and with a positive denominator.
|
||||
Raise `OverflowError` on infinities and a `ValueError` on NaNs.
|
||||
|
||||
>>> np.{ftype}(10.0).as_integer_ratio()
|
||||
(10, 1)
|
||||
>>> np.{ftype}(0.0).as_integer_ratio()
|
||||
(0, 1)
|
||||
>>> np.{ftype}(-.25).as_integer_ratio()
|
||||
(-1, 4)
|
||||
""".format(ftype=float_name)))
|
||||
|
||||
add_newdoc('numpy._core.numerictypes', float_name, ('is_integer',
|
||||
f"""
|
||||
{float_name}.is_integer() -> bool
|
||||
|
||||
Return ``True`` if the floating point number is finite with integral
|
||||
value, and ``False`` otherwise.
|
||||
|
||||
.. versionadded:: 1.22
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> np.{float_name}(-2.0).is_integer()
|
||||
True
|
||||
>>> np.{float_name}(3.2).is_integer()
|
||||
False
|
||||
"""))
|
||||
|
||||
for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
|
||||
'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'):
|
||||
# Add negative examples for signed cases by checking typecode
|
||||
add_newdoc('numpy._core.numerictypes', int_name, ('bit_count',
|
||||
f"""
|
||||
{int_name}.bit_count() -> int
|
||||
|
||||
Computes the number of 1-bits in the absolute value of the input.
|
||||
Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> np.{int_name}(127).bit_count()
|
||||
7""" +
|
||||
(f"""
|
||||
>>> np.{int_name}(-127).bit_count()
|
||||
7
|
||||
""" if dtype(int_name).char.islower() else "")))
|
||||
16
env/Lib/site-packages/numpy/_core/_add_newdocs_scalars.pyi
vendored
Normal file
16
env/Lib/site-packages/numpy/_core/_add_newdocs_scalars.pyi
vendored
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
from collections.abc import Iterable
|
||||
from typing import Final
|
||||
|
||||
import numpy as np
|
||||
|
||||
# Type stubs for the module-level state and helpers of
# _add_newdocs_scalars.py; `int_name`/`float_name` are the loop variables
# that leak to module scope there.
possible_aliases: Final[list[tuple[type[np.number], str, str]]] = ...
_system: Final[str] = ...
_machine: Final[str] = ...
_doc_alias_string: Final[str] = ...
_bool_docstring: Final[str] = ...
int_name: str = ...
float_name: str = ...

def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ...
def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ...
def _get_platform_and_machine() -> tuple[str, str]: ...
|
||||
135
env/Lib/site-packages/numpy/_core/_asarray.py
vendored
Normal file
135
env/Lib/site-packages/numpy/_core/_asarray.py
vendored
Normal file
|
|
@ -0,0 +1,135 @@
|
|||
"""
|
||||
Functions in the ``as*array`` family that promote array-likes into arrays.
|
||||
|
||||
`require` fits this category despite its name not matching this pattern.
|
||||
"""
|
||||
from .overrides import (
|
||||
array_function_dispatch,
|
||||
finalize_array_function_like,
|
||||
set_module,
|
||||
)
|
||||
from .multiarray import array, asanyarray
|
||||
|
||||
|
||||
__all__ = ["require"]
|
||||
|
||||
|
||||
POSSIBLE_FLAGS = {
|
||||
'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
|
||||
'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
|
||||
'A': 'A', 'ALIGNED': 'A',
|
||||
'W': 'W', 'WRITEABLE': 'W',
|
||||
'O': 'O', 'OWNDATA': 'O',
|
||||
'E': 'E', 'ENSUREARRAY': 'E'
|
||||
}
|
||||
|
||||
|
||||
@finalize_array_function_like
|
||||
@set_module('numpy')
|
||||
def require(a, dtype=None, requirements=None, *, like=None):
|
||||
"""
|
||||
Return an ndarray of the provided type that satisfies requirements.
|
||||
|
||||
This function is useful to be sure that an array with the correct flags
|
||||
is returned for passing to compiled code (perhaps through ctypes).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
a : array_like
|
||||
The object to be converted to a type-and-requirement-satisfying array.
|
||||
dtype : data-type
|
||||
The required data-type. If None preserve the current dtype. If your
|
||||
application requires the data to be in native byteorder, include
|
||||
a byteorder specification as a part of the dtype specification.
|
||||
requirements : str or sequence of str
|
||||
The requirements list can be any of the following
|
||||
|
||||
* 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
|
||||
* 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
|
||||
* 'ALIGNED' ('A') - ensure a data-type aligned array
|
||||
* 'WRITEABLE' ('W') - ensure a writable array
|
||||
* 'OWNDATA' ('O') - ensure an array that owns its own data
|
||||
* 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
|
||||
${ARRAY_FUNCTION_LIKE}
|
||||
|
||||
.. versionadded:: 1.20.0
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray
|
||||
Array with specified requirements and type if given.
|
||||
|
||||
See Also
|
||||
--------
|
||||
asarray : Convert input to an ndarray.
|
||||
asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
|
||||
ascontiguousarray : Convert input to a contiguous array.
|
||||
asfortranarray : Convert input to an ndarray with column-major
|
||||
memory order.
|
||||
ndarray.flags : Information about the memory layout of the array.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The returned array will be guaranteed to have the listed requirements
|
||||
by making a copy if needed.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> x = np.arange(6).reshape(2,3)
|
||||
>>> x.flags
|
||||
C_CONTIGUOUS : True
|
||||
F_CONTIGUOUS : False
|
||||
OWNDATA : False
|
||||
WRITEABLE : True
|
||||
ALIGNED : True
|
||||
WRITEBACKIFCOPY : False
|
||||
|
||||
>>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
|
||||
>>> y.flags
|
||||
C_CONTIGUOUS : False
|
||||
F_CONTIGUOUS : True
|
||||
OWNDATA : True
|
||||
WRITEABLE : True
|
||||
ALIGNED : True
|
||||
WRITEBACKIFCOPY : False
|
||||
|
||||
"""
|
||||
if like is not None:
|
||||
return _require_with_like(
|
||||
like,
|
||||
a,
|
||||
dtype=dtype,
|
||||
requirements=requirements,
|
||||
)
|
||||
|
||||
if not requirements:
|
||||
return asanyarray(a, dtype=dtype)
|
||||
|
||||
requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}
|
||||
|
||||
if 'E' in requirements:
|
||||
requirements.remove('E')
|
||||
subok = False
|
||||
else:
|
||||
subok = True
|
||||
|
||||
order = 'A'
|
||||
if requirements >= {'C', 'F'}:
|
||||
raise ValueError('Cannot specify both "C" and "F" order')
|
||||
elif 'F' in requirements:
|
||||
order = 'F'
|
||||
requirements.remove('F')
|
||||
elif 'C' in requirements:
|
||||
order = 'C'
|
||||
requirements.remove('C')
|
||||
|
||||
arr = array(a, dtype=dtype, order=order, copy=None, subok=subok)
|
||||
|
||||
for prop in requirements:
|
||||
if not arr.flags[prop]:
|
||||
return arr.copy(order)
|
||||
return arr
|
||||
|
||||
|
||||
_require_with_like = array_function_dispatch()(require)
|
||||
41
env/Lib/site-packages/numpy/_core/_asarray.pyi
vendored
Normal file
41
env/Lib/site-packages/numpy/_core/_asarray.pyi
vendored
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
from collections.abc import Iterable
|
||||
from typing import Any, TypeAlias, TypeVar, overload, Literal
|
||||
|
||||
from numpy._typing import NDArray, DTypeLike, _SupportsArrayFunc
|
||||
|
||||
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])

# Flag spellings accepted by `require`, excluding the subclass-stripping 'E'.
_Requirements: TypeAlias = Literal[
    "C", "C_CONTIGUOUS", "CONTIGUOUS",
    "F", "F_CONTIGUOUS", "FORTRAN",
    "A", "ALIGNED",
    "W", "WRITEABLE",
    "O", "OWNDATA"
]
_E: TypeAlias = Literal["E", "ENSUREARRAY"]
_RequirementsWithE: TypeAlias = _Requirements | _E

# No dtype change and no 'E' requirement: the array type is preserved.
@overload
def require(
    a: _ArrayType,
    dtype: None = ...,
    requirements: None | _Requirements | Iterable[_Requirements] = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> _ArrayType: ...
# An 'E' requirement yields a base ndarray.
@overload
def require(
    a: object,
    dtype: DTypeLike = ...,
    requirements: _E | Iterable[_RequirementsWithE] = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
@overload
def require(
    a: object,
    dtype: DTypeLike = ...,
    requirements: None | _Requirements | Iterable[_Requirements] = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
|
||||
374
env/Lib/site-packages/numpy/_core/_dtype.py
vendored
Normal file
374
env/Lib/site-packages/numpy/_core/_dtype.py
vendored
Normal file
|
|
@ -0,0 +1,374 @@
|
|||
"""
|
||||
A place for code to be called from the implementation of np.dtype
|
||||
|
||||
String handling is much easier to do correctly in python.
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
|
||||
_kind_to_stem = {
|
||||
'u': 'uint',
|
||||
'i': 'int',
|
||||
'c': 'complex',
|
||||
'f': 'float',
|
||||
'b': 'bool',
|
||||
'V': 'void',
|
||||
'O': 'object',
|
||||
'M': 'datetime',
|
||||
'm': 'timedelta',
|
||||
'S': 'bytes',
|
||||
'U': 'str',
|
||||
}
|
||||
|
||||
|
||||
def _kind_name(dtype):
|
||||
try:
|
||||
return _kind_to_stem[dtype.kind]
|
||||
except KeyError as e:
|
||||
raise RuntimeError(
|
||||
"internal dtype error, unknown kind {!r}"
|
||||
.format(dtype.kind)
|
||||
) from None
|
||||
|
||||
|
||||
def __str__(dtype):
    """Implements str(dtype).

    Structured dtypes get a field list/dict, subarrays a (base, shape)
    pair, flexible or byte-swapped dtypes their str-code, and everything
    else its plain name.
    """
    if dtype.fields is not None:
        return _struct_str(dtype, include_align=True)
    if dtype.subdtype:
        return _subarray_str(dtype)
    if issubclass(dtype.type, np.flexible) or not dtype.isnative:
        return dtype.str
    return dtype.name


def __repr__(dtype):
    """Implements repr(dtype), adding ", align=True" for aligned structs."""
    arg_str = _construction_repr(dtype, include_align=False)
    if dtype.isalignedstruct:
        arg_str += ", align=True"
    return "dtype({})".format(arg_str)
|
||||
|
||||
|
||||
def _unpack_field(dtype, offset, title=None):
|
||||
"""
|
||||
Helper function to normalize the items in dtype.fields.
|
||||
|
||||
Call as:
|
||||
|
||||
dtype, offset, title = _unpack_field(*dtype.fields[name])
|
||||
"""
|
||||
return dtype, offset, title
|
||||
|
||||
|
||||
def _isunsized(dtype):
|
||||
# PyDataType_ISUNSIZED
|
||||
return dtype.itemsize == 0
|
||||
|
||||
|
||||
def _construction_repr(dtype, include_align=False, short=False):
    """
    Build a string repr of the dtype without the surrounding 'dtype()' —
    i.e. the object passed as the first parameter to the dtype
    constructor (a string, list or dict depending on the dtype).  With no
    additional constructor parameters it reproduces the exact memory
    layout.

    Parameters
    ----------
    short : bool
        If true, this creates a shorter repr using 'kind' and 'itemsize',
        instead of the longer type name.

    include_align : bool
        If true, this includes the 'align=True' parameter
        inside the struct dtype construction dict when needed. Use this flag
        if you want a proper repr string without the 'dtype()' part around it.

        If false, this does not preserve the
        'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
        struct arrays like the regular repr does, because the 'align'
        flag is not part of first dtype constructor parameter. This
        mode is intended for a full 'repr', where the 'align=True' is
        provided as the second parameter.
    """
    # Dispatch on the dtype's structure: struct, subarray, then scalar.
    if dtype.fields is not None:
        return _struct_str(dtype, include_align=include_align)
    if dtype.subdtype:
        return _subarray_str(dtype)
    return _scalar_str(dtype, short=short)
|
||||
|
||||
|
||||
def _scalar_str(dtype, short):
    """String code for a non-structured, non-subarray dtype.

    Branch order matters: concrete type checks come before the generic
    void / number fallbacks.
    """
    byteorder = _byte_order_str(dtype)

    if dtype.type == np.bool:
        return "'?'" if short else "'bool'"

    if dtype.type == np.object_:
        # The object reference may be different sizes on different
        # platforms, so it should never include the itemsize here.
        return "'O'"

    if dtype.type == np.bytes_:
        return "'S'" if _isunsized(dtype) else "'S%d'" % dtype.itemsize

    if dtype.type == np.str_:
        if _isunsized(dtype):
            return "'%sU'" % byteorder
        # itemsize is in bytes; each unicode code point occupies 4.
        return "'%sU%d'" % (byteorder, dtype.itemsize / 4)

    if dtype.type == str:
        return "'T'"

    if not type(dtype)._legacy:
        return f"'{byteorder}{type(dtype).__name__}{dtype.itemsize * 8}'"

    # unlike the other types, subclasses of void are preserved - but
    # historically the repr does not actually reveal the subclass
    if issubclass(dtype.type, np.void):
        return "'V'" if _isunsized(dtype) else "'V%d'" % dtype.itemsize

    if dtype.type == np.datetime64:
        return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))

    if dtype.type == np.timedelta64:
        return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))

    if np.issubdtype(dtype, np.number):
        # Short repr with endianness, like '<f8'
        if short or dtype.byteorder not in ('=', '|'):
            return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)
        # Longer repr, like 'float64'
        return "'%s%d'" % (_kind_name(dtype), 8 * dtype.itemsize)

    if dtype.isbuiltin == 2:
        return dtype.type.__name__

    raise RuntimeError(
        "Internal error: NumPy dtype unrecognized type number")
|
||||
|
||||
|
||||
def _byte_order_str(dtype):
|
||||
""" Normalize byteorder to '<' or '>' """
|
||||
# hack to obtain the native and swapped byte order characters
|
||||
swapped = np.dtype(int).newbyteorder('S')
|
||||
native = swapped.newbyteorder('S')
|
||||
|
||||
byteorder = dtype.byteorder
|
||||
if byteorder == '=':
|
||||
return native.byteorder
|
||||
if byteorder == 'S':
|
||||
# TODO: this path can never be reached
|
||||
return swapped.byteorder
|
||||
elif byteorder == '|':
|
||||
return ''
|
||||
else:
|
||||
return byteorder
|
||||
|
||||
|
||||
def _datetime_metadata_str(dtype):
|
||||
# TODO: this duplicates the C metastr_to_unicode functionality
|
||||
unit, count = np.datetime_data(dtype)
|
||||
if unit == 'generic':
|
||||
return ''
|
||||
elif count == 1:
|
||||
return '[{}]'.format(unit)
|
||||
else:
|
||||
return '[{}{}]'.format(count, unit)
|
||||
|
||||
|
||||
def _struct_dict_str(dtype, includealignedflag):
    """Dict-form repr of a structured dtype:
    {'names': ..., 'formats': ..., 'offsets': ..., [ 'titles': ...,]
    'itemsize': ...[, 'aligned': True]}."""
    # Unpack the fields dictionary into parallel lists.
    names = dtype.names
    fld_dtypes = []
    offsets = []
    titles = []
    for name in names:
        fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
        fld_dtypes.append(fld_dtype)
        offsets.append(offset)
        titles.append(title)

    # Legacy (<= 1.21) printing used no space after ':' and ','.
    if np._core.arrayprint._get_legacy_print_mode() <= 121:
        colon, fieldsep = ":", ","
    else:
        colon, fieldsep = ": ", ", "

    parts = [
        # First, the names
        "{'names'%s[" % colon,
        fieldsep.join(repr(name) for name in names),
        # Second, the formats
        "], 'formats'%s[" % colon,
        fieldsep.join(_construction_repr(fld, short=True)
                      for fld in fld_dtypes),
        # Third, the offsets
        "], 'offsets'%s[" % colon,
        fieldsep.join("%d" % offset for offset in offsets),
    ]

    # Fourth, the titles — only emitted when at least one field has one.
    if any(title is not None for title in titles):
        parts.append("], 'titles'%s[" % colon)
        parts.append(fieldsep.join(repr(title) for title in titles))

    # Fifth, the itemsize
    parts.append("], 'itemsize'%s%d" % (colon, dtype.itemsize))

    # Finally, the aligned flag
    if includealignedflag and dtype.isalignedstruct:
        parts.append(", 'aligned'%sTrue}" % colon)
    else:
        parts.append("}")

    return ''.join(parts)
|
||||
|
||||
|
||||
def _aligned_offset(offset, alignment):
|
||||
# round up offset:
|
||||
return - (-offset // alignment) * alignment
|
||||
|
||||
|
||||
def _is_packed(dtype):
    """
    Checks whether the structured dtype has a simple layout: fields in
    order, following each other with no alignment padding beyond what
    `align=True` itself implies.

    When this returns true, the dtype can be reconstructed from a list of
    the field names and dtypes with no additional dtype parameters.

    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
    """
    align = dtype.isalignedstruct
    max_alignment = 1
    expected_offset = 0
    for name in dtype.names:
        fld_dtype, fld_offset, _ = _unpack_field(*dtype.fields[name])

        if align:
            expected_offset = _aligned_offset(expected_offset,
                                              fld_dtype.alignment)
            max_alignment = max(max_alignment, fld_dtype.alignment)

        # Any gap or overlap means the layout is not simple.
        if fld_offset != expected_offset:
            return False
        expected_offset += fld_dtype.itemsize

    if align:
        # Trailing padding brings the struct up to its own alignment.
        expected_offset = _aligned_offset(expected_offset, max_alignment)

    return expected_offset == dtype.itemsize
|
||||
|
||||
|
||||
def _struct_list_str(dtype):
    """List-form repr of a packed structured dtype: [(name, format), ...],
    with (title, name) pairs for titled fields."""
    entries = []
    for name in dtype.names:
        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])

        head = ("({!r}, {!r}), ".format(title, name) if title is not None
                else "{!r}, ".format(name))

        # Special case subarray handling here
        if fld_dtype.subdtype is not None:
            base, shape = fld_dtype.subdtype
            body = "{}, {}".format(
                _construction_repr(base, short=True),
                shape
            )
        else:
            body = _construction_repr(fld_dtype, short=True)

        entries.append("(" + head + body + ")")

    return "[" + ", ".join(entries) + "]"
|
||||
|
||||
|
||||
def _struct_str(dtype, include_align):
    """Repr of a structured dtype, as a field list or a full dict."""
    # The list str representation can't include the 'align=' flag, so if
    # it is requested and the struct has the aligned flag set — or the
    # layout is not simply packed — the dict form must be used instead.
    if not (include_align and dtype.isalignedstruct) and _is_packed(dtype):
        sub = _struct_list_str(dtype)
    else:
        sub = _struct_dict_str(dtype, include_align)

    # If the data type isn't the default, void, show it
    if dtype.type != np.void:
        return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
    return sub
|
||||
|
||||
|
||||
def _subarray_str(dtype):
    """Repr of a subarray dtype as "(base, shape)"."""
    base, shape = dtype.subdtype
    base_repr = _construction_repr(base, short=True)
    return "({}, {})".format(base_repr, shape)
|
||||
|
||||
|
||||
def _name_includes_bit_suffix(dtype):
    """Whether dtype.name should carry a bit-size suffix (e.g. the 64 in
    'float64')."""
    if dtype.type == np.object_:
        # pointer size varies by system, best to omit it
        return False
    if dtype.type == np.bool:
        # implied
        return False
    if dtype.type is None:
        return True
    if np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
        # unspecified
        return False
    return True
|
||||
|
||||
|
||||
def _name_get(dtype):
    """Implements ``dtype.name.__get__``; documented as returning a "bit name"."""
    if dtype.isbuiltin == 2:
        # user dtypes don't promise to do anything special
        return dtype.type.__name__

    if not type(dtype)._legacy:
        name = type(dtype).__name__
    elif issubclass(dtype.type, np.void):
        # historically, void subclasses preserve their name, eg `record64`
        name = dtype.type.__name__
    else:
        name = _kind_name(dtype)

    # append bit counts
    if _name_includes_bit_suffix(dtype):
        name += str(dtype.itemsize * 8)

    # append metadata to datetimes
    if dtype.type in (np.datetime64, np.timedelta64):
        name += _datetime_metadata_str(dtype)

    return name
|
||||
58
env/Lib/site-packages/numpy/_core/_dtype.pyi
vendored
Normal file
58
env/Lib/site-packages/numpy/_core/_dtype.pyi
vendored
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
# Type stubs for numpy._core._dtype (dtype str/repr helper module).
from typing import Any, Final, TypeAlias, TypedDict, overload, type_check_only
from typing import Literal as L

from typing_extensions import ReadOnly, TypeVar

import numpy as np

###

_T = TypeVar("_T")

# Stem names used when building `dtype.name`; a bit-size suffix may be appended.
_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"]

@type_check_only
class _KindToStemType(TypedDict):
    u: ReadOnly[L["uint"]]
    i: ReadOnly[L["int"]]
    c: ReadOnly[L["complex"]]
    f: ReadOnly[L["float"]]
    b: ReadOnly[L["bool"]]
    V: ReadOnly[L["void"]]
    O: ReadOnly[L["object"]]
    M: ReadOnly[L["datetime"]]
    m: ReadOnly[L["timedelta"]]
    S: ReadOnly[L["bytes"]]
    U: ReadOnly[L["str"]]

###

_kind_to_stem: Final[_KindToStemType] = ...

# name helpers
def _kind_name(dtype: np.dtype[Any]) -> _Name: ...
def __str__(dtype: np.dtype[Any]) -> str: ...
def __repr__(dtype: np.dtype[Any]) -> str: ...

# predicates
def _isunsized(dtype: np.dtype[Any]) -> bool: ...
def _is_packed(dtype: np.dtype[Any]) -> bool: ...
def _name_includes_bit_suffix(dtype: np.dtype[Any]) -> bool: ...

# str/repr builders
def _construction_repr(dtype: np.dtype[Any], include_align: bool = False, short: bool = False) -> str: ...
def _scalar_str(dtype: np.dtype[Any], short: bool) -> str: ...
def _byte_order_str(dtype: np.dtype[Any]) -> str: ...
def _datetime_metadata_str(dtype: np.dtype[Any]) -> str: ...
def _struct_dict_str(dtype: np.dtype[Any], includealignedflag: bool) -> str: ...
def _struct_list_str(dtype: np.dtype[Any]) -> str: ...
def _struct_str(dtype: np.dtype[Any], include_align: bool) -> str: ...
def _subarray_str(dtype: np.dtype[Any]) -> str: ...
def _name_get(dtype: np.dtype[Any]) -> str: ...

# field helpers
@overload
def _unpack_field(dtype: np.dtype[Any], offset: int, title: _T) -> tuple[np.dtype[Any], int, _T]: ...
@overload
def _unpack_field(dtype: np.dtype[Any], offset: int, title: None = None) -> tuple[np.dtype[Any], int, None]: ...
def _aligned_offset(offset: int, alignment: int) -> int: ...
|
||||
120
env/Lib/site-packages/numpy/_core/_dtype_ctypes.py
vendored
Normal file
120
env/Lib/site-packages/numpy/_core/_dtype_ctypes.py
vendored
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
"""
|
||||
Conversion from ctypes to dtype.
|
||||
|
||||
In an ideal world, we could achieve this through the PEP3118 buffer protocol,
|
||||
something like::
|
||||
|
||||
def dtype_from_ctypes_type(t):
|
||||
# needed to ensure that the shape of `t` is within memoryview.format
|
||||
class DummyStruct(ctypes.Structure):
|
||||
_fields_ = [('a', t)]
|
||||
|
||||
# empty to avoid memory allocation
|
||||
ctype_0 = (DummyStruct * 0)()
|
||||
mv = memoryview(ctype_0)
|
||||
|
||||
# convert the struct, and slice back out the field
|
||||
return _dtype_from_pep3118(mv.format)['a']
|
||||
|
||||
Unfortunately, this fails because:
|
||||
|
||||
* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
|
||||
* PEP3118 cannot represent unions, but both numpy and ctypes can
|
||||
* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
|
||||
"""
|
||||
|
||||
# We delay-import ctypes for distributions that do not include it.
|
||||
# While this module is not used unless the user passes in ctypes
|
||||
# members, it is eagerly imported from numpy/_core/__init__.py.
|
||||
import numpy as np
|
||||
|
||||
|
||||
def _from_ctypes_array(t):
    """Convert a ctypes array type into the equivalent subarray dtype."""
    element = dtype_from_ctypes_type(t._type_)
    return np.dtype((element, (t._length_,)))
|
||||
|
||||
|
||||
def _from_ctypes_structure(t):
    """Convert a ctypes Structure type into the equivalent structured dtype.

    An explicit ``_pack_`` triggers manual packed-offset computation;
    otherwise numpy's aligned-struct layout is used.
    """
    # Bitfields appear as 3-tuples in _fields_; numpy cannot represent them.
    for item in t._fields_:
        if len(item) > 2:
            raise TypeError(
                "ctypes bitfields have no dtype equivalent")

    if hasattr(t, "_pack_"):
        import ctypes
        formats = []
        offsets = []
        names = []
        current_offset = 0
        for fname, ftyp in t._fields_:
            names.append(fname)
            formats.append(dtype_from_ctypes_type(ftyp))
            # Each type has a default offset, this is platform dependent
            # for some types.
            effective_pack = min(t._pack_, ctypes.alignment(ftyp))
            # Round the running offset up to the effective packing boundary.
            current_offset = (
                (current_offset + effective_pack - 1) // effective_pack
            ) * effective_pack
            offsets.append(current_offset)
            current_offset += ctypes.sizeof(ftyp)

        return np.dtype(dict(
            formats=formats,
            offsets=offsets,
            names=names,
            itemsize=ctypes.sizeof(t)))
    else:
        fields = []
        for fname, ftyp in t._fields_:
            fields.append((fname, dtype_from_ctypes_type(ftyp)))

        # by default, ctypes structs are aligned
        return np.dtype(fields, align=True)
|
||||
|
||||
|
||||
def _from_ctypes_scalar(t):
|
||||
"""
|
||||
Return the dtype type with endianness included if it's the case
|
||||
"""
|
||||
if getattr(t, '__ctype_be__', None) is t:
|
||||
return np.dtype('>' + t._type_)
|
||||
elif getattr(t, '__ctype_le__', None) is t:
|
||||
return np.dtype('<' + t._type_)
|
||||
else:
|
||||
return np.dtype(t._type_)
|
||||
|
||||
|
||||
def _from_ctypes_union(t):
    """Convert a ctypes Union type into an all-fields-overlapping dtype."""
    import ctypes
    names = []
    formats = []
    for fname, ftyp in t._fields_:
        names.append(fname)
        formats.append(dtype_from_ctypes_type(ftyp))

    return np.dtype(dict(
        formats=formats,
        offsets=[0] * len(names),  # Union fields are offset to 0
        names=names,
        itemsize=ctypes.sizeof(t)))
|
||||
|
||||
|
||||
def dtype_from_ctypes_type(t):
    """
    Construct a dtype object from a ctypes type
    """
    import _ctypes
    if issubclass(t, _ctypes.Array):
        return _from_ctypes_array(t)
    if issubclass(t, _ctypes._Pointer):
        raise TypeError("ctypes pointers have no dtype equivalent")
    if issubclass(t, _ctypes.Structure):
        return _from_ctypes_structure(t)
    if issubclass(t, _ctypes.Union):
        return _from_ctypes_union(t)
    # Simple scalars expose their format character via `_type_`.
    if isinstance(getattr(t, '_type_', None), str):
        return _from_ctypes_scalar(t)
    raise NotImplementedError(
        "Unknown ctypes type {}".format(t.__name__))
|
||||
83
env/Lib/site-packages/numpy/_core/_dtype_ctypes.pyi
vendored
Normal file
83
env/Lib/site-packages/numpy/_core/_dtype_ctypes.pyi
vendored
Normal file
|
|
@ -0,0 +1,83 @@
|
|||
# Type stubs for numpy._core._dtype_ctypes (ctypes -> dtype conversion).
import _ctypes
import ctypes as ct
from typing import Any, overload

import numpy as np

# Public entry point; overloads mirror the runtime dispatch on ctypes kind.
@overload
def dtype_from_ctypes_type(t: type[_ctypes.Array[Any] | _ctypes.Structure]) -> np.dtype[np.void]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_float]) -> np.dtype[np.float32]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_double]) -> np.dtype[np.float64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...

# NOTE: the complex ctypes on python>=3.14 are not yet supported at runtime, see
# https://github.com/numpy/numpy/issues/28360

# Private helpers for composite ctypes kinds.
def _from_ctypes_array(t: type[_ctypes.Array[Any]]) -> np.dtype[np.void]: ...
def _from_ctypes_structure(t: type[_ctypes.Structure]) -> np.dtype[np.void]: ...
def _from_ctypes_union(t: type[_ctypes.Union]) -> np.dtype[np.void]: ...

# keep in sync with `dtype_from_ctypes_type` (minus the first overload)
@overload
def _from_ctypes_scalar(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_float]) -> np.dtype[np.float32]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_double]) -> np.dtype[np.float64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
@overload
def _from_ctypes_scalar(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...
|
||||
172
env/Lib/site-packages/numpy/_core/_exceptions.py
vendored
Normal file
172
env/Lib/site-packages/numpy/_core/_exceptions.py
vendored
Normal file
|
|
@ -0,0 +1,172 @@
|
|||
"""
|
||||
Various richly-typed exceptions, that also help us deal with string formatting
|
||||
in python where it's easier.
|
||||
|
||||
By putting the formatting in `__str__`, we also avoid paying the cost for
|
||||
users who silence the exceptions.
|
||||
"""
|
||||
from .._utils import set_module
|
||||
|
||||
def _unpack_tuple(tup):
|
||||
if len(tup) == 1:
|
||||
return tup[0]
|
||||
else:
|
||||
return tup
|
||||
|
||||
|
||||
def _display_as_base(cls):
|
||||
"""
|
||||
A decorator that makes an exception class look like its base.
|
||||
|
||||
We use this to hide subclasses that are implementation details - the user
|
||||
should catch the base type, which is what the traceback will show them.
|
||||
|
||||
Classes decorated with this decorator are subject to removal without a
|
||||
deprecation warning.
|
||||
"""
|
||||
assert issubclass(cls, Exception)
|
||||
cls.__name__ = cls.__base__.__name__
|
||||
return cls
|
||||
|
||||
|
||||
class UFuncTypeError(TypeError):
    """ Base class for all ufunc exceptions """
    def __init__(self, ufunc):
        # The ufunc that raised; kept for introspection by handlers/subclasses.
        self.ufunc = ufunc
|
||||
|
||||
|
||||
@_display_as_base
class _UFuncNoLoopError(UFuncTypeError):
    """ Thrown when a ufunc loop cannot be found """
    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc)
        self.dtypes = tuple(dtypes)

    def __str__(self):
        # Split the flat dtype tuple into input and output halves.
        in_types = _unpack_tuple(self.dtypes[:self.ufunc.nin])
        out_types = _unpack_tuple(self.dtypes[self.ufunc.nin:])
        template = (
            "ufunc {!r} did not contain a loop with signature matching types "
            "{!r} -> {!r}"
        )
        return template.format(self.ufunc.__name__, in_types, out_types)
|
||||
|
||||
|
||||
@_display_as_base
class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    """ Thrown when a binary resolution fails """
    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc, dtypes)
        assert len(self.dtypes) == 2

    def __str__(self):
        msg = "ufunc {!r} cannot use operands with types {!r} and {!r}"
        return msg.format(self.ufunc.__name__, *self.dtypes)
|
||||
|
||||
|
||||
@_display_as_base
class _UFuncCastingError(UFuncTypeError):
    # Common base for input/output casting failures; stores the casting rule
    # and the source/target dtypes so subclasses can format their messages.
    def __init__(self, ufunc, casting, from_, to):
        super().__init__(ufunc)
        self.casting = casting
        self.from_ = from_
        self.to = to
|
||||
|
||||
|
||||
@_display_as_base
class _UFuncInputCastingError(_UFuncCastingError):
    """ Thrown when a ufunc input cannot be casted """
    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        self.in_i = i

    def __str__(self):
        # only show the number if more than one input exists
        which = "" if self.ufunc.nin == 1 else "{} ".format(self.in_i)
        return (
            "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
            "rule {!r}"
        ).format(self.ufunc.__name__, which, self.from_, self.to, self.casting)
|
||||
|
||||
|
||||
@_display_as_base
class _UFuncOutputCastingError(_UFuncCastingError):
    """ Thrown when a ufunc output cannot be casted """
    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        self.out_i = i

    def __str__(self):
        # only show the number if more than one output exists
        which = "" if self.ufunc.nout == 1 else "{} ".format(self.out_i)
        return (
            "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
            "rule {!r}"
        ).format(self.ufunc.__name__, which, self.from_, self.to, self.casting)
|
||||
|
||||
|
||||
@_display_as_base
class _ArrayMemoryError(MemoryError):
    """ Thrown when an array cannot be allocated"""
    def __init__(self, shape, dtype):
        # shape: tuple of ints; dtype: the dtype whose itemsize sizes each element.
        self.shape = shape
        self.dtype = dtype

    @property
    def _total_size(self):
        # Total allocation size in bytes: itemsize times the product of dims.
        num_bytes = self.dtype.itemsize
        for dim in self.shape:
            num_bytes *= dim
        return num_bytes

    @staticmethod
    def _size_to_string(num_bytes):
        """ Convert a number of bytes into a binary size string """

        # https://en.wikipedia.org/wiki/Binary_prefix
        LOG2_STEP = 10
        STEP = 1024
        units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']

        # Pick the largest unit whose value does not exceed num_bytes.
        unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
        unit_val = 1 << (unit_i * LOG2_STEP)
        n_units = num_bytes / unit_val
        del unit_val

        # ensure we pick a unit that is correct after rounding
        if round(n_units) == STEP:
            unit_i += 1
            n_units /= STEP

        # deal with sizes so large that we don't have units for them
        if unit_i >= len(units):
            new_unit_i = len(units) - 1
            n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
            unit_i = new_unit_i

        unit_name = units[unit_i]
        # format with a sensible number of digits
        if unit_i == 0:
            # no decimal point on bytes
            return '{:.0f} {}'.format(n_units, unit_name)
        elif round(n_units) < 1000:
            # 3 significant figures, if none are dropped to the left of the .
            return '{:#.3g} {}'.format(n_units, unit_name)
        else:
            # just give all the digits otherwise
            return '{:#.0f} {}'.format(n_units, unit_name)

    def __str__(self):
        size_str = self._size_to_string(self._total_size)
        return (
            "Unable to allocate {} for an array with shape {} and data type {}"
            .format(size_str, self.shape, self.dtype)
        )
|
||||
73
env/Lib/site-packages/numpy/_core/_exceptions.pyi
vendored
Normal file
73
env/Lib/site-packages/numpy/_core/_exceptions.pyi
vendored
Normal file
|
|
@ -0,0 +1,73 @@
|
|||
# Type stubs for numpy._core._exceptions (richly-typed ufunc/memory errors).
from collections.abc import Iterable
from typing import Any, Final, overload

from typing_extensions import TypeVar, Unpack

import numpy as np
from numpy import _CastingKind
from numpy._utils import set_module as set_module

###

_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, Unpack[tuple[Any, ...]]])
_ExceptionT = TypeVar("_ExceptionT", bound=Exception)

###

class UFuncTypeError(TypeError):
    ufunc: Final[np.ufunc]
    def __init__(self, /, ufunc: np.ufunc) -> None: ...

class _UFuncNoLoopError(UFuncTypeError):
    dtypes: tuple[np.dtype[Any], ...]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ...

class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    dtypes: tuple[np.dtype[Any], np.dtype[Any]]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ...

class _UFuncCastingError(UFuncTypeError):
    casting: Final[_CastingKind]
    from_: Final[np.dtype[Any]]
    to: Final[np.dtype[Any]]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype[Any], to: np.dtype[Any]) -> None: ...

class _UFuncInputCastingError(_UFuncCastingError):
    in_i: Final[int]
    def __init__(
        self,
        /,
        ufunc: np.ufunc,
        casting: _CastingKind,
        from_: np.dtype[Any],
        to: np.dtype[Any],
        i: int,
    ) -> None: ...

class _UFuncOutputCastingError(_UFuncCastingError):
    out_i: Final[int]
    def __init__(
        self,
        /,
        ufunc: np.ufunc,
        casting: _CastingKind,
        from_: np.dtype[Any],
        to: np.dtype[Any],
        i: int,
    ) -> None: ...

class _ArrayMemoryError(MemoryError):
    shape: tuple[int, ...]
    dtype: np.dtype[Any]
    def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype[Any]) -> None: ...
    @property
    def _total_size(self) -> int: ...
    @staticmethod
    def _size_to_string(num_bytes: int) -> str: ...

@overload
def _unpack_tuple(tup: tuple[_T]) -> _T: ...
@overload
def _unpack_tuple(tup: _TupleT) -> _TupleT: ...
def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...
|
||||
963
env/Lib/site-packages/numpy/_core/_internal.py
vendored
Normal file
963
env/Lib/site-packages/numpy/_core/_internal.py
vendored
Normal file
|
|
@ -0,0 +1,963 @@
|
|||
"""
|
||||
A place for internal code
|
||||
|
||||
Some things are more easily handled Python.
|
||||
|
||||
"""
|
||||
import ast
|
||||
import math
|
||||
import re
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
from ..exceptions import DTypePromotionError
|
||||
from .multiarray import dtype, array, ndarray, promote_types, StringDType
|
||||
from numpy import _NoValue
|
||||
try:
|
||||
import ctypes
|
||||
except ImportError:
|
||||
ctypes = None
|
||||
|
||||
IS_PYPY = sys.implementation.name == 'pypy'
|
||||
|
||||
if sys.byteorder == 'little':
|
||||
_nbo = '<'
|
||||
else:
|
||||
_nbo = '>'
|
||||
|
||||
def _makenames_list(adict, align):
|
||||
allfields = []
|
||||
|
||||
for fname, obj in adict.items():
|
||||
n = len(obj)
|
||||
if not isinstance(obj, tuple) or n not in (2, 3):
|
||||
raise ValueError("entry not a 2- or 3- tuple")
|
||||
if n > 2 and obj[2] == fname:
|
||||
continue
|
||||
num = int(obj[1])
|
||||
if num < 0:
|
||||
raise ValueError("invalid offset.")
|
||||
format = dtype(obj[0], align=align)
|
||||
if n > 2:
|
||||
title = obj[2]
|
||||
else:
|
||||
title = None
|
||||
allfields.append((fname, format, num, title))
|
||||
# sort by offsets
|
||||
allfields.sort(key=lambda x: x[2])
|
||||
names = [x[0] for x in allfields]
|
||||
formats = [x[1] for x in allfields]
|
||||
offsets = [x[2] for x in allfields]
|
||||
titles = [x[3] for x in allfields]
|
||||
|
||||
return names, formats, offsets, titles
|
||||
|
||||
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
    """Build a structured dtype from a fields dict.

    If the dict carries an explicit field order (stored by the C layer
    under the key ``-1``), that order is used; otherwise the order is
    derived from the field offsets via ``_makenames_list``.
    """
    try:
        # NOTE: -1 is a sentinel key, not an index; it holds the name order.
        names = adict[-1]
    except KeyError:
        names = None
    if names is None:
        names, formats, offsets, titles = _makenames_list(adict, align)
    else:
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            if len(res) > 2:
                titles.append(res[2])
            else:
                titles.append(None)

    return dtype({"names": names,
                  "formats": formats,
                  "offsets": offsets,
                  "titles": titles}, align)
|
||||
|
||||
|
||||
# construct an array_protocol descriptor list
|
||||
# from the fields attribute of a descriptor
|
||||
# This calls itself recursively but should eventually hit
|
||||
# a descriptor that has no fields and then return
|
||||
# a simple typestring
|
||||
|
||||
def _array_descr(descriptor):
    """Recursively build the array-protocol descr list for a dtype.

    Scalars return their typestring (optionally paired with metadata),
    subarrays return ``(base_descr, shape)``, and structured dtypes return
    a list of field tuples with explicit void padding between fields.

    Raises ValueError for overlapping or out-of-order fields, which the
    descr format cannot represent.
    """
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            return (_array_descr(subdtype[0]), subdtype[1])

    names = descriptor.names
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        # Insert unnamed void padding for any gap before this field.
        if field[1] > offset:
            num = field[1] - offset
            result.append(('', f'|V{num}'))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        # A 4-tuple field carries a title; pair it with the name.
        if len(field) > 3:
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    # Trailing padding up to the struct's full itemsize.
    if descriptor.itemsize > offset:
        num = descriptor.itemsize - offset
        result.append(('', f'|V{num}'))

    return result
|
||||
|
||||
|
||||
# format_re was originally from numarray by J. Todd Miller

# Matches one comma-separated format item: an optional byte-order mark, an
# optional (possibly parenthesized) repeat count, an optional second
# byte-order mark, and the dtype code with optional [metadata].
format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(r'\s*,\s*')  # separator between items
space_re = re.compile(r'\s+$')   # trailing padding only

# astr is a string (perhaps comma separated)

# '=' means native order; map it onto the concrete '<' or '>' for this host.
_convorder = {'=': _nbo}
|
||||
|
||||
def _commastring(astr):
    """Parse a (possibly comma-separated) dtype format string.

    Returns a single item for a lone format, or a list of items when the
    string contains separators. Each item is a format string, or a
    ``(format, repeats)`` tuple when a repeat count is present.

    Raises ValueError for unrecognized items or conflicting byte orders.
    """
    startindex = 0
    result = []
    islist = False
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            # mo is None (no match) -> .groups() raises AttributeError.
            raise ValueError(
                f'format number {len(result)+1} of "{astr}" is not recognized'
                ) from None
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result)+1, astr))
                startindex = mo.end()
                islist = True

        # Reconcile the two optional byte-order positions into one.
        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    'inconsistent byte-order specification %s and %s' %
                    (order1, order2))
            order = order1

        # Native / don't-care orders are dropped from the format string.
        if order in ('|', '=', _nbo):
            order = ''
        dtype = order + dtype
        if repeats == '':
            newitem = dtype
        else:
            if (repeats[0] == "(" and repeats[-1] == ")"
                    and repeats[1:-1].strip() != ""
                    and "," not in repeats):
                warnings.warn(
                    'Passing in a parenthesized single number for repeats '
                    'is deprecated; pass either a single number or indicate '
                    'a tuple with a comma, like "(2,)".', DeprecationWarning,
                    stacklevel=2)
            newitem = (dtype, ast.literal_eval(repeats))

        result.append(newitem)

    return result if islist else result[0]
|
||||
|
||||
class dummy_ctype:
    """Minimal stand-in for a ctypes type when ctypes is unavailable."""

    def __init__(self, cls):
        # The scalar constructor this dummy wraps (e.g. np.intp).
        self._cls = cls

    def __mul__(self, other):
        # Mimics ctypes array-type creation (`type * n`); the length is ignored.
        return self

    def __call__(self, *other):
        # Mimics instantiating the ctypes type: forward the args as a tuple.
        return self._cls(other)

    def __eq__(self, other):
        return self._cls == other._cls

    def __ne__(self, other):
        return self._cls != other._cls
|
||||
|
||||
def _getintp_ctype():
|
||||
val = _getintp_ctype.cache
|
||||
if val is not None:
|
||||
return val
|
||||
if ctypes is None:
|
||||
import numpy as np
|
||||
val = dummy_ctype(np.intp)
|
||||
else:
|
||||
char = dtype('n').char
|
||||
if char == 'i':
|
||||
val = ctypes.c_int
|
||||
elif char == 'l':
|
||||
val = ctypes.c_long
|
||||
elif char == 'q':
|
||||
val = ctypes.c_longlong
|
||||
else:
|
||||
val = ctypes.c_long
|
||||
_getintp_ctype.cache = val
|
||||
return val
|
||||
|
||||
|
||||
_getintp_ctype.cache = None
|
||||
|
||||
# Used for .ctypes attribute of ndarray
|
||||
|
||||
class _missing_ctypes:
|
||||
def cast(self, num, obj):
|
||||
return num.value
|
||||
|
||||
class c_void_p:
|
||||
def __init__(self, ptr):
|
||||
self.value = ptr
|
||||
|
||||
|
||||
class _ctypes:
    # Backs the `ndarray.ctypes` attribute: exposes the array's data pointer,
    # shape and strides as ctypes-compatible objects.
    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array

        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use
        the data as a pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.

        The returned pointer will keep a reference to the array.
        """
        # _ctypes.cast function causes a circular reference of self._data in
        # self._data._objects. Attributes of self._data cannot be released
        # until gc.collect is called. Make a copy of the pointer first then
        # let it hold the array reference. This is a workaround to circumvent
        # the CPython bug https://bugs.python.org/issue12836.
        ptr = self._ctypes.cast(self._data, obj)
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in
        correct byte-order. The memory area may not even be writeable.
        The array flags and data-type of this array should be respected
        when passing this attribute to arbitrary C-code to avoid trouble
        that can include Python crashing. User Beware! The value of this
        attribute is exactly the same as:
        ``self._array_interface_['data'][0]``.

        Note that unlike ``data_as``, a reference won't be kept to the array:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
        pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
        `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
        the platform. The ctypes array contains the shape of
        the underlying array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes
        array contains the strides information from the underlying array.
        This strides information is important for showing how many bytes
        must be jumped to get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method

        Enables `c_func(some_array.ctypes)`
        """
        return self.data_as(ctypes.c_void_p)

    # Numpy 1.21.0, 2021-05-18

    def get_data(self):
        """Deprecated getter for the `_ctypes.data` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_data" is deprecated. Use "data" instead',
                      DeprecationWarning, stacklevel=2)
        return self.data

    def get_shape(self):
        """Deprecated getter for the `_ctypes.shape` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_shape" is deprecated. Use "shape" instead',
                      DeprecationWarning, stacklevel=2)
        return self.shape

    def get_strides(self):
        """Deprecated getter for the `_ctypes.strides` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_strides" is deprecated. Use "strides" instead',
                      DeprecationWarning, stacklevel=2)
        return self.strides

    def get_as_parameter(self):
        """Deprecated getter for the `_ctypes._as_parameter_` property.

        .. deprecated:: 1.21
        """
        warnings.warn(
            '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
            DeprecationWarning, stacklevel=2,
        )
        return self._as_parameter_
|
||||
|
||||
|
||||
def _newnames(datatype, order):
|
||||
"""
|
||||
Given a datatype and an order object, return a new names tuple, with the
|
||||
order indicated
|
||||
"""
|
||||
oldnames = datatype.names
|
||||
nameslist = list(oldnames)
|
||||
if isinstance(order, str):
|
||||
order = [order]
|
||||
seen = set()
|
||||
if isinstance(order, (list, tuple)):
|
||||
for name in order:
|
||||
try:
|
||||
nameslist.remove(name)
|
||||
except ValueError:
|
||||
if name in seen:
|
||||
raise ValueError(f"duplicate field name: {name}") from None
|
||||
else:
|
||||
raise ValueError(f"unknown field name: {name}") from None
|
||||
seen.add(name)
|
||||
return tuple(list(order) + nameslist)
|
||||
raise ValueError(f"unsupported order value: {order}")
|
||||
|
||||
def _copy_fields(ary):
|
||||
"""Return copy of structured array with padding between fields removed.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ary : ndarray
|
||||
Structured array from which to remove padding bytes
|
||||
|
||||
Returns
|
||||
-------
|
||||
ary_copy : ndarray
|
||||
Copy of ary with padding bytes removed
|
||||
"""
|
||||
dt = ary.dtype
|
||||
copy_dtype = {'names': dt.names,
|
||||
'formats': [dt.fields[name][0] for name in dt.names]}
|
||||
return array(ary, dtype=copy_dtype, copy=True)
|
||||
|
||||
def _promote_fields(dt1, dt2):
    """ Perform type promotion for two structured dtypes.

    Parameters
    ----------
    dt1 : structured dtype
        First dtype.
    dt2 : structured dtype
        Second dtype.

    Returns
    -------
    out : dtype
        The promoted dtype

    Notes
    -----
    If one of the inputs is aligned, the result will be.  The titles of
    both descriptors must match (point to the same field).
    """
    # Both must be structured and have the same names in the same order
    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
        raise DTypePromotionError(
            f"field names `{dt1.names}` and `{dt2.names}` mismatch.")

    # if both are identical, we can (maybe!) just return the same dtype.
    identical = dt1 is dt2
    new_fields = []
    for name in dt1.names:
        # dtype.fields[name] is a tuple: (field dtype, byte offset[, title]).
        field1 = dt1.fields[name]
        field2 = dt2.fields[name]
        new_descr = promote_types(field1[0], field2[0])
        # `identical` stays True only while every promoted field dtype is
        # the exact same object as dt1's field dtype.
        identical = identical and new_descr is field1[0]

        # Check that the titles match (if given):
        if field1[2:] != field2[2:]:
            raise DTypePromotionError(
                f"field titles of field '{name}' mismatch")
        if len(field1) == 2:
            new_fields.append((name, new_descr))
        else:
            # Titled field: dtype() expects ((title, name), format) pairs.
            new_fields.append(((field1[2], name), new_descr))

    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)

    # Might as well preserve identity (and metadata) if the dtype is identical
    # and the itemsize, offsets are also unmodified. This could probably be
    # sped up, but also probably just be removed entirely.
    if identical and res.itemsize == dt1.itemsize:
        for name in dt1.names:
            # fields[name][1] is the byte offset of the field.
            if dt1.fields[name][1] != res.fields[name][1]:
                return res  # the dtype changed.
        return dt1

    return res
|
||||
|
||||
|
||||
def _getfield_is_safe(oldtype, newtype, offset):
|
||||
""" Checks safety of getfield for object arrays.
|
||||
|
||||
As in _view_is_safe, we need to check that memory containing objects is not
|
||||
reinterpreted as a non-object datatype and vice versa.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
oldtype : data-type
|
||||
Data type of the original ndarray.
|
||||
newtype : data-type
|
||||
Data type of the field being accessed by ndarray.getfield
|
||||
offset : int
|
||||
Offset of the field being accessed by ndarray.getfield
|
||||
|
||||
Raises
|
||||
------
|
||||
TypeError
|
||||
If the field access is invalid
|
||||
|
||||
"""
|
||||
if newtype.hasobject or oldtype.hasobject:
|
||||
if offset == 0 and newtype == oldtype:
|
||||
return
|
||||
if oldtype.names is not None:
|
||||
for name in oldtype.names:
|
||||
if (oldtype.fields[name][1] == offset and
|
||||
oldtype.fields[name][0] == newtype):
|
||||
return
|
||||
raise TypeError("Cannot get/set field of an object array")
|
||||
return
|
||||
|
||||
def _view_is_safe(oldtype, newtype):
|
||||
""" Checks safety of a view involving object arrays, for example when
|
||||
doing::
|
||||
|
||||
np.zeros(10, dtype=oldtype).view(newtype)
|
||||
|
||||
Parameters
|
||||
----------
|
||||
oldtype : data-type
|
||||
Data type of original ndarray
|
||||
newtype : data-type
|
||||
Data type of the view
|
||||
|
||||
Raises
|
||||
------
|
||||
TypeError
|
||||
If the new type is incompatible with the old type.
|
||||
|
||||
"""
|
||||
|
||||
# if the types are equivalent, there is no problem.
|
||||
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
|
||||
if oldtype == newtype:
|
||||
return
|
||||
|
||||
if newtype.hasobject or oldtype.hasobject:
|
||||
raise TypeError("Cannot change data-type for array of references.")
|
||||
return
|
||||
|
||||
|
||||
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype

# Translation table used when the buffer declares *native* sizes and
# alignment (byte-order markers '@' or '^'): each PEP 3118 format
# character maps to the platform-sized NumPy type character.
_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
# All single/double characters recognized in native mode.
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

# Translation table for *standard* (fixed-size) mode, used with the
# '=', '<', '>' and '!' byte-order markers; integer characters map to
# explicit-width NumPy types.
_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
# All single/double characters recognized in standard mode.
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

# PEP 3118 constructs that have no NumPy dtype equivalent; the parser
# raises NotImplementedError with the human-readable description.
_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
}
|
||||
|
||||
class _Stream:
|
||||
def __init__(self, s):
|
||||
self.s = s
|
||||
self.byteorder = '@'
|
||||
|
||||
def advance(self, n):
|
||||
res = self.s[:n]
|
||||
self.s = self.s[n:]
|
||||
return res
|
||||
|
||||
def consume(self, c):
|
||||
if self.s[:len(c)] == c:
|
||||
self.advance(len(c))
|
||||
return True
|
||||
return False
|
||||
|
||||
def consume_until(self, c):
|
||||
if callable(c):
|
||||
i = 0
|
||||
while i < len(self.s) and not c(self.s[i]):
|
||||
i = i + 1
|
||||
return self.advance(i)
|
||||
else:
|
||||
i = self.s.index(c)
|
||||
res = self.advance(i)
|
||||
self.advance(len(c))
|
||||
return res
|
||||
|
||||
@property
|
||||
def next(self):
|
||||
return self.s[0]
|
||||
|
||||
def __bool__(self):
|
||||
return bool(self.s)
|
||||
|
||||
|
||||
def _dtype_from_pep3118(spec):
    """Construct a NumPy dtype from a PEP 3118 buffer format string."""
    result, _alignment = __dtype_from_pep3118(_Stream(spec), is_subdtype=False)
    return result
|
||||
|
||||
def __dtype_from_pep3118(stream, is_subdtype):
    """Parse one (possibly nested) PEP 3118 struct body from *stream*.

    Returns a ``(dtype, common_alignment)`` pair.  Called with
    ``is_subdtype=False`` for the top level and recursively with
    ``is_subdtype=True`` for each ``T{...}`` sub-struct; parsing stops
    at the matching ``'}'`` or at end of input.
    """
    # Accumulator for the struct being built; passed to dtype() at the end.
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1): a leading '(d1,d2,...)' shape prefix.
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order ('!' is network order, an alias for big-endian '>').
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes: an optional decimal repeat/length count.
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            # Nested struct: recurse until its closing '}'.
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            # 'Z' prefixes a complex type and consumes two characters.
            if stream.next == 'Z':
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                # For strings/void the count is a length, not a repeat.
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                "Unrepresentable PEP 3118 data type {!r} ({})"
                .format(stream.next, desc))
        else:
            raise ValueError(
                "Unknown PEP 3118 data type specifier %r" % stream.s
            )

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly
        # implies that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2): apply the '(...)' shape prefix parsed earlier.
        if shape is not None:
            value = dtype((value, shape))

        # Field name: an optional ':name:' suffix.
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        # Anonymous padding fields are dropped; everything else is recorded.
        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError(
                    f"Duplicate field name '{name}' in PEP3118 format"
                )
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

    field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        # Give anonymous fields f0, f1, ... names before building the dtype.
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment
|
||||
|
||||
def _fix_names(field_spec):
|
||||
""" Replace names which are None with the next unused f%d name """
|
||||
names = field_spec['names']
|
||||
for i, name in enumerate(names):
|
||||
if name is not None:
|
||||
continue
|
||||
|
||||
j = 0
|
||||
while True:
|
||||
name = f'f{j}'
|
||||
if name not in names:
|
||||
break
|
||||
j = j + 1
|
||||
names[i] = name
|
||||
|
||||
def _add_trailing_padding(value, padding):
|
||||
"""Inject the specified number of padding bytes at the end of a dtype"""
|
||||
if value.fields is None:
|
||||
field_spec = dict(
|
||||
names=['f0'],
|
||||
formats=[value],
|
||||
offsets=[0],
|
||||
itemsize=value.itemsize
|
||||
)
|
||||
else:
|
||||
fields = value.fields
|
||||
names = value.names
|
||||
field_spec = dict(
|
||||
names=names,
|
||||
formats=[fields[name][0] for name in names],
|
||||
offsets=[fields[name][1] for name in names],
|
||||
itemsize=value.itemsize
|
||||
)
|
||||
|
||||
field_spec['itemsize'] += padding
|
||||
return dtype(field_spec)
|
||||
|
||||
def _prod(a):
|
||||
p = 1
|
||||
for x in a:
|
||||
p *= x
|
||||
return p
|
||||
|
||||
def _gcd(a, b):
|
||||
"""Calculate the greatest common divisor of a and b"""
|
||||
if not (math.isfinite(a) and math.isfinite(b)):
|
||||
raise ValueError('Can only find greatest common divisor of '
|
||||
f'finite arguments, found "{a}" and "{b}"')
|
||||
while b:
|
||||
a, b = b, a % b
|
||||
return a
|
||||
|
||||
def _lcm(a, b):
    """Least common multiple; divides before multiplying to limit growth."""
    divisor = _gcd(a, b)
    return (a // divisor) * b
|
||||
|
||||
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    # Render positional operands, then keyword arguments, as they were passed.
    pieces = [f'{arg!r}' for arg in inputs]
    pieces += [f'{key}={val!r}' for key, val in kwargs.items()]
    args_string = ', '.join(pieces)
    # `out` operands participate in dispatch, so list their types too.
    all_operands = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(op).__name__) for op in all_operands)
    return (
        'operand type(s) all returned NotImplemented from '
        f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): '
        f'{types_string}'
    )
|
||||
|
||||
|
||||
def array_function_errmsg_formatter(public_api, types):
    """ Format the error message for when __array_function__ gives up. """
    qualified_name = f'{public_api.__module__}.{public_api.__name__}'
    return (f"no implementation found for '{qualified_name}' on types "
            f"that implement __array_function__: {list(types)}")
|
||||
|
||||
|
||||
def _ufunc_doc_signature_formatter(ufunc):
|
||||
"""
|
||||
Builds a signature string which resembles PEP 457
|
||||
|
||||
This is used to construct the first line of the docstring
|
||||
"""
|
||||
|
||||
# input arguments are simple
|
||||
if ufunc.nin == 1:
|
||||
in_args = 'x'
|
||||
else:
|
||||
in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))
|
||||
|
||||
# output arguments are both keyword or positional
|
||||
if ufunc.nout == 0:
|
||||
out_args = ', /, out=()'
|
||||
elif ufunc.nout == 1:
|
||||
out_args = ', /, out=None'
|
||||
else:
|
||||
out_args = '[, {positional}], / [, out={default}]'.format(
|
||||
positional=', '.join(
|
||||
'out{}'.format(i+1) for i in range(ufunc.nout)),
|
||||
default=repr((None,)*ufunc.nout)
|
||||
)
|
||||
|
||||
# keyword only args depend on whether this is a gufunc
|
||||
kwargs = (
|
||||
", casting='same_kind'"
|
||||
", order='K'"
|
||||
", dtype=None"
|
||||
", subok=True"
|
||||
)
|
||||
|
||||
# NOTE: gufuncs may or may not support the `axis` parameter
|
||||
if ufunc.signature is None:
|
||||
kwargs = f", where=True{kwargs}[, signature]"
|
||||
else:
|
||||
kwargs += "[, signature, axes, axis]"
|
||||
|
||||
# join all the parts together
|
||||
return '{name}({in_args}{out_args}, *{kwargs})'.format(
|
||||
name=ufunc.__name__,
|
||||
in_args=in_args,
|
||||
out_args=out_args,
|
||||
kwargs=kwargs
|
||||
)
|
||||
|
||||
|
||||
def npy_ctypes_check(cls):
    """Return True when *cls* comes from the ctypes module.

    Used to work around a buffer-protocol bug for ctypes objects
    (bpo-10746).  Any failure while inspecting the class is treated as
    "not a ctypes type".
    """
    try:
        # ctypes classes are new-style, so have an __mro__. This probably
        # fails for ctypes classes with multiple inheritance.
        #   CPython: (..., _ctypes._CData, object)
        #   PyPy:    (..., _ctypes.basics._CData, Bufferable, object)
        base_index = -3 if IS_PYPY else -2
        ctype_base = cls.__mro__[base_index]
        # right now, they're part of the _ctypes module
        return '_ctypes' in ctype_base.__module__
    except Exception:
        return False
|
||||
|
||||
# used to handle the _NoValue default argument for na_object
|
||||
# in the C implementation of the __reduce__ method for stringdtype
|
||||
def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue):
|
||||
if na_object is _NoValue:
|
||||
return StringDType(coerce=coerce)
|
||||
return StringDType(coerce=coerce, na_object=na_object)
|
||||
72
env/Lib/site-packages/numpy/_core/_internal.pyi
vendored
Normal file
72
env/Lib/site-packages/numpy/_core/_internal.pyi
vendored
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
import ctypes as ct
|
||||
import re
|
||||
from collections.abc import Callable, Iterable
|
||||
from typing import Any, Final, Generic, overload
|
||||
|
||||
from typing_extensions import Self, TypeVar, deprecated
|
||||
|
||||
import numpy as np
|
||||
import numpy.typing as npt
|
||||
from numpy.ctypeslib import c_intp
|
||||
|
||||
_CastT = TypeVar("_CastT", bound=ct._CanCastTo)
|
||||
_T_co = TypeVar("_T_co", covariant=True)
|
||||
_CT = TypeVar("_CT", bound=ct._CData)
|
||||
_PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True)
|
||||
|
||||
###
|
||||
|
||||
IS_PYPY: Final[bool] = ...
|
||||
|
||||
format_re: Final[re.Pattern[str]] = ...
|
||||
sep_re: Final[re.Pattern[str]] = ...
|
||||
space_re: Final[re.Pattern[str]] = ...
|
||||
|
||||
###
|
||||
|
||||
# TODO: Let the likes of `shape_as` and `strides_as` return `None`
|
||||
# for 0D arrays once we've got shape-support
|
||||
|
||||
# Typing counterpart of the runtime ``_ctypes`` helper returned by
# ``ndarray.ctypes``.  ``_PT_co`` is the type of the raw data pointer
# (an ``int`` address, or ``None``).
class _ctypes(Generic[_PT_co]):
    @overload
    def __init__(self: _ctypes[None], /, array: npt.NDArray[Any], ptr: None = None) -> None: ...
    @overload
    def __init__(self, /, array: npt.NDArray[Any], ptr: _PT_co) -> None: ...

    # Read-only views of the array's data address, shape and strides.
    @property
    def data(self) -> _PT_co: ...
    @property
    def shape(self) -> ct.Array[c_intp]: ...
    @property
    def strides(self) -> ct.Array[c_intp]: ...
    @property
    def _as_parameter_(self) -> ct.c_void_p: ...

    # Casting helpers mirroring the runtime methods of the same names.
    def data_as(self, /, obj: type[_CastT]) -> _CastT: ...
    def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...
    def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...

    # Deprecated since NumPy 1.21; kept for backwards compatibility only.
    @deprecated('"get_data" is deprecated. Use "data" instead')
    def get_data(self, /) -> _PT_co: ...
    @deprecated('"get_shape" is deprecated. Use "shape" instead')
    def get_shape(self, /) -> ct.Array[c_intp]: ...
    @deprecated('"get_strides" is deprecated. Use "strides" instead')
    def get_strides(self, /) -> ct.Array[c_intp]: ...
    @deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead')
    def get_as_parameter(self, /) -> ct.c_void_p: ...
|
||||
|
||||
# Stand-in that mimics just enough of a ctypes scalar type
# (construction, ==/!=, *) for use where real ctypes is unavailable.
class dummy_ctype(Generic[_T_co]):
    _cls: type[_T_co]

    def __init__(self, /, cls: type[_T_co]) -> None: ...
    def __eq__(self, other: Self, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
    def __ne__(self, other: Self, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
    def __mul__(self, other: object, /) -> Self: ...
    def __call__(self, /, *other: object) -> _T_co: ...
|
||||
|
||||
# Error-message formatters and the ctypes-class predicate defined in
# the runtime module `numpy._core._internal`.
def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ...
def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ...
def npy_ctypes_check(cls: type) -> bool: ...
|
||||
356
env/Lib/site-packages/numpy/_core/_machar.py
vendored
Normal file
356
env/Lib/site-packages/numpy/_core/_machar.py
vendored
Normal file
|
|
@ -0,0 +1,356 @@
|
|||
"""
|
||||
Machine arithmetic - determine the parameters of the
|
||||
floating-point arithmetic system
|
||||
|
||||
Author: Pearu Peterson, September 2003
|
||||
|
||||
"""
|
||||
__all__ = ['MachAr']
|
||||
|
||||
from .fromnumeric import any
|
||||
from ._ufunc_config import errstate
|
||||
from .._utils import set_module
|
||||
|
||||
# Need to speed this up...especially for longdouble
|
||||
|
||||
# Deprecated 2021-10-20, NumPy 1.22
|
||||
class MachAr:
|
||||
"""
|
||||
Diagnosing machine parameters.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
ibeta : int
|
||||
Radix in which numbers are represented.
|
||||
it : int
|
||||
Number of base-`ibeta` digits in the floating point mantissa M.
|
||||
machep : int
|
||||
Exponent of the smallest (most negative) power of `ibeta` that,
|
||||
added to 1.0, gives something different from 1.0
|
||||
eps : float
|
||||
Floating-point number ``beta**machep`` (floating point precision)
|
||||
negep : int
|
||||
Exponent of the smallest power of `ibeta` that, subtracted
|
||||
from 1.0, gives something different from 1.0.
|
||||
epsneg : float
|
||||
Floating-point number ``beta**negep``.
|
||||
iexp : int
|
||||
Number of bits in the exponent (including its sign and bias).
|
||||
minexp : int
|
||||
Smallest (most negative) power of `ibeta` consistent with there
|
||||
being no leading zeros in the mantissa.
|
||||
xmin : float
|
||||
Floating-point number ``beta**minexp`` (the smallest [in
|
||||
magnitude] positive floating point number with full precision).
|
||||
maxexp : int
|
||||
Smallest (positive) power of `ibeta` that causes overflow.
|
||||
xmax : float
|
||||
``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
|
||||
usable floating value).
|
||||
irnd : int
|
||||
In ``range(6)``, information on what kind of rounding is done
|
||||
in addition, and on how underflow is handled.
|
||||
ngrd : int
|
||||
Number of 'guard digits' used when truncating the product
|
||||
of two mantissas to fit the representation.
|
||||
epsilon : float
|
||||
Same as `eps`.
|
||||
tiny : float
|
||||
An alias for `smallest_normal`, kept for backwards compatibility.
|
||||
huge : float
|
||||
Same as `xmax`.
|
||||
precision : float
|
||||
``- int(-log10(eps))``
|
||||
resolution : float
|
||||
``- 10**(-precision)``
|
||||
smallest_normal : float
|
||||
The smallest positive floating point number with 1 as leading bit in
|
||||
the mantissa following IEEE-754. Same as `xmin`.
|
||||
smallest_subnormal : float
|
||||
The smallest positive floating point number with 0 as leading bit in
|
||||
the mantissa following IEEE-754.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
float_conv : function, optional
|
||||
Function that converts an integer or integer array to a float
|
||||
or float array. Default is `float`.
|
||||
int_conv : function, optional
|
||||
Function that converts a float or float array to an integer or
|
||||
integer array. Default is `int`.
|
||||
float_to_float : function, optional
|
||||
Function that converts a float array to float. Default is `float`.
|
||||
Note that this does not seem to do anything useful in the current
|
||||
implementation.
|
||||
float_to_str : function, optional
|
||||
Function that converts a single float to a string. Default is
|
||||
``lambda v:'%24.16e' %v``.
|
||||
title : str, optional
|
||||
Title that is printed in the string representation of `MachAr`.
|
||||
|
||||
See Also
|
||||
--------
|
||||
finfo : Machine limits for floating point types.
|
||||
iinfo : Machine limits for integer types.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Press, Teukolsky, Vetterling and Flannery,
|
||||
"Numerical Recipes in C++," 2nd ed,
|
||||
Cambridge University Press, 2002, p. 31.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, float_conv=float,int_conv=int,
|
||||
float_to_float=float,
|
||||
float_to_str=lambda v:'%24.16e' % v,
|
||||
title='Python floating point number'):
|
||||
"""
|
||||
|
||||
float_conv - convert integer to float (array)
|
||||
int_conv - convert float (array) to integer
|
||||
float_to_float - convert float array to float
|
||||
float_to_str - convert array float to str
|
||||
title - description of used floating point numbers
|
||||
|
||||
"""
|
||||
# We ignore all errors here because we are purposely triggering
|
||||
# underflow to detect the properties of the running arch.
|
||||
with errstate(under='ignore'):
|
||||
self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
|
||||
|
||||
def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
|
||||
max_iterN = 10000
|
||||
msg = "Did not converge after %d tries with %s"
|
||||
one = float_conv(1)
|
||||
two = one + one
|
||||
zero = one - one
|
||||
|
||||
# Do we really need to do this? Aren't they 2 and 2.0?
|
||||
# Determine ibeta and beta
|
||||
a = one
|
||||
for _ in range(max_iterN):
|
||||
a = a + a
|
||||
temp = a + one
|
||||
temp1 = temp - a
|
||||
if any(temp1 - one != zero):
|
||||
break
|
||||
else:
|
||||
raise RuntimeError(msg % (_, one.dtype))
|
||||
b = one
|
||||
for _ in range(max_iterN):
|
||||
b = b + b
|
||||
temp = a + b
|
||||
itemp = int_conv(temp-a)
|
||||
if any(itemp != 0):
|
||||
break
|
||||
else:
|
||||
raise RuntimeError(msg % (_, one.dtype))
|
||||
ibeta = itemp
|
||||
beta = float_conv(ibeta)
|
||||
|
||||
# Determine it and irnd
|
||||
it = -1
|
||||
b = one
|
||||
for _ in range(max_iterN):
|
||||
it = it + 1
|
||||
b = b * beta
|
||||
temp = b + one
|
||||
temp1 = temp - b
|
||||
if any(temp1 - one != zero):
|
||||
break
|
||||
else:
|
||||
raise RuntimeError(msg % (_, one.dtype))
|
||||
|
||||
betah = beta / two
|
||||
a = one
|
||||
for _ in range(max_iterN):
|
||||
a = a + a
|
||||
temp = a + one
|
||||
temp1 = temp - a
|
||||
if any(temp1 - one != zero):
|
||||
break
|
||||
else:
|
||||
raise RuntimeError(msg % (_, one.dtype))
|
||||
temp = a + betah
|
||||
irnd = 0
|
||||
if any(temp-a != zero):
|
||||
irnd = 1
|
||||
tempa = a + beta
|
||||
temp = tempa + betah
|
||||
if irnd == 0 and any(temp-tempa != zero):
|
||||
irnd = 2
|
||||
|
||||
# Determine negep and epsneg
|
||||
negep = it + 3
|
||||
betain = one / beta
|
||||
a = one
|
||||
for i in range(negep):
|
||||
a = a * betain
|
||||
b = a
|
||||
for _ in range(max_iterN):
|
||||
temp = one - a
|
||||
if any(temp-one != zero):
|
||||
break
|
||||
a = a * beta
|
||||
negep = negep - 1
|
||||
# Prevent infinite loop on PPC with gcc 4.0:
|
||||
if negep < 0:
|
||||
raise RuntimeError("could not determine machine tolerance "
|
||||
"for 'negep', locals() -> %s" % (locals()))
|
||||
else:
|
||||
raise RuntimeError(msg % (_, one.dtype))
|
||||
negep = -negep
|
||||
epsneg = a
|
||||
|
||||
# Determine machep and eps
|
||||
machep = - it - 3
|
||||
a = b
|
||||
|
||||
for _ in range(max_iterN):
|
||||
temp = one + a
|
||||
if any(temp-one != zero):
|
||||
break
|
||||
a = a * beta
|
||||
machep = machep + 1
|
||||
else:
|
||||
raise RuntimeError(msg % (_, one.dtype))
|
||||
eps = a
|
||||
|
||||
# Determine ngrd
|
||||
ngrd = 0
|
||||
temp = one + eps
|
||||
if irnd == 0 and any(temp*one - one != zero):
|
||||
ngrd = 1
|
||||
|
||||
# Determine iexp
|
||||
i = 0
|
||||
k = 1
|
||||
z = betain
|
||||
t = one + eps
|
||||
nxres = 0
|
||||
for _ in range(max_iterN):
|
||||
y = z
|
||||
z = y*y
|
||||
a = z*one # Check here for underflow
|
||||
temp = z*t
|
||||
if any(a+a == zero) or any(abs(z) >= y):
|
||||
break
|
||||
temp1 = temp * betain
|
||||
if any(temp1*beta == z):
|
||||
break
|
||||
i = i + 1
|
||||
k = k + k
|
||||
else:
|
||||
raise RuntimeError(msg % (_, one.dtype))
|
||||
if ibeta != 10:
|
||||
iexp = i + 1
|
||||
mx = k + k
|
||||
else:
|
||||
iexp = 2
|
||||
iz = ibeta
|
||||
while k >= iz:
|
||||
iz = iz * ibeta
|
||||
iexp = iexp + 1
|
||||
mx = iz + iz - 1
|
||||
|
||||
# Determine minexp and xmin
|
||||
for _ in range(max_iterN):
|
||||
xmin = y
|
||||
y = y * betain
|
||||
a = y * one
|
||||
temp = y * t
|
||||
if any((a + a) != zero) and any(abs(y) < xmin):
|
||||
k = k + 1
|
||||
temp1 = temp * betain
|
||||
if any(temp1*beta == y) and any(temp != y):
|
||||
nxres = 3
|
||||
xmin = y
|
||||
break
|
||||
else:
|
||||
break
|
||||
else:
|
||||
raise RuntimeError(msg % (_, one.dtype))
|
||||
minexp = -k
|
||||
|
||||
# Determine maxexp, xmax
|
||||
if mx <= k + k - 3 and ibeta != 10:
|
||||
mx = mx + mx
|
||||
iexp = iexp + 1
|
||||
maxexp = mx + minexp
|
||||
irnd = irnd + nxres
|
||||
if irnd >= 2:
|
||||
maxexp = maxexp - 2
|
||||
i = maxexp + minexp
|
||||
if ibeta == 2 and not i:
|
||||
maxexp = maxexp - 1
|
||||
if i > 20:
|
||||
maxexp = maxexp - 1
|
||||
if any(a != y):
|
||||
maxexp = maxexp - 2
|
||||
xmax = one - epsneg
|
||||
if any(xmax*one != xmax):
|
||||
xmax = one - beta*epsneg
|
||||
xmax = xmax / (xmin*beta*beta*beta)
|
||||
i = maxexp + minexp + 3
|
||||
for j in range(i):
|
||||
if ibeta == 2:
|
||||
xmax = xmax + xmax
|
||||
else:
|
||||
xmax = xmax * beta
|
||||
|
||||
smallest_subnormal = abs(xmin / beta ** (it))
|
||||
|
||||
self.ibeta = ibeta
|
||||
self.it = it
|
||||
self.negep = negep
|
||||
self.epsneg = float_to_float(epsneg)
|
||||
self._str_epsneg = float_to_str(epsneg)
|
||||
self.machep = machep
|
||||
self.eps = float_to_float(eps)
|
||||
self._str_eps = float_to_str(eps)
|
||||
self.ngrd = ngrd
|
||||
self.iexp = iexp
|
||||
self.minexp = minexp
|
||||
self.xmin = float_to_float(xmin)
|
||||
self._str_xmin = float_to_str(xmin)
|
||||
self.maxexp = maxexp
|
||||
self.xmax = float_to_float(xmax)
|
||||
self._str_xmax = float_to_str(xmax)
|
||||
self.irnd = irnd
|
||||
|
||||
self.title = title
|
||||
# Commonly used parameters
|
||||
self.epsilon = self.eps
|
||||
self.tiny = self.xmin
|
||||
self.huge = self.xmax
|
||||
self.smallest_normal = self.xmin
|
||||
self._str_smallest_normal = float_to_str(self.xmin)
|
||||
self.smallest_subnormal = float_to_float(smallest_subnormal)
|
||||
self._str_smallest_subnormal = float_to_str(smallest_subnormal)
|
||||
|
||||
import math
|
||||
self.precision = int(-math.log10(float_to_float(self.eps)))
|
||||
ten = two + two + two + two + two
|
||||
resolution = ten ** (-self.precision)
|
||||
self.resolution = float_to_float(resolution)
|
||||
self._str_resolution = float_to_str(resolution)
|
||||
|
||||
    def __str__(self):
        """Return a multi-line, human-readable summary of the machine
        parameters.

        Every ``%(name)s`` field is filled from ``self.__dict__``, so it
        relies on the attributes set in ``__init__`` (including the
        pre-formatted ``_str_*`` string versions of the float values).
        """
        fmt = (
            'Machine parameters for %(title)s\n'
            '---------------------------------------------------------------------\n'
            'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
            'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
            'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
            'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
            'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
            'smallest_normal=%(smallest_normal)s '
            'smallest_subnormal=%(smallest_subnormal)s\n'
            '---------------------------------------------------------------------\n'
            )
        return fmt % self.__dict__
|
||||
|
||||
|
||||
# Self-test entry point: print the machine parameters for the default
# float type when the module is run as a script.
if __name__ == '__main__':
    print(MachAr())
|
||||
73
env/Lib/site-packages/numpy/_core/_machar.pyi
vendored
Normal file
73
env/Lib/site-packages/numpy/_core/_machar.pyi
vendored
Normal file
|
|
@ -0,0 +1,73 @@
|
|||
from collections.abc import Iterable
|
||||
from typing import Any, Final, overload
|
||||
|
||||
from typing_extensions import TypeVar, Unpack
|
||||
|
||||
import numpy as np
|
||||
from numpy import _CastingKind
|
||||
from numpy._utils import set_module as set_module
|
||||
|
||||
###
|
||||
|
||||
_T = TypeVar("_T")
|
||||
_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, Unpack[tuple[Any, ...]]])
|
||||
_ExceptionT = TypeVar("_ExceptionT", bound=Exception)
|
||||
|
||||
###
|
||||
|
||||
class UFuncTypeError(TypeError):
    # Base class for ufunc type-resolution errors; carries the offending ufunc.
    ufunc: Final[np.ufunc]
    def __init__(self, /, ufunc: np.ufunc) -> None: ...
|
||||
|
||||
class _UFuncNoLoopError(UFuncTypeError):
    # Adds the tuple of operand dtypes involved in the failed resolution.
    dtypes: tuple[np.dtype[Any], ...]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ...
|
||||
|
||||
class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    # Specialization for binary ufuncs: exactly two dtypes.
    dtypes: tuple[np.dtype[Any], np.dtype[Any]]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ...
|
||||
|
||||
class _UFuncCastingError(UFuncTypeError):
    # Records the casting rule and the source/target dtypes of the
    # disallowed cast.
    casting: Final[_CastingKind]
    from_: Final[np.dtype[Any]]
    to: Final[np.dtype[Any]]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype[Any], to: np.dtype[Any]) -> None: ...
|
||||
|
||||
class _UFuncInputCastingError(_UFuncCastingError):
    # Casting error on an *input* operand; `in_i` is its positional index.
    in_i: Final[int]
    def __init__(
        self,
        /,
        ufunc: np.ufunc,
        casting: _CastingKind,
        from_: np.dtype[Any],
        to: np.dtype[Any],
        i: int,
    ) -> None: ...
|
||||
|
||||
class _UFuncOutputCastingError(_UFuncCastingError):
    # Casting error on an *output* operand; `out_i` is its positional index.
    out_i: Final[int]
    def __init__(
        self,
        /,
        ufunc: np.ufunc,
        casting: _CastingKind,
        from_: np.dtype[Any],
        to: np.dtype[Any],
        i: int,
    ) -> None: ...
|
||||
|
||||
class _ArrayMemoryError(MemoryError):
    # MemoryError enriched with the shape and dtype of the failed array
    # allocation; helpers format the requested size for the message.
    shape: tuple[int, ...]
    dtype: np.dtype[Any]
    def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype[Any]) -> None: ...
    @property
    def _total_size(self) -> int: ...
    @staticmethod
    def _size_to_string(num_bytes: int) -> str: ...
|
||||
|
||||
# Unwrap a 1-tuple to its sole element; tuples of other lengths pass
# through unchanged (per the overload signatures).
@overload
def _unpack_tuple(tup: tuple[_T]) -> _T: ...
@overload
def _unpack_tuple(tup: _TupleT) -> _TupleT: ...

# Class decorator: returns the exception class with its declared type
# preserved. See the runtime implementation for the display behavior —
# NOTE(review): presumed to adjust how the class renders; confirm there.
def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...
|
||||
256
env/Lib/site-packages/numpy/_core/_methods.py
vendored
Normal file
256
env/Lib/site-packages/numpy/_core/_methods.py
vendored
Normal file
|
|
@ -0,0 +1,256 @@
|
|||
"""
|
||||
Array methods which are called by both the C-code for the method
|
||||
and the Python code for the NumPy-namespace function
|
||||
|
||||
"""
|
||||
import os
|
||||
import pickle
|
||||
import warnings
|
||||
from contextlib import nullcontext
|
||||
|
||||
import numpy as np
|
||||
from numpy._core import multiarray as mu
|
||||
from numpy._core import umath as um
|
||||
from numpy._core.multiarray import asanyarray
|
||||
from numpy._core import numerictypes as nt
|
||||
from numpy._core import _exceptions
|
||||
from numpy._globals import _NoValue
|
||||
|
||||
# save those O(100) nanoseconds!
# Pre-bound dtype and ufunc-reduce aliases: module-level names skip the
# repeated attribute lookups on every call in the hot reduction paths.
bool_dt = mu.dtype("bool")
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_bitwise_count = um.bitwise_count
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce

# Complex types to -> (2,)float view for fast-path computation in _var()
_complex_to_float = {
    nt.dtype(nt.csingle) : nt.dtype(nt.single),
    nt.dtype(nt.cdouble) : nt.dtype(nt.double),
}
# Special case for windows: ensure double takes precedence
if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
    _complex_to_float.update({
        nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble),
    })
|
||||
|
||||
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    """Maximum reduction via ``maximum.reduce``, positional-only for speed."""
    return umr_maximum(a, axis, None, out, keepdims, initial, where)
|
||||
|
||||
def _amin(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    """Minimum reduction via ``minimum.reduce``, positional-only for speed."""
    return umr_minimum(a, axis, None, out, keepdims, initial, where)
|
||||
|
||||
def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
         initial=_NoValue, where=True):
    """Sum reduction via ``add.reduce``, positional-only for speed."""
    return umr_sum(a, axis, dtype, out, keepdims, initial, where)
|
||||
|
||||
def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    """Product reduction via ``multiply.reduce``, positional-only for speed."""
    return umr_prod(a, axis, dtype, out, keepdims, initial, where)
|
||||
|
||||
def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    """Logical-or reduction, defaulting to a boolean result dtype."""
    # By default, return a boolean for any and all.
    result_dtype = bool_dt if dtype is None else dtype
    # Keyword parsing is comparatively slow, so only pass `where` through
    # when the caller actually supplied a mask.
    if where is True:
        return umr_any(a, axis, result_dtype, out, keepdims)
    return umr_any(a, axis, result_dtype, out, keepdims, where=where)
|
||||
|
||||
def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    """Logical-and reduction, defaulting to a boolean result dtype."""
    # By default, return a boolean for any and all.
    result_dtype = bool_dt if dtype is None else dtype
    # Keyword parsing is comparatively slow, so only pass `where` through
    # when the caller actually supplied a mask.
    if where is True:
        return umr_all(a, axis, result_dtype, out, keepdims)
    return umr_all(a, axis, result_dtype, out, keepdims, where=where)
|
||||
|
||||
def _count_reduce_items(arr, axis, keepdims=False, where=True):
|
||||
# fast-path for the default case
|
||||
if where is True:
|
||||
# no boolean mask given, calculate items according to axis
|
||||
if axis is None:
|
||||
axis = tuple(range(arr.ndim))
|
||||
elif not isinstance(axis, tuple):
|
||||
axis = (axis,)
|
||||
items = 1
|
||||
for ax in axis:
|
||||
items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
|
||||
items = nt.intp(items)
|
||||
else:
|
||||
# TODO: Optimize case when `where` is broadcast along a non-reduction
|
||||
# axis and full sum is more excessive than needed.
|
||||
|
||||
# guarded to protect circular imports
|
||||
from numpy.lib._stride_tricks_impl import broadcast_to
|
||||
# count True values in (potentially broadcasted) boolean mask
|
||||
items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None,
|
||||
keepdims)
|
||||
return items
|
||||
|
||||
def _clip(a, min=None, max=None, out=None, **kwargs):
|
||||
if a.dtype.kind in "iu":
|
||||
# If min/max is a Python integer, deal with out-of-bound values here.
|
||||
# (This enforces NEP 50 rules as no value based promotion is done.)
|
||||
if type(min) is int and min <= np.iinfo(a.dtype).min:
|
||||
min = None
|
||||
if type(max) is int and max >= np.iinfo(a.dtype).max:
|
||||
max = None
|
||||
|
||||
if min is None and max is None:
|
||||
# return identity
|
||||
return um.positive(a, out=out, **kwargs)
|
||||
elif min is None:
|
||||
return um.minimum(a, max, out=out, **kwargs)
|
||||
elif max is None:
|
||||
return um.maximum(a, min, out=out, **kwargs)
|
||||
else:
|
||||
return um.clip(a, min, max, out=out, **kwargs)
|
||||
|
||||
def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    """Arithmetic mean computed as sum/count, warning on empty slices."""
    arr = asanyarray(a)

    is_float16_result = False

    rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
    # With a `where` mask, rcount is an array: warn if any slice is empty.
    if rcount == 0 if where is True else umr_any(rcount == 0, axis=None):
        warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)

    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None:
        if issubclass(arr.dtype.type, (nt.integer, nt.bool)):
            dtype = mu.dtype('f8')
        elif issubclass(arr.dtype.type, nt.float16):
            # Accumulate float16 in float32, then cast the result back below.
            dtype = mu.dtype('f4')
            is_float16_result = True

    ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
    if isinstance(ret, mu.ndarray):
        # In-place unsafe-cast divide avoids an extra allocation.
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
        if is_float16_result and out is None:
            # No user-supplied out: cast the f4 accumulation back to f2.
            ret = arr.dtype.type(ret)
    elif hasattr(ret, 'dtype'):
        # numpy scalar result: keep a scalar of the appropriate dtype.
        if is_float16_result:
            ret = arr.dtype.type(ret / rcount)
        else:
            ret = ret.dtype.type(ret / rcount)
    else:
        # Plain Python scalar (e.g. from object arrays).
        ret = ret / rcount

    return ret
|
||||
|
||||
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
         where=True, mean=None):
    """Variance: mean of squared deviations with `ddof` degrees of freedom.

    A precomputed `mean` may be passed to skip recomputing it —
    NOTE(review): presumably used by mean-sharing callers; confirm.
    """
    arr = asanyarray(a)

    rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
    # Make this warning show up on top.
    if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None):
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
                      stacklevel=2)

    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool)):
        dtype = mu.dtype('f8')

    if mean is not None:
        arrmean = mean
    else:
        # Compute the mean.
        # Note that if dtype is not of inexact type then arraymean will
        # not be either.
        arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where)
        # The shape of rcount has to match arrmean to not change the shape of
        # out in broadcasting. Otherwise, it cannot be stored back to arrmean.
        if rcount.ndim == 0:
            # fast-path for default case when where is True
            div = rcount
        else:
            # matching rcount to arrmean when where is specified as array
            div = rcount.reshape(arrmean.shape)
        if isinstance(arrmean, mu.ndarray):
            # In-place unsafe-cast divide avoids an extra allocation.
            arrmean = um.true_divide(arrmean, div, out=arrmean,
                                     casting='unsafe', subok=False)
        elif hasattr(arrmean, "dtype"):
            # numpy scalar: keep a scalar of the appropriate dtype.
            arrmean = arrmean.dtype.type(arrmean / rcount)
        else:
            # Plain Python scalar (e.g. from object arrays).
            arrmean = arrmean / rcount

    # Compute sum of squared deviations from mean
    # Note that x may not be inexact and that we need it to be an array,
    # not a scalar.
    x = asanyarray(arr - arrmean)

    if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
        # Real-valued fast path: square in place.
        x = um.multiply(x, x, out=x)
    # Fast-paths for built-in complex types
    elif x.dtype in _complex_to_float:
        # View complex as pairs of floats and sum the squared components.
        xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
        um.multiply(xv, xv, out=xv)
        x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
    # Most general case; includes handling object arrays containing imaginary
    # numbers and complex types with non-native byteorder
    else:
        x = um.multiply(x, um.conjugate(x), out=x).real

    ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)

    # Compute degrees of freedom and make sure it is not negative.
    rcount = um.maximum(rcount - ddof, 0)

    # divide by degrees of freedom
    if isinstance(ret, mu.ndarray):
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
    elif hasattr(ret, 'dtype'):
        ret = ret.dtype.type(ret / rcount)
    else:
        ret = ret / rcount

    return ret
|
||||
|
||||
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
         where=True, mean=None):
    """Standard deviation: square root of ``_var``, in place when possible."""
    variance = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                    keepdims=keepdims, where=where, mean=mean)

    if isinstance(variance, mu.ndarray):
        # ndarray result: take the square root in place.
        return um.sqrt(variance, out=variance)
    if hasattr(variance, 'dtype'):
        # numpy scalar: preserve its dtype through the sqrt.
        return variance.dtype.type(um.sqrt(variance))
    return um.sqrt(variance)
|
||||
|
||||
def _ptp(a, axis=None, out=None, keepdims=False):
    """Peak-to-peak range: max - min along `axis`, stored into `out`."""
    # `out` is reused for the max reduction and then as the subtract output.
    return um.subtract(
        umr_maximum(a, axis, None, out, keepdims),
        umr_minimum(a, axis, None, None, keepdims),
        out
    )
|
||||
|
||||
def _dump(self, file, protocol=2):
|
||||
if hasattr(file, 'write'):
|
||||
ctx = nullcontext(file)
|
||||
else:
|
||||
ctx = open(os.fspath(file), "wb")
|
||||
with ctx as f:
|
||||
pickle.dump(self, f, protocol=protocol)
|
||||
|
||||
def _dumps(self, protocol=2):
    """Return *self* pickled to bytes with the given protocol."""
    return pickle.dumps(self, protocol=protocol)
|
||||
|
||||
def _bitwise_count(a, out=None, *, where=True, casting='same_kind',
                   order='K', dtype=None, subok=True):
    """Delegate to ``bitwise_count`` with the full ufunc keyword set."""
    return umr_bitwise_count(a, out, where=where, casting=casting,
                             order=order, dtype=dtype, subok=subok)
|
||||
24
env/Lib/site-packages/numpy/_core/_methods.pyi
vendored
Normal file
24
env/Lib/site-packages/numpy/_core/_methods.pyi
vendored
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
from collections.abc import Callable
|
||||
from typing import Any, TypeAlias
|
||||
|
||||
from typing_extensions import Concatenate
|
||||
|
||||
import numpy as np
|
||||
|
||||
from . import _exceptions as _exceptions
|
||||
|
||||
###
|
||||
|
||||
# Loose callable type for the pre-bound ufunc ``.reduce`` aliases below.
_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any]

###

# Stub declarations mirroring the runtime module's fast-path bindings.
bool_dt: np.dtype[np.bool] = ...
umr_maximum: _Reduce2 = ...
umr_minimum: _Reduce2 = ...
umr_sum: _Reduce2 = ...
umr_prod: _Reduce2 = ...
umr_bitwise_count = np.bitwise_count
umr_any: _Reduce2 = ...
umr_all: _Reduce2 = ...
# Complex-to-float dtype mapping used by the runtime _var fast path.
_complex_to_float: dict[np.dtype[np.complexfloating], np.dtype[np.floating]] = ...
|
||||
BIN
env/Lib/site-packages/numpy/_core/_multiarray_tests.cp310-win_amd64.lib
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/_multiarray_tests.cp310-win_amd64.lib
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/_multiarray_tests.cp310-win_amd64.pyd
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/_multiarray_tests.cp310-win_amd64.pyd
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/_multiarray_umath.cp310-win_amd64.lib
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/_multiarray_umath.cp310-win_amd64.lib
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/_multiarray_umath.cp310-win_amd64.pyd
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/_multiarray_umath.cp310-win_amd64.pyd
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/_operand_flag_tests.cp310-win_amd64.lib
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/_operand_flag_tests.cp310-win_amd64.lib
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/_operand_flag_tests.cp310-win_amd64.pyd
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/_operand_flag_tests.cp310-win_amd64.pyd
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/_rational_tests.cp310-win_amd64.lib
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/_rational_tests.cp310-win_amd64.lib
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/_rational_tests.cp310-win_amd64.pyd
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/_rational_tests.cp310-win_amd64.pyd
vendored
Normal file
Binary file not shown.
BIN
env/Lib/site-packages/numpy/_core/_simd.cp310-win_amd64.lib
vendored
Normal file
BIN
env/Lib/site-packages/numpy/_core/_simd.cp310-win_amd64.lib
vendored
Normal file
Binary file not shown.
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue