From 46acdc4c4f0f85c68a506b06442dee4e0dd2c1aa Mon Sep 17 00:00:00 2001
From: DongHun Kwak
Date: Thu, 31 Dec 2020 12:08:31 +0900
Subject: [PATCH] Imported Upstream version 0.28.0
---
.travis.yml | 24 +-
CHANGES.rst | 158 ++++-
Cython/Build/Dependencies.py | 115 ++--
Cython/CodeWriter.py | 2 +-
Cython/Compiler/Annotate.py | 23 +-
Cython/Compiler/Buffer.py | 8 +-
Cython/Compiler/Builtin.py | 7 +-
Cython/Compiler/CmdLine.py | 2 +
Cython/Compiler/Code.py | 184 +++++-
Cython/Compiler/CodeGeneration.py | 2 +-
Cython/Compiler/CythonScope.py | 4 +
Cython/Compiler/ExprNodes.py | 554 +++++++++-------
Cython/Compiler/FusedNode.py | 49 +-
Cython/Compiler/Main.py | 35 +-
Cython/Compiler/MemoryView.py | 12 +-
Cython/Compiler/ModuleNode.py | 321 +++++-----
Cython/Compiler/Nodes.py | 693 ++++++++++++++++-----
Cython/Compiler/Optimize.py | 496 +++++++++++++--
Cython/Compiler/Options.py | 4 +
Cython/Compiler/ParseTreeTransforms.py | 39 +-
Cython/Compiler/Parsing.py | 39 +-
Cython/Compiler/PyrexTypes.py | 234 ++++---
Cython/Compiler/Pythran.py | 13 +-
Cython/Compiler/Scanning.py | 2 +-
Cython/Compiler/StringEncoding.py | 8 +
Cython/Compiler/Symtab.py | 128 +++-
Cython/Compiler/Tests/TestTreeFragment.py | 2 +-
Cython/Compiler/Tests/TestTreePath.py | 72 +--
Cython/Compiler/Tests/TestUtilityLoad.py | 20 +-
Cython/Compiler/TreeFragment.py | 4 +-
Cython/Compiler/TypeInference.py | 29 +-
Cython/Compiler/TypeSlots.py | 17 +-
Cython/Compiler/UtilNodes.py | 5 +-
Cython/Compiler/UtilityCode.py | 10 +-
Cython/Compiler/Visitor.py | 15 +-
Cython/Coverage.py | 2 +
Cython/Debugger/libcython.py | 2 +-
Cython/Debugger/libpython.py | 12 +-
Cython/Distutils/build_ext.py | 4 +-
Cython/Includes/Deprecated/python2.5.pxd | 2 +-
Cython/Includes/cpython/__init__.pxd | 4 +-
Cython/Includes/cpython/array.pxd | 4 +-
Cython/Includes/cpython/object.pxd | 22 +-
Cython/Includes/cpython/pythread.pxd | 19 +-
Cython/Includes/libc/limits.pxd | 39 +-
Cython/Includes/libc/signal.pxd | 102 ++-
Cython/Includes/libcpp/deque.pxd | 31 +-
Cython/Includes/libcpp/string.pxd | 102 +--
Cython/Includes/libcpp/vector.pxd | 9 +-
Cython/Includes/numpy/__init__.pxd | 25 +-
Cython/Includes/posix/signal.pxd | 7 +
Cython/Includes/posix/time.pxd | 3 -
Cython/Parser/Grammar | 2 +-
Cython/Shadow.py | 10 +-
Cython/StringIOTree.pxd | 17 +
Cython/StringIOTree.py | 69 +-
Cython/Tests/TestCodeWriter.py | 2 +-
Cython/Utility/AsyncGen.c | 37 +-
Cython/Utility/Buffer.c | 4 +-
Cython/Utility/Builtins.c | 2 +-
Cython/Utility/Coroutine.c | 184 +++++-
Cython/Utility/CythonFunction.c | 3 +-
Cython/Utility/Exceptions.c | 2 +-
Cython/Utility/ExtensionTypes.c | 75 +++
Cython/Utility/ImportExport.c | 61 ++
Cython/Utility/MemoryView.pyx | 16 +-
Cython/Utility/MemoryView_C.c | 6 +-
Cython/Utility/ModuleSetupCode.c | 421 ++++++++-----
Cython/Utility/ObjectHandling.c | 389 ++++++++++--
Cython/Utility/Optimize.c | 176 +++++-
Cython/Utility/Overflow.c | 2 +-
Cython/Utility/Profile.c | 13 +-
Cython/Utility/StringTools.c | 26 +-
Cython/Utility/TypeConversion.c | 18 +-
Demos/freeze/README.txt | 2 +-
Tools/cython-mode.el | 2 +
Tools/rules.bzl | 2 +-
Tools/site_scons/site_tools/pyext.py | 2 +-
appveyor.yml | 3 +-
docs/src/quickstart/htmlreport.png | Bin 36200 -> 22739 bytes
docs/src/quickstart/install.rst | 3 +-
docs/src/reference/compilation.rst | 158 ++++-
docs/src/reference/extension_types.rst | 26 +-
docs/src/reference/language_basics.rst | 205 +++---
docs/src/tutorial/cdef_classes.rst | 2 +-
docs/src/tutorial/numpy.rst | 6 +
docs/src/tutorial/strings.rst | 18 +-
docs/src/userguide/debugging.rst | 6 +-
docs/src/userguide/extension_types.rst | 6 +-
docs/src/userguide/external_C_code.rst | 42 ++
docs/src/userguide/language_basics.rst | 8 +-
docs/src/userguide/memoryviews.rst | 29 +-
docs/src/userguide/numpy_pythran.rst | 4 +-
docs/src/userguide/numpy_tutorial.rst | 6 -
docs/src/userguide/special_methods.rst | 59 +-
docs/src/userguide/wrapping_CPlusPlus.rst | 4 +-
pyximport/pyximport.py | 6 +-
runtests.py | 12 +-
setup.py | 25 +-
tests/buffers/bufaccess.pyx | 18 +-
tests/buffers/mockbuffers.pxi | 17 +-
tests/buffers/userbuffer.pyx | 85 +++
tests/bugs.txt | 1 +
tests/build/cythonize_rename_ext.srctree | 38 ++
tests/build/setuptools_reimport.srctree | 23 +
tests/compile/cnamespec.h | 1 -
tests/compile/cnamespec.pyx | 5 +-
tests/compile/cpp_class_redefinition.pyx | 2 +-
tests/compile/min_async.pyx | 12 +
tests/compile/verbatiminclude_cimport.srctree | 36 ++
tests/errors/builtin_type_inheritance.pyx | 6 +-
tests/errors/cpp_class_gil_GH1986.pyx | 20 +
tests/errors/cpp_no_auto_conversion.pyx | 2 +-
tests/errors/e_arrayassign.pyx | 5 +-
tests/errors/e_cython_parallel.pyx | 1 +
tests/errors/e_directives.pyx | 2 +-
tests/errors/e_invalid_num_threads.pyx | 14 +-
tests/errors/e_nonlocal_T490.pyx | 2 +
tests/errors/subtyping_final_class.pyx | 2 +-
tests/memoryview/memoryview.pyx | 48 +-
tests/memoryview/memslice.pyx | 195 +++---
tests/memoryview/numpy_memoryview.pyx | 24 +-
tests/memoryview/numpy_memoryview_readonly.pyx | 112 ++++
tests/run/annotation_typing.pyx | 27 +-
tests/run/arithmetic_analyse_types_helper.h | 2 +-
tests/run/asyncio_generators.srctree | 32 +-
tests/run/bytearray_coercion.pyx | 25 +-
tests/run/bytearraymethods.pyx | 12 +
tests/run/cdef_multiple_inheritance.pyx | 45 ++
tests/run/cdef_multiple_inheritance_errors.srctree | 94 +++
tests/run/cdef_multiple_inheritance_nodict.pyx | 48 ++
tests/run/cpdef_pickle.srctree | 69 ++
tests/run/cpp_class_redef.pyx | 10 +-
tests/run/cpp_iterators.pyx | 51 +-
tests/run/cpp_operators.pyx | 2 +-
tests/run/cpp_stl_conversion.pyx | 10 +
tests/run/cpp_stl_string.pyx | 41 +-
tests/run/cpp_template_functions.pyx | 3 +-
tests/run/cpp_templates.pyx | 27 +
tests/run/cstringmul.pyx | 36 +-
tests/run/cyfunction.pyx | 19 +
tests/run/cyfunction_defaults.pyx | 25 +
tests/run/dict_getitem.pyx | 68 ++
tests/run/dict_pop.pyx | 34 +
tests/run/ext_auto_richcmp.py | 18 +
tests/run/extern_include_order.srctree | 56 ++
tests/run/fastcall.pyx | 13 +
tests/run/for_from_pyvar_loop_T601.pyx | 3 +-
tests/run/for_from_pyvar_loop_T601_extern_def.h | 2 -
tests/run/for_in_range_T372.pyx | 7 +
tests/run/fstring.pyx | 140 ++++-
tests/run/line_profile_test.srctree | 44 ++
tests/run/list.pyx | 149 ++++-
tests/run/numpy_subarray.pyx | 10 +-
tests/run/numpy_test.pyx | 23 +-
tests/run/or.pyx | 15 +
tests/run/overflow_check.pxi | 4 +-
tests/run/pep526_variable_annotations.py | 59 +-
tests/run/pure_py.py | 31 +
tests/run/py35_asyncio_async_def.srctree | 58 ++
tests/run/py35_pep492_interop.pyx | 73 ++-
tests/run/reduce_pickle.pyx | 3 +
tests/run/set.pyx | 26 +
tests/run/set_iter.pyx | 99 +++
tests/run/staticmethod.pyx | 8 +
tests/run/strmethods.pyx | 20 +
tests/run/test_coroutines_pep492.pyx | 25 +
tests/run/tss.pyx | 75 +++
tests/run/tuple.pyx | 26 +
tests/run/type_inference.pyx | 12 +-
tests/run/unicodefunction.pyx | 43 ++
tests/run/verbatiminclude.h | 6 +
tests/run/verbatiminclude.pyx | 62 ++
173 files changed, 6454 insertions(+), 1897 deletions(-)
create mode 100644 Cython/StringIOTree.pxd
create mode 100644 tests/buffers/userbuffer.pyx
create mode 100644 tests/build/cythonize_rename_ext.srctree
create mode 100644 tests/build/setuptools_reimport.srctree
delete mode 100644 tests/compile/cnamespec.h
create mode 100644 tests/compile/min_async.pyx
create mode 100644 tests/compile/verbatiminclude_cimport.srctree
create mode 100644 tests/errors/cpp_class_gil_GH1986.pyx
create mode 100644 tests/memoryview/numpy_memoryview_readonly.pyx
create mode 100644 tests/run/cdef_multiple_inheritance.pyx
create mode 100644 tests/run/cdef_multiple_inheritance_errors.srctree
create mode 100644 tests/run/cdef_multiple_inheritance_nodict.pyx
create mode 100644 tests/run/cpdef_pickle.srctree
create mode 100644 tests/run/dict_pop.pyx
create mode 100644 tests/run/extern_include_order.srctree
delete mode 100644 tests/run/for_from_pyvar_loop_T601_extern_def.h
create mode 100644 tests/run/py35_asyncio_async_def.srctree
create mode 100644 tests/run/set_iter.pyx
create mode 100644 tests/run/tss.pyx
create mode 100644 tests/run/verbatiminclude.h
create mode 100644 tests/run/verbatiminclude.pyx
diff --git a/.travis.yml b/.travis.yml
index 19a5c9f..13cb868 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,10 +4,17 @@ sudo: false
addons:
apt:
+ sources:
+ - ubuntu-toolchain-r-test
packages:
- gdb
- python-dbg
- python3-dbg
+ - gcc-6
+ - g++-6
+ # GCC-7 currently takes 5-7 *minutes* to download on travis
+ #- gcc-7
+ #- g++-7
cache:
pip: true
@@ -32,14 +39,16 @@ env:
- USE_CCACHE=1
- CCACHE_SLOPPINESS=pch_defines,time_macros
- CCACHE_COMPRESS=1
- - CCACHE_MAXSIZE=100M
- - PATH="/usr/lib/ccache:$PATH"
+ - CCACHE_MAXSIZE=150M
+ - PATH="/usr/lib/ccache:$HOME/gcc-symlinks:$PATH"
matrix:
- BACKEND=c
- BACKEND=cpp
matrix:
include:
+ #- python: 3.7-dev
+ # env: BACKEND=c PY=3 CC=gcc-7
- os: osx
osx_image: xcode6.4
env: BACKEND=c PY=2
@@ -85,6 +94,15 @@ branches:
before_install:
- |
+ if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
+ mkdir "$HOME/gcc-symlinks"
+ ln -s /usr/bin/gcc-6 $HOME/gcc-symlinks/gcc
+ ln -s /usr/bin/g++-6 $HOME/gcc-symlinks/g++
+
+ if [ -n "$CC" ]; then "$CC" --version; else gcc --version; fi
+ fi
+
+ - |
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then # Install Miniconda
curl -s -o miniconda.sh https://repo.continuum.io/miniconda/Miniconda$PY-latest-MacOSX-x86_64.sh;
bash miniconda.sh -b -p $HOME/miniconda && rm miniconda.sh;
@@ -94,7 +112,7 @@ before_install:
install:
- python -c 'import sys; print("Python %s" % (sys.version,))'
- - if [ -n "${TRAVIS_PYTHON_VERSION##*-dev}" -a -n "${TRAVIS_PYTHON_VERSION##2.6*}" ]; then pip install -r test-requirements.txt $( [ -z "${TRAVIS_PYTHON_VERSION##pypy*}" ] || echo " -r test-requirements-cpython.txt" ) ; fi
+ - if [ -n "${TRAVIS_PYTHON_VERSION##*-dev}" -a -n "${TRAVIS_PYTHON_VERSION##2.6*}" ]; then pip install -r test-requirements.txt $( [ -z "${TRAVIS_PYTHON_VERSION##pypy*}" ] || echo " -r test-requirements-cpython.txt" ) $( [ -n "${TRAVIS_PYTHON_VERSION##3.3*}" ] || echo " tornado<5.0" ) ; fi
- CFLAGS="-O2 -ggdb -Wall -Wextra $(python -c 'import sys; print("-fno-strict-aliasing" if sys.version_info[0] == 2 else "")')" python setup.py build
before_script: ccache -s || true
diff --git a/CHANGES.rst b/CHANGES.rst
index e4ac35e..e0d275d 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -2,6 +2,161 @@
Cython Changelog
================
+0.28 (2018-03-13)
+=================
+
+Features added
+--------------
+
+* Cdef classes can now multiply inherit from ordinary Python classes.
+ (The primary base must still be a c class, possibly ``object``, and
+ the other bases must *not* be cdef classes.)
+
+* Type inference is now supported for Pythran compiled NumPy expressions.
+ Patch by Nils Braun. (Github issue #1954)
+
+* The ``const`` modifier can be applied to memoryview declarations to allow
+ read-only buffers as input. (Github issues #1605, #1869)
+
+* C code in the docstring of a ``cdef extern`` block is copied verbatimly
+ into the generated file.
+ Patch by Jeroen Demeyer. (Github issue #1915)
+
+* When compiling with gcc, the module init function is now tuned for small
+ code size instead of whatever compile flags were provided externally.
+ Cython now also disables some code intensive optimisations in that function
+ to further reduce the code size. (Github issue #2102)
+
+* Decorating an async coroutine with ``@cython.iterable_coroutine`` changes its
+ type at compile time to make it iterable. While this is not strictly in line
+ with PEP-492, it improves the interoperability with old-style coroutines that
+ use ``yield from`` instead of ``await``.
+
+* The IPython magic has preliminary support for JupyterLab.
+ (Github issue #1775)
+
+* The new TSS C-API in CPython 3.7 is supported and has been backported.
+ Patch by Naotoshi Seo. (Github issue #1932)
+
+* Cython knows the new ``Py_tss_t`` type defined in PEP-539 and automatically
+ initialises variables declared with that type to ``Py_tss_NEEDS_INIT``,
+ a value which cannot be used outside of static assignments.
+
+* The set methods ``.remove()`` and ``.discard()`` are optimised.
+ Patch by Antoine Pitrou. (Github issue #2042)
+
+* ``dict.pop()`` is optimised.
+ Original patch by Antoine Pitrou. (Github issue #2047)
+
+* Iteration over sets and frozensets is optimised.
+ (Github issue #2048)
+
+* Safe integer loops (< range(2^30)) are automatically optimised into C loops.
+
+* ``alist.extend([a,b,c])`` is optimised into sequential ``list.append()`` calls
+ for short literal sequences.
+
+* Calls to builtin methods that are not specifically optimised into C-API calls
+ now use a cache that avoids repeated lookups of the underlying C function.
+ (Github issue #2054)
+
+* Single argument function calls can avoid the argument tuple creation in some cases.
+
+* Some redundant extension type checks are avoided.
+
+* Formatting C enum values in f-strings is faster, as well as some other special cases.
+
+* String formatting with the '%' operator is optimised into f-strings in simple cases.
+
+* Subscripting (item access) is faster in some cases.
+
+* Some ``bytearray`` operations have been optimised similar to ``bytes``.
+
+* Some PEP-484/526 container type declarations are now considered for
+ loop optimisations.
+
+* Indexing into memoryview slices with ``view[i][j]`` is now optimised into
+ ``view[i, j]``.
+
+* Python compatible ``cython.*`` types can now be mixed with type declarations
+ in Cython syntax.
+
+* Name lookups in the module and in classes are faster.
+
+* Python attribute lookups on extension types without instance dict are faster.
+
+* Some missing signals were added to ``libc/signal.pxd``.
+ Patch by Jeroen Demeyer. (Github issue #1914)
+
+* The warning about repeated extern declarations is now visible by default.
+ (Github issue #1874)
+
+* The exception handling of the function types used by CPython's type slot
+ functions was corrected to match the de-facto standard behaviour, so that
+ code that uses them directly benefits from automatic and correct exception
+ propagation. Patch by Jeroen Demeyer. (Github issue #1980)
+
+* Defining the macro ``CYTHON_NO_PYINIT_EXPORT`` will prevent the module init
+ function from being exported as symbol, e.g. when linking modules statically
+ in an embedding setup. Patch by AraHaan. (Github issue #1944)
+
+Bugs fixed
+----------
+
+* If a module name is explicitly provided for an ``Extension()`` that is compiled
+ via ``cythonize()``, it was previously ignored and replaced by the source file
+ name. It can now be used to override the target module name, e.g. for compiling
+ prefixed accelerator modules from Python files. (Github issue #2038)
+
+* The arguments of the ``num_threads`` parameter of parallel sections
+ were not sufficiently validated and could lead to invalid C code.
+ (Github issue #1957)
+
+* Catching exceptions with a non-trivial exception pattern could call into
+ CPython with a live exception set. This triggered incorrect behaviour
+ and crashes, especially in CPython 3.7.
+
+* The signature of the special ``__richcmp__()`` method was corrected to recognise
+ the type of the first argument as ``self``. It was previously treated as plain
+ object, but CPython actually guarantees that it always has the correct type.
+ Note: this can change the semantics of user code that previously relied on
+ ``self`` being untyped.
+
+* Some Python 3 exceptions were not recognised as builtins when running Cython
+ under Python 2.
+
+* Some async helper functions were not defined in the generated C code when
+ compiling simple async code. (Github issue #2075)
+
+* Line tracing did not include generators and coroutines.
+ (Github issue #1949)
+
+* C++ declarations for ``unordered_map`` were corrected.
+ Patch by Michael Schatzow. (Github issue #1484)
+
+* Iterator declarations in C++ ``deque`` and ``vector`` were corrected.
+ Patch by Alex Huszagh. (Github issue #1870)
+
+* The const modifiers in the C++ ``string`` declarations were corrected, together
+ with the coercion behaviour of string literals into C++ strings.
+ (Github issue #2132)
+
+* Some declaration types in ``libc.limits`` were corrected.
+ Patch by Jeroen Demeyer. (Github issue #2016)
+
+* ``@cython.final`` was not accepted on Python classes with an ``@cython.cclass``
+ decorator. (Github issue #2040)
+
+* Cython no longer creates useless and incorrect ``PyInstanceMethod`` wrappers for
+ methods in Python 3. Patch by Jeroen Demeyer. (Github issue #2105)
+
+* The builtin ``bytearray`` type could not be used as base type of cdef classes.
+ (Github issue #2106)
+
+Other changes
+-------------
+
+
0.27.3 (2017-11-03)
===================
@@ -419,7 +574,8 @@ Features added
* The new METH_FASTCALL calling convention for PyCFunctions is supported
in CPython 3.6. See https://bugs.python.org/issue27810
-* Initial support for using Cython modules in Pyston. Patch by Daetalus.
+* Initial support for using Cython modules in Pyston.
+ Patch by Boxiang Sun.
* Dynamic Python attributes are allowed on cdef classes if an attribute
``cdef dict __dict__`` is declared in the class. Patch by empyrical.
diff --git a/Cython/Build/Dependencies.py b/Cython/Build/Dependencies.py
index fa5d199..666307f 100644
--- a/Cython/Build/Dependencies.py
+++ b/Cython/Build/Dependencies.py
@@ -3,9 +3,17 @@ from __future__ import absolute_import, print_function
import cython
from .. import __version__
+import os
+import shutil
+import hashlib
+import subprocess
import collections
-import re, os, sys, time
+import re, sys, time
from glob import iglob
+from io import open as io_open
+from os.path import relpath as _relpath
+from distutils.extension import Extension
+from distutils.util import strtobool
try:
import gzip
@@ -14,34 +22,6 @@ try:
except ImportError:
gzip_open = open
gzip_ext = ''
-import shutil
-import subprocess
-import os
-
-try:
- import hashlib
-except ImportError:
- import md5 as hashlib
-
-try:
- from io import open as io_open
-except ImportError:
- from codecs import open as io_open
-
-try:
- from os.path import relpath as _relpath
-except ImportError:
- # Py<2.6
- def _relpath(path, start=os.path.curdir):
- if not path:
- raise ValueError("no path specified")
- start_list = os.path.abspath(start).split(os.path.sep)
- path_list = os.path.abspath(path).split(os.path.sep)
- i = len(os.path.commonprefix([start_list, path_list]))
- rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
- if not rel_list:
- return os.path.curdir
- return os.path.join(*rel_list)
try:
import pythran
@@ -50,9 +30,6 @@ try:
except:
PythranAvailable = False
-from distutils.extension import Extension
-from distutils.util import strtobool
-
from .. import Utils
from ..Utils import (cached_function, cached_method, path_exists,
safe_makedirs, copy_file_to_dir_if_newer, is_package_dir)
@@ -777,11 +754,11 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=
cython_sources = [s for s in pattern.sources
if os.path.splitext(s)[1] in ('.py', '.pyx')]
if cython_sources:
- filepattern = cython_sources[0]
- if len(cython_sources) > 1:
- print("Warning: Multiple cython sources found for extension '%s': %s\n"
- "See http://cython.readthedocs.io/en/latest/src/userguide/sharing_declarations.html "
- "for sharing declarations among Cython files." % (pattern.name, cython_sources))
+ filepattern = cython_sources[0]
+ if len(cython_sources) > 1:
+ print("Warning: Multiple cython sources found for extension '%s': %s\n"
+ "See http://cython.readthedocs.io/en/latest/src/userguide/sharing_declarations.html "
+ "for sharing declarations among Cython files." % (pattern.name, cython_sources))
else:
# ignore non-cython modules
module_list.append(pattern)
@@ -800,16 +777,16 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=
for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern):
if os.path.abspath(file) in to_exclude:
continue
- pkg = deps.package(file)
module_name = deps.fully_qualified_name(file)
if '*' in name:
if module_name in explicit_modules:
continue
- elif name != module_name:
- print("Warning: Extension name '%s' does not match fully qualified name '%s' of '%s'" % (
- name, module_name, file))
+ elif name:
module_name = name
+ if module_name == 'cython':
+ raise ValueError('cython is a special module, cannot be used as a module name')
+
if module_name not in seen:
try:
kwds = deps.distutils_info(file, aliases, base).values
@@ -921,22 +898,33 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False,
deps = create_dependency_tree(ctx, quiet=quiet)
build_dir = getattr(options, 'build_dir', None)
- modules_by_cfile = {}
+ def copy_to_build_dir(filepath, root=os.getcwd()):
+ filepath_abs = os.path.abspath(filepath)
+ if os.path.isabs(filepath):
+ filepath = filepath_abs
+ if filepath_abs.startswith(root):
+ # distutil extension depends are relative to cwd
+ mod_dir = join_path(build_dir,
+ os.path.dirname(_relpath(filepath, root)))
+ copy_once_if_newer(filepath_abs, mod_dir)
+
+ modules_by_cfile = collections.defaultdict(list)
to_compile = []
for m in module_list:
if build_dir:
- root = os.getcwd() # distutil extension depends are relative to cwd
- def copy_to_build_dir(filepath, root=root):
- filepath_abs = os.path.abspath(filepath)
- if os.path.isabs(filepath):
- filepath = filepath_abs
- if filepath_abs.startswith(root):
- mod_dir = join_path(build_dir,
- os.path.dirname(_relpath(filepath, root)))
- copy_once_if_newer(filepath_abs, mod_dir)
for dep in m.depends:
copy_to_build_dir(dep)
+ cy_sources = [
+ source for source in m.sources
+ if os.path.splitext(source)[1] in ('.pyx', '.py')]
+ if len(cy_sources) == 1:
+ # normal "special" case: believe the Extension module name to allow user overrides
+ full_module_name = m.name
+ else:
+ # infer FQMN from source files
+ full_module_name = None
+
new_sources = []
for source in m.sources:
base, ext = os.path.splitext(source)
@@ -981,13 +969,12 @@ def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False,
fingerprint = deps.transitive_fingerprint(source, extra)
else:
fingerprint = None
- to_compile.append((priority, source, c_file, fingerprint, quiet,
- options, not exclude_failures, module_metadata.get(m.name)))
+ to_compile.append((
+ priority, source, c_file, fingerprint, quiet,
+ options, not exclude_failures, module_metadata.get(m.name),
+ full_module_name))
new_sources.append(c_file)
- if c_file not in modules_by_cfile:
- modules_by_cfile[c_file] = [m]
- else:
- modules_by_cfile[c_file].append(m)
+ modules_by_cfile[c_file].append(m)
else:
new_sources.append(source)
if build_dir:
@@ -1103,17 +1090,15 @@ else:
# TODO: Share context? Issue: pyx processing leaks into pxd module
@record_results
-def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None, raise_on_failure=True, embedded_metadata=None, progress=""):
- from ..Compiler.Main import compile, default_options
+def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None,
+ raise_on_failure=True, embedded_metadata=None, full_module_name=None,
+ progress=""):
+ from ..Compiler.Main import compile_single, default_options
from ..Compiler.Errors import CompileError, PyrexError
if fingerprint:
if not os.path.exists(options.cache):
- try:
- os.mkdir(options.cache)
- except:
- if not os.path.exists(options.cache):
- raise
+ safe_makedirs(options.cache)
# Cython-generated c files are highly compressible.
# (E.g. a compression ratio of about 10 for Sage).
fingerprint_file = join_path(
@@ -1141,7 +1126,7 @@ def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None, raise_on_f
any_failures = 0
try:
- result = compile([pyx_file], options)
+ result = compile_single(pyx_file, options, full_module_name=full_module_name)
if result.num_errors > 0:
any_failures = 1
except (EnvironmentError, PyrexError) as e:
diff --git a/Cython/CodeWriter.py b/Cython/CodeWriter.py
index 5786495..2e4646a 100644
--- a/Cython/CodeWriter.py
+++ b/Cython/CodeWriter.py
@@ -363,7 +363,7 @@ class CodeWriter(DeclarationWriter):
self.dedent()
def visit_IfStatNode(self, node):
- # The IfClauseNode is handled directly without a seperate match
+ # The IfClauseNode is handled directly without a separate match
# for clariy.
self.startline(u"if ")
self.visit(node.if_clauses[0].condition)
diff --git a/Cython/Compiler/Annotate.py b/Cython/Compiler/Annotate.py
index 33f68cd..5feac02 100644
--- a/Cython/Compiler/Annotate.py
+++ b/Cython/Compiler/Annotate.py
@@ -79,14 +79,6 @@ class AnnotationCCodeWriter(CCodeWriter):
css.append(HtmlFormatter().get_style_defs('.cython'))
return '\n'.join(css)
- _js = """
- function toggleDiv(id) {
- theDiv = id.nextElementSibling
- if (theDiv.style.display != 'block') theDiv.style.display = 'block';
- else theDiv.style.display = 'none';
- }
- """.strip()
-
_css_template = textwrap.dedent("""
body.cython { font-family: courier; font-size: 12; }
@@ -114,6 +106,14 @@ class AnnotationCCodeWriter(CCodeWriter):
.cython.code .c_call { color: #0000FF; }
""")
+ # on-click toggle function to show/hide C source code
+ _onclick_attr = ' onclick="{0}"'.format((
+ "(function(s){"
+ " s.display = s.display === 'block' ? 'none' : 'block'"
+ "})(this.nextElementSibling.style)"
+ ).replace(' ', '') # poor dev's JS minification
+ )
+
def save_annotation(self, source_filename, target_filename, coverage_xml=None):
with Utils.open_source_file(source_filename) as f:
code = f.read()
@@ -141,9 +141,6 @@ class AnnotationCCodeWriter(CCodeWriter):
-
Generated by Cython {watermark} {more_info}
@@ -151,7 +148,7 @@ class AnnotationCCodeWriter(CCodeWriter):
Yellow lines hint at Python interaction.
Click on a line that starts with a "+
" to see the C code that Cython generated for it.
- ''').format(css=self._css(), js=self._js, watermark=Version.watermark,
+ ''').format(css=self._css(), watermark=Version.watermark,
filename=os.path.basename(source_filename) if source_filename else '',
more_info=coverage_info)
]
@@ -253,7 +250,7 @@ class AnnotationCCodeWriter(CCodeWriter):
calls['py_macro_api'] + calls['pyx_macro_api'])
if c_code:
- onclick = " onclick='toggleDiv(this)'"
+ onclick = self._onclick_attr
expandsymbol = '+'
else:
onclick = ''
diff --git a/Cython/Compiler/Buffer.py b/Cython/Compiler/Buffer.py
index 3c7c9bb..04385b4 100644
--- a/Cython/Compiler/Buffer.py
+++ b/Cython/Compiler/Buffer.py
@@ -326,7 +326,7 @@ def put_acquire_arg_buffer(entry, code, pos):
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % entry.type.dtype.struct_nesting_depth())
code.putln(code.error_goto_if("%s == -1" % getbuffer, pos))
code.putln("}")
- # An exception raised in arg parsing cannot be catched, so no
+ # An exception raised in arg parsing cannot be caught, so no
# need to care about the buffer then.
put_unpack_buffer_aux_into_scope(entry, code)
@@ -370,7 +370,7 @@ def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry,
pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
flags = get_flags(buffer_aux, buffer_type)
- code.putln("{") # Set up necesarry stack for getbuffer
+ code.putln("{") # Set up necessary stack for getbuffer
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())
getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type) # fill in object below
@@ -617,7 +617,7 @@ class GetAndReleaseBufferUtilityCode(object):
def mangle_dtype_name(dtype):
- # Use prefixes to seperate user defined types from builtins
+ # Use prefixes to separate user defined types from builtins
# (consider "typedef float unsigned_int")
if dtype.is_pyobject:
return "object"
@@ -636,7 +636,7 @@ def get_type_information_cname(code, dtype, maxdepth=None):
and return the name of the type info struct.
Structs with two floats of the same size are encoded as complex numbers.
- One can seperate between complex numbers declared as struct or with native
+ One can separate between complex numbers declared as struct or with native
encoding by inspecting to see if the fields field of the type is
filled in.
"""
diff --git a/Cython/Compiler/Builtin.py b/Cython/Compiler/Builtin.py
index c9ed656..f8ee614 100644
--- a/Cython/Compiler/Builtin.py
+++ b/Cython/Compiler/Builtin.py
@@ -328,7 +328,10 @@ builtin_types_table = [
("set", "PySet_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("clear", "T", "r", "PySet_Clear"),
# discard() and remove() have a special treatment for unhashable values
-# BuiltinMethod("discard", "TO", "r", "PySet_Discard"),
+ BuiltinMethod("discard", "TO", "r", "__Pyx_PySet_Discard",
+ utility_code=UtilityCode.load("py_set_discard", "Optimize.c")),
+ BuiltinMethod("remove", "TO", "r", "__Pyx_PySet_Remove",
+ utility_code=UtilityCode.load("py_set_remove", "Optimize.c")),
# update is actually variadic (see Github issue #1645)
# BuiltinMethod("update", "TO", "r", "__Pyx_PySet_Update",
# utility_code=UtilityCode.load_cached("PySet_Update", "Builtins.c")),
@@ -388,6 +391,8 @@ def init_builtin_types():
utility = builtin_utility_code.get(name)
if name == 'frozenset':
objstruct_cname = 'PySetObject'
+ elif name == 'bytearray':
+ objstruct_cname = 'PyByteArrayObject'
elif name == 'bool':
objstruct_cname = None
elif name == 'Exception':
diff --git a/Cython/Compiler/CmdLine.py b/Cython/Compiler/CmdLine.py
index e913810..a587324 100644
--- a/Cython/Compiler/CmdLine.py
+++ b/Cython/Compiler/CmdLine.py
@@ -154,6 +154,8 @@ def parse_command_line(args):
options.capi_reexport_cincludes = True
elif option == "--fast-fail":
Options.fast_fail = True
+ elif option == "--cimport-from-pyx":
+ Options.cimport_from_pyx = True
elif option in ('-Werror', '--warning-errors'):
Options.warning_errors = True
elif option in ('-Wextra', '--warning-extra'):
diff --git a/Cython/Compiler/Code.py b/Cython/Compiler/Code.py
index 92044be..0974ab3 100644
--- a/Cython/Compiler/Code.py
+++ b/Cython/Compiler/Code.py
@@ -60,10 +60,42 @@ basicsize_builtins_map = {
}
uncachable_builtins = [
- # builtin names that cannot be cached because they may or may not
- # be available at import time
+ # Global/builtin names that cannot be cached because they may or may not
+ # be available at import time, for various reasons:
+ ## - Py3.7+
+ 'breakpoint', # might deserve an implementation in Cython
+ ## - Py3.4+
+ '__loader__',
+ '__spec__',
+ ## - Py3+
+ 'BlockingIOError',
+ 'BrokenPipeError',
+ 'ChildProcessError',
+ 'ConnectionAbortedError',
+ 'ConnectionError',
+ 'ConnectionRefusedError',
+ 'ConnectionResetError',
+ 'FileExistsError',
+ 'FileNotFoundError',
+ 'InterruptedError',
+ 'IsADirectoryError',
+ 'ModuleNotFoundError',
+ 'NotADirectoryError',
+ 'PermissionError',
+ 'ProcessLookupError',
+ 'RecursionError',
+ 'ResourceWarning',
+ #'StopAsyncIteration', # backported
+ 'TimeoutError',
+ '__build_class__',
+ 'ascii', # might deserve an implementation in Cython
+ #'exec', # implemented in Cython
+ ## - Py2.7+
+ 'memoryview',
+ ## - platform specific
'WindowsError',
- '_', # e.g. gettext
+ ## - others
+ '_', # e.g. used by gettext
]
special_py_methods = set([
@@ -80,6 +112,82 @@ modifier_output_mapper = {
is_self_assignment = re.compile(r" *(\w+) = (\1);\s*$").match
+class IncludeCode(object):
+ """
+ An include file and/or verbatim C code to be included in the
+ generated sources.
+ """
+ # attributes:
+ #
+ # pieces {order: unicode}: pieces of C code to be generated.
+ # For the included file, the key "order" is zero.
+ # For verbatim include code, the "order" is the "order"
+ # attribute of the original IncludeCode where this piece
+ # of C code was first added. This is needed to prevent
+ # duplication if the same include code is found through
+ # multiple cimports.
+ # location int: where to put this include in the C sources, one
+ # of the constants INITIAL, EARLY, LATE
+ # order int: sorting order (automatically set by increasing counter)
+
+ # Constants for location. If the same include occurs with different
+    # locations, the earliest one takes precedence.
+ INITIAL = 0
+ EARLY = 1
+ LATE = 2
+
+ counter = 1 # Counter for "order"
+
+ def __init__(self, include=None, verbatim=None, late=True, initial=False):
+ self.order = self.counter
+ type(self).counter += 1
+ self.pieces = {}
+
+ if include:
+ if include[0] == '<' and include[-1] == '>':
+ self.pieces[0] = u'#include {0}'.format(include)
+ late = False # system include is never late
+ else:
+ self.pieces[0] = u'#include "{0}"'.format(include)
+
+ if verbatim:
+ self.pieces[self.order] = verbatim
+
+ if initial:
+ self.location = self.INITIAL
+ elif late:
+ self.location = self.LATE
+ else:
+ self.location = self.EARLY
+
+ def dict_update(self, d, key):
+ """
+ Insert `self` in dict `d` with key `key`. If that key already
+ exists, update the attributes of the existing value with `self`.
+ """
+ if key in d:
+ other = d[key]
+ other.location = min(self.location, other.location)
+ other.pieces.update(self.pieces)
+ else:
+ d[key] = self
+
+ def sortkey(self):
+ return self.order
+
+ def mainpiece(self):
+ """
+ Return the main piece of C code, corresponding to the include
+ file. If there was no include file, return None.
+ """
+ return self.pieces.get(0)
+
+ def write(self, code):
+ # Write values of self.pieces dict, sorted by the keys
+ for k in sorted(self.pieces):
+ code.putln(self.pieces[k])
+
+
def get_utility_dir():
# make this a function and not global variables:
# http://trac.cython.org/cython_trac/ticket/475
@@ -332,7 +440,7 @@ class UtilityCode(UtilityCodeBase):
hashes/equals by instance
proto C prototypes
- impl implemenation code
+ impl implementation code
init code to call on module initialization
requires utility code dependencies
proto_block the place in the resulting file where the prototype should
@@ -406,21 +514,22 @@ class UtilityCode(UtilityCodeBase):
def inject_string_constants(self, impl, output):
"""Replace 'PYIDENT("xyz")' by a constant Python identifier cname.
"""
- if 'PYIDENT(' not in impl:
+ if 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl:
return False, impl
replacements = {}
def externalise(matchobj):
- name = matchobj.group(1)
+ key = matchobj.groups()
try:
- cname = replacements[name]
+ cname = replacements[key]
except KeyError:
- cname = replacements[name] = output.get_interned_identifier(
- StringEncoding.EncodedString(name)).cname
+ str_type, name = key
+ cname = replacements[key] = output.get_py_string_const(
+ StringEncoding.EncodedString(name), identifier=str_type == 'IDENT').cname
return cname
- impl = re.sub(r'PYIDENT\("([^"]+)"\)', externalise, impl)
- assert 'PYIDENT(' not in impl
+ impl = re.sub(r'PY(IDENT|UNICODE)\("([^"]+)"\)', externalise, impl)
+ assert 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl
return bool(replacements), impl
def inject_unbound_methods(self, impl, output):
@@ -431,21 +540,18 @@ class UtilityCode(UtilityCodeBase):
utility_code = set()
def externalise(matchobj):
- type_cname, method_name, args = matchobj.groups()
- args = [arg.strip() for arg in args[1:].split(',')]
- if len(args) == 1:
- call = '__Pyx_CallUnboundCMethod0'
- utility_code.add("CallUnboundCMethod0")
- elif len(args) == 2:
- call = '__Pyx_CallUnboundCMethod1'
- utility_code.add("CallUnboundCMethod1")
- else:
- assert False, "CALL_UNBOUND_METHOD() requires 1 or 2 call arguments"
-
- cname = output.get_cached_unbound_method(type_cname, method_name, len(args))
- return '%s(&%s, %s)' % (call, cname, ', '.join(args))
-
- impl = re.sub(r'CALL_UNBOUND_METHOD\(([a-zA-Z_]+),\s*"([^"]+)"((?:,\s*[^),]+)+)\)', externalise, impl)
+ type_cname, method_name, obj_cname, args = matchobj.groups()
+ args = [arg.strip() for arg in args[1:].split(',')] if args else []
+ assert len(args) < 3, "CALL_UNBOUND_METHOD() does not support %d call arguments" % len(args)
+ return output.cached_unbound_method_call_code(obj_cname, type_cname, method_name, args)
+
+ impl = re.sub(
+ r'CALL_UNBOUND_METHOD\('
+ r'([a-zA-Z_]+),' # type cname
+ r'\s*"([^"]+)",' # method name
+ r'\s*([^),]+)' # object cname
+ r'((?:,\s*[^),]+)*)' # args*
+ r'\)', externalise, impl)
assert 'CALL_UNBOUND_METHOD(' not in impl
for helper in sorted(utility_code):
@@ -985,6 +1091,7 @@ class GlobalState(object):
'global_var',
'string_decls',
'decls',
+ 'late_includes',
'all_the_rest',
'pystring_table',
'cached_builtins',
@@ -1239,8 +1346,8 @@ class GlobalState(object):
prefix = Naming.const_prefix
return "%s%s" % (prefix, name_suffix)
- def get_cached_unbound_method(self, type_cname, method_name, args_count):
- key = (type_cname, method_name, args_count)
+ def get_cached_unbound_method(self, type_cname, method_name):
+ key = (type_cname, method_name)
try:
cname = self.cached_cmethods[key]
except KeyError:
@@ -1248,6 +1355,18 @@ class GlobalState(object):
'umethod', '%s_%s' % (type_cname, method_name))
return cname
+ def cached_unbound_method_call_code(self, obj_cname, type_cname, method_name, arg_cnames):
+ # admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ...
+ utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames)
+ self.use_utility_code(UtilityCode.load_cached(utility_code_name, "ObjectHandling.c"))
+ cache_cname = self.get_cached_unbound_method(type_cname, method_name)
+ args = [obj_cname] + arg_cnames
+ return "__Pyx_%s(&%s, %s)" % (
+ utility_code_name,
+ cache_cname,
+ ', '.join(args),
+ )
+
def add_cached_builtin_decl(self, entry):
if entry.is_builtin and entry.is_const:
if self.should_declare(entry.cname, entry):
@@ -1300,7 +1419,7 @@ class GlobalState(object):
decl = self.parts['decls']
init = self.parts['init_globals']
cnames = []
- for (type_cname, method_name, _), cname in sorted(self.cached_cmethods.items()):
+ for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()):
cnames.append(cname)
method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname
decl.putln('static __Pyx_CachedCFunction %s = {0, &%s, 0, 0, 0};' % (
@@ -1520,7 +1639,7 @@ class CCodeWriter(object):
as well
- labels, temps, exc_vars: One must construct a scope in which these can
exist by calling enter_cfunc_scope/exit_cfunc_scope (these are for
- sanity checking and forward compatabilty). Created insertion points
+ sanity checking and forward compatibility). Created insertion points
looses this scope and cannot access it.
- marker: Not copied to insertion point
- filename_table, filename_list, input_file_contents: All codewriters
@@ -1942,8 +2061,8 @@ class CCodeWriter(object):
self.put_xdecref_memoryviewslice(cname, have_gil=have_gil)
return
- prefix = nanny and '__Pyx' or 'Py'
- X = null_check and 'X' or ''
+ prefix = '__Pyx' if nanny else 'Py'
+ X = 'X' if null_check else ''
if clear:
if clear_before_decref:
@@ -2293,6 +2412,7 @@ class CCodeWriter(object):
self.putln(" #define unlikely(x) __builtin_expect(!!(x), 0)")
self.putln("#endif")
+
class PyrexCodeWriter(object):
# f file output file
# level int indentation level
diff --git a/Cython/Compiler/CodeGeneration.py b/Cython/Compiler/CodeGeneration.py
index 6805aa9..e64049c 100644
--- a/Cython/Compiler/CodeGeneration.py
+++ b/Cython/Compiler/CodeGeneration.py
@@ -12,7 +12,7 @@ class ExtractPxdCode(VisitorTransform):
The result is a tuple (StatListNode, ModuleScope), i.e.
everything that is needed from the pxd after it is processed.
- A purer approach would be to seperately compile the pxd code,
+ A purer approach would be to separately compile the pxd code,
but the result would have to be slightly more sophisticated
than pure strings (functions + wanted interned strings +
wanted utility code + wanted cached objects) so for now this
diff --git a/Cython/Compiler/CythonScope.py b/Cython/Compiler/CythonScope.py
index 00b912a..1c25d1a 100644
--- a/Cython/Compiler/CythonScope.py
+++ b/Cython/Compiler/CythonScope.py
@@ -26,6 +26,10 @@ class CythonScope(ModuleScope):
cname='')
entry.in_cinclude = True
+ def is_cpp(self):
+ # Allow C++ utility code in C++ contexts.
+ return self.context.cpp
+
def lookup_type(self, name):
# This function should go away when types are all first-level objects.
type = parse_basic_type(name)
diff --git a/Cython/Compiler/ExprNodes.py b/Cython/Compiler/ExprNodes.py
index dfe46f8..7e10c6d 100644
--- a/Cython/Compiler/ExprNodes.py
+++ b/Cython/Compiler/ExprNodes.py
@@ -870,16 +870,19 @@ class ExprNode(Node):
elif not src_type.is_error:
error(self.pos,
"Cannot convert '%s' to memoryviewslice" % (src_type,))
- elif not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
- copying=self.is_memview_copy_assignment):
- if src.type.dtype.same_as(dst_type.dtype):
- msg = "Memoryview '%s' not conformable to memoryview '%s'."
- tup = src.type, dst_type
- else:
- msg = "Different base types for memoryviews (%s, %s)"
- tup = src.type.dtype, dst_type.dtype
+ else:
+ if src.type.writable_needed:
+ dst_type.writable_needed = True
+ if not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
+ copying=self.is_memview_copy_assignment):
+ if src.type.dtype.same_as(dst_type.dtype):
+ msg = "Memoryview '%s' not conformable to memoryview '%s'."
+ tup = src.type, dst_type
+ else:
+ msg = "Different base types for memoryviews (%s, %s)"
+ tup = src.type.dtype, dst_type.dtype
- error(self.pos, msg % tup)
+ error(self.pos, msg % tup)
elif dst_type.is_pyobject:
if not src.type.is_pyobject:
@@ -1081,6 +1084,12 @@ class NoneNode(PyConstNode):
def may_be_none(self):
return True
+ def coerce_to(self, dst_type, env):
+ if not (dst_type.is_pyobject or dst_type.is_memoryviewslice or dst_type.is_error):
+ # Catch this error early and loudly.
+ error(self.pos, "Cannot assign None to %s" % dst_type)
+ return super(NoneNode, self).coerce_to(dst_type, env)
+
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
@@ -1433,7 +1442,7 @@ class BytesNode(ConstNode):
node.type = Builtin.bytes_type
else:
self.check_for_coercion_error(dst_type, env, fail=True)
- return node
+ return node
elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
node.type = dst_type
return node
@@ -1442,8 +1451,10 @@ class BytesNode(ConstNode):
else PyrexTypes.c_char_ptr_type)
return CastNode(node, dst_type)
elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
- node.type = dst_type
- return node
+ # Exclude the case of passing a C string literal into a non-const C++ string.
+ if not dst_type.is_cpp_class or dst_type.is_const:
+ node.type = dst_type
+ return node
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
@@ -1863,6 +1874,7 @@ class NameNode(AtomicExprNode):
if atype is None:
atype = unspecified_type if as_target and env.directives['infer_types'] != False else py_object_type
self.entry = env.declare_var(name, atype, self.pos, is_cdef=not as_target)
+ self.entry.annotation = annotation
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
@@ -2073,7 +2085,11 @@ class NameNode(AtomicExprNode):
def check_const(self):
entry = self.entry
- if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
+ if entry is not None and not (
+ entry.is_const or
+ entry.is_cfunction or
+ entry.is_builtin or
+ entry.type.is_const):
self.not_const()
return False
return True
@@ -2215,7 +2231,8 @@ class NameNode(AtomicExprNode):
setter = 'PyDict_SetItem'
namespace = Naming.moddict_cname
elif entry.is_pyclass_attr:
- setter = 'PyObject_SetItem'
+ code.globalstate.use_utility_code(UtilityCode.load_cached("SetNameInClass", "ObjectHandling.c"))
+ setter = '__Pyx_SetNameInClass'
else:
assert False, repr(entry)
code.put_error_if_neg(
@@ -2288,7 +2305,11 @@ class NameNode(AtomicExprNode):
code.putln('%s = %s;' % (self.result(), result))
else:
result = rhs.result_as(self.ctype())
- code.putln('%s = %s;' % (self.result(), result))
+
+ if is_pythran_expr(self.type):
+ code.putln('new (&%s) decltype(%s){%s};' % (self.result(), self.result(), result))
+ else:
+ code.putln('%s = %s;' % (self.result(), result))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating post-assignment code for %s" % rhs)
@@ -3294,7 +3315,7 @@ class _IndexingBaseNode(ExprNode):
# in most cases, indexing will return a safe reference to an object in a container,
# so we consider the result safe if the base object is
return self.base.is_ephemeral() or self.base.type in (
- basestring_type, str_type, bytes_type, unicode_type)
+ basestring_type, str_type, bytes_type, bytearray_type, unicode_type)
def check_const_addr(self):
return self.base.check_const_addr() and self.index.check_const()
@@ -3354,7 +3375,7 @@ class IndexNode(_IndexingBaseNode):
return False
if isinstance(self.index, SliceNode):
# slicing!
- if base_type in (bytes_type, str_type, unicode_type,
+ if base_type in (bytes_type, bytearray_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
@@ -3454,6 +3475,10 @@ class IndexNode(_IndexingBaseNode):
if index_func is not None:
return index_func.type.return_type
+ if is_pythran_expr(base_type) and is_pythran_expr(index_type):
+ index_with_type = (self.index, index_type)
+ return PythranExpr(pythran_indexing_type(base_type, [index_with_type]))
+
# may be slicing or indexing, we don't know
if base_type in (unicode_type, str_type):
# these types always returns their own type on Python indexing/slicing
@@ -3579,7 +3604,7 @@ class IndexNode(_IndexingBaseNode):
else:
# not using 'uchar' to enable fast and safe error reporting as '-1'
self.type = PyrexTypes.c_int_type
- elif is_slice and base_type in (bytes_type, str_type, unicode_type, list_type, tuple_type):
+ elif is_slice and base_type in (bytes_type, bytearray_type, str_type, unicode_type, list_type, tuple_type):
self.type = base_type
else:
item_type = None
@@ -3680,23 +3705,33 @@ class IndexNode(_IndexingBaseNode):
else:
indices = [self.index]
- base_type = self.base.type
+ base = self.base
+ base_type = base.type
replacement_node = None
if base_type.is_memoryviewslice:
# memoryviewslice indexing or slicing
from . import MemoryView
+ if base.is_memview_slice:
+ # For memory views, "view[i][j]" is the same as "view[i, j]" => use the latter for speed.
+ merged_indices = base.merged_indices(indices)
+ if merged_indices is not None:
+ base = base.base
+ base_type = base.type
+ indices = merged_indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim)
if have_slices:
- replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=self.base)
+ replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=base)
else:
- replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=self.base)
+ replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=base)
elif base_type.is_buffer or base_type.is_pythran_expr:
if base_type.is_pythran_expr or len(indices) == base_type.ndim:
# Buffer indexing
is_buffer_access = True
indices = [index.analyse_types(env) for index in indices]
if base_type.is_pythran_expr:
- do_replacement = all(index.type.is_int or index.is_slice or index.type.is_pythran_expr for index in indices)
+ do_replacement = all(
+ index.type.is_int or index.is_slice or index.type.is_pythran_expr
+ for index in indices)
if do_replacement:
for i,index in enumerate(indices):
if index.is_slice:
@@ -3706,7 +3741,7 @@ class IndexNode(_IndexingBaseNode):
else:
do_replacement = all(index.type.is_int for index in indices)
if do_replacement:
- replacement_node = BufferIndexNode(self.pos, indices=indices, base=self.base)
+ replacement_node = BufferIndexNode(self.pos, indices=indices, base=base)
# On cloning, indices is cloned. Otherwise, unpack index into indices.
assert not isinstance(self.index, CloneNode)
@@ -3873,6 +3908,8 @@ class IndexNode(_IndexingBaseNode):
if not self.is_temp:
# all handled in self.calculate_result_code()
return
+
+ utility_code = None
if self.type.is_pyobject:
error_value = 'NULL'
if self.index.type.is_int:
@@ -3882,32 +3919,38 @@ class IndexNode(_IndexingBaseNode):
function = "__Pyx_GetItemInt_Tuple"
else:
function = "__Pyx_GetItemInt"
- code.globalstate.use_utility_code(
- TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
+ utility_code = TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c")
else:
if self.base.type is dict_type:
function = "__Pyx_PyDict_GetItem"
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("DictGetItem", "ObjectHandling.c"))
+ utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
+ elif self.base.type is py_object_type and self.index.type in (str_type, unicode_type):
+ # obj[str] is probably doing a dict lookup
+ function = "__Pyx_PyObject_Dict_GetItem"
+ utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
else:
- function = "PyObject_GetItem"
+ function = "__Pyx_PyObject_GetItem"
+ code.globalstate.use_utility_code(
+ TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
+ utility_code = UtilityCode.load_cached("ObjectGetItem", "ObjectHandling.c")
elif self.type.is_unicode_char and self.base.type is unicode_type:
assert self.index.type.is_int
function = "__Pyx_GetItemInt_Unicode"
error_value = '(Py_UCS4)-1'
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c"))
+ utility_code = UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c")
elif self.base.type is bytearray_type:
assert self.index.type.is_int
assert self.type.is_int
function = "__Pyx_GetItemInt_ByteArray"
error_value = '-1'
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c"))
+ utility_code = UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c")
elif not (self.base.type.is_cpp_class and self.exception_check):
assert False, "unexpected type %s and base type %s for indexing" % (
self.type, self.base.type)
+ if utility_code is not None:
+ code.globalstate.use_utility_code(utility_code)
+
if self.index.type.is_int:
index_code = self.index.result()
else:
@@ -4104,7 +4147,8 @@ class BufferIndexNode(_IndexingBaseNode):
def analyse_buffer_index(self, env, getting):
if is_pythran_expr(self.base.type):
- self.type = PythranExpr(pythran_indexing_type(self.base.type, self.indices))
+ index_with_type_list = [(idx, idx.type) for idx in self.indices]
+ self.type = PythranExpr(pythran_indexing_type(self.base.type, index_with_type_list))
else:
self.base = self.base.coerce_to_simple(env)
self.type = self.base.type.dtype
@@ -4126,10 +4170,6 @@ class BufferIndexNode(_IndexingBaseNode):
def nogil_check(self, env):
if self.is_buffer_access or self.is_memview_index:
- if env.directives['boundscheck']:
- warning(self.pos, "Use boundscheck(False) for faster access",
- level=1)
-
if self.type.is_pyobject:
error(self.pos, "Cannot access buffer with object dtype without gil")
self.type = error_type
@@ -4156,6 +4196,11 @@ class BufferIndexNode(_IndexingBaseNode):
"""
ndarray[1, 2, 3] and memslice[1, 2, 3]
"""
+ if self.in_nogil_context:
+ if self.is_buffer_access or self.is_memview_index:
+ if code.globalstate.directives['boundscheck']:
+ warning(self.pos, "Use boundscheck(False) for faster access", level=1)
+
# Assign indices to temps of at least (s)size_t to allow further index calculations.
index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices]
@@ -4189,7 +4234,7 @@ class BufferIndexNode(_IndexingBaseNode):
if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
# We have got to do this because we have to declare pythran objects
- # at the beggining of the functions.
+ # at the beginning of the functions.
# Indeed, Cython uses "goto" statement for error management, and
# RAII doesn't work with that kind of construction.
# Moreover, the way Pythran expressions are made is that they don't
@@ -4258,6 +4303,11 @@ class MemoryViewIndexNode(BufferIndexNode):
indices = self.indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)
+ if not getting:
+ self.writable_needed = True
+ if self.base.is_name or self.base.is_attribute:
+ self.base.entry.type.writable_needed = True
+
self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim)
axes = []
@@ -4405,6 +4455,37 @@ class MemoryViewSliceNode(MemoryViewIndexNode):
else:
return MemoryCopySlice(self.pos, self)
+ def merged_indices(self, indices):
+ """Return a new list of indices/slices with 'indices' merged into the current ones
+ according to slicing rules.
+ Is used to implement "view[i][j]" => "view[i, j]".
+ Return None if the indices cannot (easily) be merged at compile time.
+ """
+ if not indices:
+ return None
+ # NOTE: Need to evaluate "self.original_indices" here as they might differ from "self.indices".
+ new_indices = self.original_indices[:]
+ indices = indices[:]
+ for i, s in enumerate(self.original_indices):
+ if s.is_slice:
+ if s.start.is_none and s.stop.is_none and s.step.is_none:
+ # Full slice found, replace by index.
+ new_indices[i] = indices[0]
+ indices.pop(0)
+ if not indices:
+ return new_indices
+ else:
+ # Found something non-trivial, e.g. a partial slice.
+ return None
+ elif not s.type.is_int:
+ # Not a slice, not an integer index => could be anything...
+ return None
+ if indices:
+ if len(new_indices) + len(indices) > self.base.type.ndim:
+ return None
+ new_indices += indices
+ return new_indices
+
def is_simple(self):
if self.is_ellipsis_noop:
# TODO: fix SimpleCallNode.is_simple()
@@ -4576,7 +4657,7 @@ class SliceIndexNode(ExprNode):
return bytes_type
elif base_type.is_pyunicode_ptr:
return unicode_type
- elif base_type in (bytes_type, str_type, unicode_type,
+ elif base_type in (bytes_type, bytearray_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return base_type
elif base_type.is_ptr or base_type.is_array:
@@ -5188,6 +5269,32 @@ class CallNode(ExprNode):
return False
return ExprNode.may_be_none(self)
+ def set_py_result_type(self, function, func_type=None):
+ if func_type is None:
+ func_type = function.type
+ if func_type is Builtin.type_type and (
+ function.is_name and
+ function.entry and
+ function.entry.is_builtin and
+ function.entry.name in Builtin.types_that_construct_their_instance):
+ # calling a builtin type that returns a specific object type
+ if function.entry.name == 'float':
+ # the following will come true later on in a transform
+ self.type = PyrexTypes.c_double_type
+ self.result_ctype = PyrexTypes.c_double_type
+ else:
+ self.type = Builtin.builtin_types[function.entry.name]
+ self.result_ctype = py_object_type
+ self.may_return_none = False
+ elif function.is_name and function.type_entry:
+ # We are calling an extension type constructor. As long as we do not
+ # support __new__(), the result type is clear
+ self.type = function.type_entry.type
+ self.result_ctype = py_object_type
+ self.may_return_none = False
+ else:
+ self.type = py_object_type
+
def analyse_as_type_constructor(self, env):
type = self.function.analyse_as_type(env)
if type and type.is_struct_or_union:
@@ -5271,6 +5378,11 @@ class SimpleCallNode(CallNode):
error(self.args[0].pos, "Unknown type")
else:
return PyrexTypes.CPtrType(type)
+ elif attr == 'typeof':
+ if len(self.args) != 1:
+ error(self.args.pos, "only one type allowed.")
+ operand = self.args[0].analyse_types(env)
+ return operand.type
def explicit_args_kwds(self):
return self.args, None
@@ -5301,37 +5413,18 @@ class SimpleCallNode(CallNode):
has_pythran_args &= is_pythran_supported_node_or_none(arg)
self.is_numpy_call_with_exprs = bool(has_pythran_args)
if self.is_numpy_call_with_exprs:
- self.args = None
env.add_include_file("pythonic/numpy/%s.hpp" % self.function.attribute)
- self.type = PythranExpr(pythran_func_type(self.function.attribute, self.arg_tuple.args))
- self.may_return_none = True
- self.is_temp = 1
+ return NumPyMethodCallNode.from_node(
+ self,
+ function=self.function,
+ arg_tuple=self.arg_tuple,
+ type=PythranExpr(pythran_func_type(self.function.attribute, self.arg_tuple.args)),
+ )
elif func_type.is_pyobject:
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
self.args = None
- if func_type is Builtin.type_type and function.is_name and \
- function.entry and \
- function.entry.is_builtin and \
- function.entry.name in Builtin.types_that_construct_their_instance:
- # calling a builtin type that returns a specific object type
- if function.entry.name == 'float':
- # the following will come true later on in a transform
- self.type = PyrexTypes.c_double_type
- self.result_ctype = PyrexTypes.c_double_type
- else:
- self.type = Builtin.builtin_types[function.entry.name]
- self.result_ctype = py_object_type
- self.may_return_none = False
- elif function.is_name and function.type_entry:
- # We are calling an extension type constructor. As
- # long as we do not support __new__(), the result type
- # is clear
- self.type = function.type_entry.type
- self.result_ctype = py_object_type
- self.may_return_none = False
- else:
- self.type = py_object_type
+ self.set_py_result_type(function, func_type)
self.is_temp = 1
else:
self.args = [ arg.analyse_types(env) for arg in self.args ]
@@ -5452,8 +5545,6 @@ class SimpleCallNode(CallNode):
for i in range(min(max_nargs, actual_nargs)):
formal_arg = func_type.args[i]
formal_type = formal_arg.type
- if formal_type.is_const:
- formal_type = formal_type.const_base_type
arg = args[i].coerce_to(formal_type, env)
if formal_arg.not_none:
# C methods must do the None checks at *call* time
@@ -5601,29 +5692,64 @@ class SimpleCallNode(CallNode):
return False # skip allocation of unused result temp
return True
+ def generate_evaluation_code(self, code):
+ function = self.function
+ if function.is_name or function.is_attribute:
+ code.globalstate.use_entry_utility_code(function.entry)
+
+ if not function.type.is_pyobject or len(self.arg_tuple.args) > 1 or (
+ self.arg_tuple.args and self.arg_tuple.is_literal):
+ super(SimpleCallNode, self).generate_evaluation_code(code)
+ return
+
+ # Special case 0-args and try to avoid explicit tuple creation for Python calls with 1 arg.
+ arg = self.arg_tuple.args[0] if self.arg_tuple.args else None
+ subexprs = (self.self, self.coerced_self, function, arg)
+ for subexpr in subexprs:
+ if subexpr is not None:
+ subexpr.generate_evaluation_code(code)
+
+ code.mark_pos(self.pos)
+ assert self.is_temp
+ self.allocate_temp_result(code)
+
+ if arg is None:
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCallNoArg", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
+ self.result(),
+ function.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+ else:
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCallOneArg", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
+ self.result(),
+ function.py_result(),
+ arg.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)))
+
+ code.put_gotref(self.py_result())
+
+ for subexpr in subexprs:
+ if subexpr is not None:
+ subexpr.generate_disposal_code(code)
+ subexpr.free_temps(code)
+
def generate_result_code(self, code):
func_type = self.function_type()
- if self.function.is_name or self.function.is_attribute:
- code.globalstate.use_entry_utility_code(self.function.entry)
if func_type.is_pyobject:
- if func_type is not type_type and not self.arg_tuple.args and self.arg_tuple.is_literal:
- code.globalstate.use_utility_code(UtilityCode.load_cached(
- "PyObjectCallNoArg", "ObjectHandling.c"))
- code.putln(
- "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
- self.result(),
- self.function.py_result(),
- code.error_goto_if_null(self.result(), self.pos)))
- else:
- arg_code = self.arg_tuple.py_result()
- code.globalstate.use_utility_code(UtilityCode.load_cached(
- "PyObjectCall", "ObjectHandling.c"))
- code.putln(
- "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
- self.result(),
- self.function.py_result(),
- arg_code,
- code.error_goto_if_null(self.result(), self.pos)))
+ arg_code = self.arg_tuple.py_result()
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCall", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
+ self.result(),
+ self.function.py_result(),
+ arg_code,
+ code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif func_type.is_cfunction:
if self.has_optional_args:
@@ -5687,11 +5813,34 @@ class SimpleCallNode(CallNode):
if self.has_optional_args:
code.funcstate.release_temp(self.opt_arg_struct)
- @classmethod
- def from_node(cls, node, **kwargs):
- ret = super(SimpleCallNode, cls).from_node(node, **kwargs)
- ret.is_numpy_call_with_exprs = node.is_numpy_call_with_exprs
- return ret
+
+class NumPyMethodCallNode(SimpleCallNode):
+ # Pythran call to a NumPy function or method.
+ #
+ # function ExprNode the function/method to call
+ # arg_tuple TupleNode the arguments as an args tuple
+
+ subexprs = ['function', 'arg_tuple']
+ is_temp = True
+ may_return_none = True
+
+ def generate_evaluation_code(self, code):
+ code.mark_pos(self.pos)
+ self.allocate_temp_result(code)
+
+ self.function.generate_evaluation_code(code)
+ assert self.arg_tuple.mult_factor is None
+ args = self.arg_tuple.args
+ for arg in args:
+ arg.generate_evaluation_code(code)
+
+ code.putln("// function evaluation code for numpy function")
+ code.putln("__Pyx_call_destructor(%s);" % self.result())
+ code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::%s{}(%s)};" % (
+ self.result(),
+ self.result(),
+ self.function.attribute,
+ ", ".join(a.pythran_result() for a in args)))
class PyMethodCallNode(SimpleCallNode):
@@ -5714,16 +5863,6 @@ class PyMethodCallNode(SimpleCallNode):
for arg in args:
arg.generate_evaluation_code(code)
- if self.is_numpy_call_with_exprs:
- code.putln("// function evaluation code for numpy function")
- code.putln("__Pyx_call_destructor(%s);" % self.result())
- code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::%s{}(%s)};" % (
- self.result(),
- self.result(),
- self.function.attribute,
- ", ".join(a.pythran_result() for a in self.arg_tuple.args)))
- return
-
# make sure function is in temp so that we can replace the reference below if it's a method
reuse_function_temp = self.function.is_temp
if reuse_function_temp:
@@ -6034,6 +6173,37 @@ class PythonCapiCallNode(SimpleCallNode):
SimpleCallNode.__init__(self, pos, **kwargs)
+class CachedBuiltinMethodCallNode(CallNode):
+ # Python call to a method of a known Python builtin (only created in transforms)
+
+ subexprs = ['obj', 'args']
+ is_temp = True
+
+ def __init__(self, call_node, obj, method_name, args):
+ super(CachedBuiltinMethodCallNode, self).__init__(
+ call_node.pos,
+ obj=obj, method_name=method_name, args=args,
+ may_return_none=call_node.may_return_none,
+ type=call_node.type)
+
+ def may_be_none(self):
+ if self.may_return_none is not None:
+ return self.may_return_none
+ return ExprNode.may_be_none(self)
+
+ def generate_result_code(self, code):
+ type_cname = self.obj.type.cname
+ obj_cname = self.obj.py_result()
+ args = [arg.py_result() for arg in self.args]
+ call_code = code.globalstate.cached_unbound_method_call_code(
+ obj_cname, type_cname, self.method_name, args)
+ code.putln("%s = %s; %s" % (
+ self.result(), call_code,
+ code.error_goto_if_null(self.result(), self.pos)
+ ))
+ code.put_gotref(self.result())
+
+
class GeneralCallNode(CallNode):
# General Python function call, including keyword,
# * and ** arguments.
@@ -6092,15 +6262,7 @@ class GeneralCallNode(CallNode):
self.positional_args = self.positional_args.analyse_types(env)
self.positional_args = \
self.positional_args.coerce_to_pyobject(env)
- function = self.function
- if function.is_name and function.type_entry:
- # We are calling an extension type constructor. As long
- # as we do not support __new__(), the result type is clear
- self.type = function.type_entry.type
- self.result_ctype = py_object_type
- self.may_return_none = False
- else:
- self.type = py_object_type
+ self.set_py_result_type(self.function)
self.is_temp = 1
return self
@@ -7365,17 +7527,14 @@ class SequenceNode(ExprNode):
code.putln("PyObject* sequence = %s;" % rhs.py_result())
# list/tuple => check size
- code.putln("#if !CYTHON_COMPILING_IN_PYPY")
- code.putln("Py_ssize_t size = Py_SIZE(sequence);")
- code.putln("#else")
- code.putln("Py_ssize_t size = PySequence_Size(sequence);") # < 0 => exception
- code.putln("#endif")
+ code.putln("Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);")
code.putln("if (unlikely(size != %d)) {" % len(self.args))
code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
len(self.args), len(self.args)))
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
+ # < 0 => exception
code.putln(code.error_goto(self.pos))
code.putln("}")
@@ -7606,10 +7765,10 @@ class TupleNode(SequenceNode):
if self.mult_factor or not self.args:
return tuple_type
arg_types = [arg.infer_type(env) for arg in self.args]
- if any(type.is_pyobject or type.is_unspecified or type.is_fused for type in arg_types):
+ if any(type.is_pyobject or type.is_memoryviewslice or type.is_unspecified or type.is_fused
+ for type in arg_types):
return tuple_type
- else:
- return env.declare_tuple_type(self.pos, arg_types).type
+ return env.declare_tuple_type(self.pos, arg_types).type
def analyse_types(self, env, skip_children=False):
if len(self.args) == 0:
@@ -7623,7 +7782,8 @@ class TupleNode(SequenceNode):
arg.starred_expr_allowed_here = True
self.args[i] = arg.analyse_types(env)
if (not self.mult_factor and
- not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_fused) for arg in self.args)):
+ not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.type.is_fused)
+ for arg in self.args)):
self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type
self.is_temp = 1
return self
@@ -7706,26 +7866,21 @@ class TupleNode(SequenceNode):
if len(self.args) == 0:
# result_code is Naming.empty_tuple
return
- if self.is_partly_literal:
- # underlying tuple is const, but factor is not
+
+ if self.is_literal or self.is_partly_literal:
tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
const_code = code.get_cached_constants_writer()
const_code.mark_pos(self.pos)
- self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
+ self.generate_sequence_packing_code(const_code, tuple_target, plain=not self.is_literal)
const_code.put_giveref(tuple_target)
- code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
- self.result(), tuple_target, self.mult_factor.py_result(),
- code.error_goto_if_null(self.result(), self.pos)
+ if self.is_literal:
+ self.result_code = tuple_target
+ else:
+ code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
+ self.result(), tuple_target, self.mult_factor.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)
))
- code.put_gotref(self.py_result())
- elif self.is_literal:
- # non-empty cached tuple => result is global constant,
- # creation code goes into separate code writer
- self.result_code = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
- code = code.get_cached_constants_writer()
- code.mark_pos(self.pos)
- self.generate_sequence_packing_code(code)
- code.put_giveref(self.py_result())
+ code.put_gotref(self.py_result())
else:
self.type.entry.used = True
self.generate_sequence_packing_code(code)
@@ -8891,66 +9046,6 @@ class ClassCellNode(ExprNode):
code.put_incref(self.result(), py_object_type)
-class BoundMethodNode(ExprNode):
- # Helper class used in the implementation of Python
- # class definitions. Constructs an bound method
- # object from a class and a function.
- #
- # function ExprNode Function object
- # self_object ExprNode self object
-
- subexprs = ['function']
-
- def analyse_types(self, env):
- self.function = self.function.analyse_types(env)
- self.type = py_object_type
- self.is_temp = 1
- return self
-
- gil_message = "Constructing a bound method"
-
- def generate_result_code(self, code):
- code.putln(
- "%s = __Pyx_PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
- self.result(),
- self.function.py_result(),
- self.self_object.py_result(),
- self.self_object.py_result(),
- code.error_goto_if_null(self.result(), self.pos)))
- code.put_gotref(self.py_result())
-
-class UnboundMethodNode(ExprNode):
- # Helper class used in the implementation of Python
- # class definitions. Constructs an unbound method
- # object from a class and a function.
- #
- # function ExprNode Function object
-
- type = py_object_type
- is_temp = 1
-
- subexprs = ['function']
-
- def analyse_types(self, env):
- self.function = self.function.analyse_types(env)
- return self
-
- def may_be_none(self):
- return False
-
- gil_message = "Constructing an unbound method"
-
- def generate_result_code(self, code):
- class_cname = code.pyclass_stack[-1].classobj.result()
- code.putln(
- "%s = __Pyx_PyMethod_New(%s, 0, %s); %s" % (
- self.result(),
- self.function.py_result(),
- class_cname,
- code.error_goto_if_null(self.result(), self.pos)))
- code.put_gotref(self.py_result())
-
-
class PyCFunctionNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# functions. Constructs a PyCFunction object
@@ -9056,7 +9151,7 @@ class PyCFunctionNode(ExprNode, ModuleNameMixin):
for arg in nonliteral_other:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
- allow_pyobject=False)
+ allow_pyobject=False, allow_memoryview=True)
self.defaults.append((arg, entry))
entry = module_scope.declare_struct_or_union(
None, 'struct', scope, 1, None, cname=cname)
@@ -10426,7 +10521,7 @@ class CythonArrayNode(ExprNode):
def allocate_temp_result(self, code):
if self.temp_code:
- raise RuntimeError("temp allocated mulitple times")
+ raise RuntimeError("temp allocated multiple times")
self.temp_code = code.funcstate.allocate_temp(self.type, True)
@@ -10687,6 +10782,10 @@ class TypeofNode(ExprNode):
self.literal = literal.coerce_to_pyobject(env)
return self
+ def analyse_as_type(self, env):
+ self.operand = self.operand.analyse_types(env)
+ return self.operand.type
+
def may_be_none(self):
return False
@@ -11132,7 +11231,7 @@ class AddNode(NumBinopNode):
def infer_builtin_types_operation(self, type1, type2):
# b'abc' + 'abc' raises an exception in Py3,
# so we can safely infer the Py2 type for bytes here
- string_types = (bytes_type, str_type, basestring_type, unicode_type)
+ string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2 in string_types:
return string_types[max(string_types.index(type1),
string_types.index(type2))]
@@ -11191,7 +11290,7 @@ class MulNode(NumBinopNode):
def infer_builtin_types_operation(self, type1, type2):
# let's assume that whatever builtin type you multiply a string with
# will either return a string of the same type or fail with an exception
- string_types = (bytes_type, str_type, basestring_type, unicode_type)
+ string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2.is_builtin_type:
return type1
if type2 in string_types and type1.is_builtin_type:
@@ -11607,7 +11706,7 @@ class BoolBinopNode(ExprNode):
operator=self.operator,
operand1=operand1, operand2=operand2)
- def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
+ def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
outer_labels = (and_label, or_label)
@@ -11616,19 +11715,20 @@ class BoolBinopNode(ExprNode):
else:
my_label = or_label = code.new_label('next_or')
self.operand1.generate_bool_evaluation_code(
- code, final_result_temp, and_label, or_label, end_label, my_label)
+ code, final_result_temp, final_result_type, and_label, or_label, end_label, my_label)
and_label, or_label = outer_labels
code.put_label(my_label)
self.operand2.generate_bool_evaluation_code(
- code, final_result_temp, and_label, or_label, end_label, fall_through)
+ code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through)
def generate_evaluation_code(self, code):
self.allocate_temp_result(code)
+ result_type = PyrexTypes.py_object_type if self.type.is_pyobject else self.type
or_label = and_label = None
end_label = code.new_label('bool_binop_done')
- self.generate_bool_evaluation_code(code, self.result(), and_label, or_label, end_label, end_label)
+ self.generate_bool_evaluation_code(code, self.result(), result_type, and_label, or_label, end_label, end_label)
code.put_label(end_label)
gil_message = "Truth-testing Python object"
@@ -11713,7 +11813,7 @@ class BoolBinopResultNode(ExprNode):
test_result = self.arg.result()
return (test_result, self.arg.type.is_pyobject)
- def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
+ def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
# x => x
@@ -11756,7 +11856,7 @@ class BoolBinopResultNode(ExprNode):
code.putln("} else {")
self.value.generate_evaluation_code(code)
self.value.make_owned_reference(code)
- code.putln("%s = %s;" % (final_result_temp, self.value.result()))
+ code.putln("%s = %s;" % (final_result_temp, self.value.result_as(final_result_type)))
self.value.generate_post_assignment_code(code)
# disposal: {not (and_label and or_label) [else]}
self.arg.generate_disposal_code(code)
@@ -12117,6 +12217,11 @@ class CmpNode(object):
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF"
return True
+ elif self.operand2.type is Builtin.set_type:
+ self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySetContains", "ObjectHandling.c")
+ self.special_bool_cmp_function = "__Pyx_PySet_ContainsTF"
+ return True
elif self.operand2.type is Builtin.unicode_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
@@ -12236,7 +12341,14 @@ class PrimaryCmpNode(ExprNode, CmpNode):
is_memslice_nonecheck = False
def infer_type(self, env):
- # TODO: Actually implement this (after merging with -unstable).
+ type1 = self.operand1.infer_type(env)
+ type2 = self.operand2.infer_type(env)
+
+ if is_pythran_expr(type1) or is_pythran_expr(type2):
+ if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
+ return PythranExpr(pythran_binop_type(self.operator, type1, type2))
+
+ # TODO: implement this for other types.
return py_object_type
def type_dependencies(self, env):
@@ -12662,12 +12774,12 @@ class CoerceToMemViewSliceNode(CoercionNode):
def generate_result_code(self, code):
self.type.create_from_py_utility_code(self.env)
- code.putln("%s = %s(%s);" % (self.result(),
- self.type.from_py_function,
- self.arg.py_result()))
-
- error_cond = self.type.error_condition(self.result())
- code.putln(code.error_goto_if(error_cond, self.pos))
+ code.putln(self.type.from_py_call_code(
+ self.arg.py_result(),
+ self.result(),
+ self.pos,
+ code
+ ))
class CastNode(CoercionNode):
@@ -12726,6 +12838,15 @@ class PyTypeTestNode(CoercionNode):
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
+ def reanalyse(self):
+ if self.type != self.arg.type or not self.arg.is_temp:
+ return self
+ if not self.type.typeobj_is_available():
+ return self
+ if self.arg.may_be_none() and self.notnone:
+ return self.arg.as_none_safe_node("Cannot convert NoneType to %.200s" % self.type.name)
+ return self.arg
+
def calculate_constant_result(self):
# FIXME
pass
@@ -13016,6 +13137,7 @@ class CoerceToBooleanNode(CoercionNode):
Builtin.set_type: 'PySet_GET_SIZE',
Builtin.frozenset_type: 'PySet_GET_SIZE',
Builtin.bytes_type: 'PyBytes_GET_SIZE',
+ Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
Builtin.unicode_type: '__Pyx_PyUnicode_IS_TRUE',
}
@@ -13044,11 +13166,9 @@ class CoerceToBooleanNode(CoercionNode):
return
test_func = self._special_builtins.get(self.arg.type)
if test_func is not None:
- code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % (
- self.result(),
- self.arg.py_result(),
- test_func,
- self.arg.py_result()))
+ checks = ["(%s != Py_None)" % self.arg.py_result()] if self.arg.may_be_none() else []
+ checks.append("(%s(%s) != 0)" % (test_func, self.arg.py_result()))
+ code.putln("%s = %s;" % (self.result(), '&&'.join(checks)))
else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
diff --git a/Cython/Compiler/FusedNode.py b/Cython/Compiler/FusedNode.py
index 3effc15..011290e 100644
--- a/Cython/Compiler/FusedNode.py
+++ b/Cython/Compiler/FusedNode.py
@@ -225,7 +225,7 @@ class FusedCFuncDefNode(StatListNode):
"""
Create a new local scope for the copied node and append it to
self.nodes. A new local scope is needed because the arguments with the
- fused types are aready in the local scope, and we need the specialized
+ fused types are already in the local scope, and we need the specialized
entries created after analyse_declarations on each specialized version
of the (CFunc)DefNode.
f2s is a dict mapping each fused type to its specialized version
@@ -276,7 +276,7 @@ class FusedCFuncDefNode(StatListNode):
def _fused_instance_checks(self, normal_types, pyx_code, env):
"""
- Genereate Cython code for instance checks, matching an object to
+ Generate Cython code for instance checks, matching an object to
specialized types.
"""
for specialized_type in normal_types:
@@ -390,7 +390,7 @@ class FusedCFuncDefNode(StatListNode):
coerce_from_py_func=memslice_type.from_py_function,
dtype=dtype)
decl_code.putln(
- "{{memviewslice_cname}} {{coerce_from_py_func}}(object)")
+ "{{memviewslice_cname}} {{coerce_from_py_func}}(object, int)")
pyx_code.context.update(
specialized_type_name=specialized_type.specialization_string,
@@ -400,7 +400,7 @@ class FusedCFuncDefNode(StatListNode):
u"""
# try {{dtype}}
if itemsize == -1 or itemsize == {{sizeof_dtype}}:
- memslice = {{coerce_from_py_func}}(arg)
+ memslice = {{coerce_from_py_func}}(arg, 0)
if memslice.memview:
__PYX_XDEC_MEMVIEW(&memslice, 1)
# print 'found a match for the buffer through format parsing'
@@ -421,10 +421,11 @@ class FusedCFuncDefNode(StatListNode):
# The first thing to find a match in this loop breaks out of the loop
pyx_code.put_chunk(
u"""
+ """ + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u"""
if ndarray is not None:
if isinstance(arg, ndarray):
dtype = arg.dtype
- arg_is_pythran_compatible = True
+ """ + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u"""
elif __pyx_memoryview_check(arg):
arg_base = arg.base
if isinstance(arg_base, ndarray):
@@ -438,24 +439,30 @@ class FusedCFuncDefNode(StatListNode):
if dtype is not None:
itemsize = dtype.itemsize
kind = ord(dtype.kind)
- # We only support the endianess of the current compiler
+ dtype_signed = kind == 'i'
+ """)
+ pyx_code.indent(2)
+ if pythran_types:
+ pyx_code.put_chunk(
+ u"""
+ # Pythran only supports the endianness of the current compiler
byteorder = dtype.byteorder
if byteorder == "<" and not __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
- if byteorder == ">" and __Pyx_Is_Little_Endian():
+ elif byteorder == ">" and __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
- dtype_signed = kind == 'i'
if arg_is_pythran_compatible:
cur_stride = itemsize
- for dim,stride in zip(reversed(arg.shape),reversed(arg.strides)):
- if stride != cur_stride:
+ shape = arg.shape
+ strides = arg.strides
+ for i in range(arg.ndim-1, -1, -1):
+ if (strides[i]) != cur_stride:
arg_is_pythran_compatible = False
break
- cur_stride *= dim
+ cur_stride *= shape[i]
else:
- arg_is_pythran_compatible = not (arg.flags.f_contiguous and arg.ndim > 1)
- """)
- pyx_code.indent(2)
+ arg_is_pythran_compatible = not (arg.flags.f_contiguous and (arg.ndim) > 1)
+ """)
pyx_code.named_insertion_point("numpy_dtype_checks")
self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
pyx_code.dedent(2)
@@ -464,7 +471,7 @@ class FusedCFuncDefNode(StatListNode):
self._buffer_parse_format_string_check(
pyx_code, decl_code, specialized_type, env)
- def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types):
+ def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types):
"""
If we have any buffer specializations, write out some variable
declarations and imports.
@@ -484,10 +491,14 @@ class FusedCFuncDefNode(StatListNode):
cdef Py_ssize_t itemsize
cdef bint dtype_signed
cdef char kind
- cdef bint arg_is_pythran_compatible
itemsize = -1
- arg_is_pythran_compatible = False
+ """)
+
+ if pythran_types:
+ pyx_code.local_variable_declarations.put_chunk(u"""
+ cdef bint arg_is_pythran_compatible
+ cdef Py_ssize_t cur_stride
""")
pyx_code.imports.put_chunk(
@@ -514,7 +525,7 @@ class FusedCFuncDefNode(StatListNode):
pyx_code.local_variable_declarations.put_chunk(
u"""
cdef bint {{dtype_name}}_is_signed
- {{dtype_name}}_is_signed = <{{dtype_type}}> -1 < 0
+ {{dtype_name}}_is_signed = not (<{{dtype_type}}> -1 > 0)
""")
def _split_fused_types(self, arg):
@@ -670,7 +681,7 @@ class FusedCFuncDefNode(StatListNode):
default_idx += 1
if all_buffer_types:
- self._buffer_declarations(pyx_code, decl_code, all_buffer_types)
+ self._buffer_declarations(pyx_code, decl_code, all_buffer_types, pythran_types)
env.use_utility_code(Code.UtilityCode.load_cached("Import", "ImportExport.c"))
env.use_utility_code(Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c"))
diff --git a/Cython/Compiler/Main.py b/Cython/Compiler/Main.py
index 895f375..6a582ba 100644
--- a/Cython/Compiler/Main.py
+++ b/Cython/Compiler/Main.py
@@ -18,12 +18,12 @@ try:
except ImportError:
basestring = str
-from . import Errors
# Do not import Parsing here, import it when needed, because Parsing imports
# Nodes, which globally needs debug command line options initialized to set a
# conditional metaclass. These options are processed by CmdLine called from
# main() in this file.
# import Parsing
+from . import Errors
from .StringEncoding import EncodedString
from .Scanning import PyrexScanner, FileSourceDescriptor
from .Errors import PyrexError, CompileError, error, warning
@@ -38,6 +38,7 @@ module_name_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_
verbose = 0
+
class CompilationData(object):
# Bundles the information that is passed from transform to transform.
# (For now, this is only)
@@ -52,6 +53,7 @@ class CompilationData(object):
# result CompilationResult
pass
+
class Context(object):
# This class encapsulates the context needed for compiling
# one or more Cython implementation files along with their
@@ -239,7 +241,7 @@ class Context(object):
pxd = self.search_include_directories(qualified_name, ".pxd", pos, sys_path=sys_path)
if pxd is None: # XXX Keep this until Includes/Deprecated is removed
if (qualified_name.startswith('python') or
- qualified_name in ('stdlib', 'stdio', 'stl')):
+ qualified_name in ('stdlib', 'stdio', 'stl')):
standard_include_path = os.path.abspath(os.path.normpath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
deprecated_include_path = os.path.join(standard_include_path, 'Deprecated')
@@ -356,7 +358,7 @@ class Context(object):
from ..Parser import ConcreteSyntaxTree
except ImportError:
raise RuntimeError(
- "Formal grammer can only be used with compiled Cython with an available pgen.")
+ "Formal grammar can only be used with compiled Cython with an available pgen.")
ConcreteSyntaxTree.p_module(source_filename)
except UnicodeDecodeError as e:
#import traceback
@@ -426,6 +428,7 @@ class Context(object):
pass
result.c_file = None
+
def get_output_filename(source_filename, cwd, options):
if options.cplus:
c_suffix = ".cpp"
@@ -441,6 +444,7 @@ def get_output_filename(source_filename, cwd, options):
else:
return suggested_file_name
+
def create_default_resultobj(compilation_source, options):
result = CompilationResult()
result.main_source_file = compilation_source.source_desc.filename
@@ -451,6 +455,7 @@ def create_default_resultobj(compilation_source, options):
result.embedded_metadata = options.embedded_metadata
return result
+
def run_pipeline(source, options, full_module_name=None, context=None):
from . import Pipeline
@@ -496,15 +501,15 @@ def run_pipeline(source, options, full_module_name=None, context=None):
return result
-#------------------------------------------------------------------------
+# ------------------------------------------------------------------------
#
# Main Python entry points
#
-#------------------------------------------------------------------------
+# ------------------------------------------------------------------------
class CompilationSource(object):
"""
- Contains the data necesarry to start up a compilation pipeline for
+ Contains the data necessary to start up a compilation pipeline for
a single compilation unit.
"""
def __init__(self, source_desc, full_module_name, cwd):
@@ -512,6 +517,7 @@ class CompilationSource(object):
self.full_module_name = full_module_name
self.cwd = cwd
+
class CompilationOptions(object):
"""
Options to the Cython compiler:
@@ -678,13 +684,14 @@ def compile_multiple(sources, options):
processed.add(source)
return results
+
def compile(source, options = None, full_module_name = None, **kwds):
"""
compile(source [, options], [, = ]...)
Compile one or more Pyrex implementation files, with optional timestamp
- checking and recursing on dependecies. The source argument may be a string
- or a sequence of strings If it is a string and no recursion or timestamp
+ checking and recursing on dependencies. The source argument may be a string
+ or a sequence of strings. If it is a string and no recursion or timestamp
checking is requested, a CompilationResult is returned, otherwise a
CompilationResultSet is returned.
"""
@@ -694,14 +701,17 @@ def compile(source, options = None, full_module_name = None, **kwds):
else:
return compile_multiple(source, options)
-#------------------------------------------------------------------------
+
+# ------------------------------------------------------------------------
#
# Main command-line entry point
#
-#------------------------------------------------------------------------
+# ------------------------------------------------------------------------
+
def setuptools_main():
return main(command_line = 1)
+
def main(command_line = 0):
args = sys.argv[1:]
any_failures = 0
@@ -727,12 +737,11 @@ def main(command_line = 0):
sys.exit(1)
-
-#------------------------------------------------------------------------
+# ------------------------------------------------------------------------
#
# Set the default options depending on the platform
#
-#------------------------------------------------------------------------
+# ------------------------------------------------------------------------
default_options = dict(
show_version = 0,
diff --git a/Cython/Compiler/MemoryView.py b/Cython/Compiler/MemoryView.py
index 7df5ec5..77ef59c 100644
--- a/Cython/Compiler/MemoryView.py
+++ b/Cython/Compiler/MemoryView.py
@@ -28,12 +28,12 @@ def concat_flags(*flags):
format_flag = "PyBUF_FORMAT"
-memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
-memview_f_contiguous = "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
-memview_any_contiguous = "(PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
-memview_full_access = "PyBUF_FULL"
-#memview_strided_access = "PyBUF_STRIDED"
-memview_strided_access = "PyBUF_RECORDS"
+memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT)"
+memview_f_contiguous = "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT)"
+memview_any_contiguous = "(PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT)"
+memview_full_access = "PyBUF_FULL_RO"
+#memview_strided_access = "PyBUF_STRIDED_RO"
+memview_strided_access = "PyBUF_RECORDS_RO"
MEMVIEW_DIRECT = '__Pyx_MEMVIEW_DIRECT'
MEMVIEW_PTR = '__Pyx_MEMVIEW_PTR'
diff --git a/Cython/Compiler/ModuleNode.py b/Cython/Compiler/ModuleNode.py
index d6c322e..31266ec 100644
--- a/Cython/Compiler/ModuleNode.py
+++ b/Cython/Compiler/ModuleNode.py
@@ -7,10 +7,11 @@ from __future__ import absolute_import
import cython
cython.declare(Naming=object, Options=object, PyrexTypes=object, TypeSlots=object,
error=object, warning=object, py_object_type=object, UtilityCode=object,
- EncodedString=object)
+ EncodedString=object, re=object)
import json
import os
+import re
import operator
from .PyrexTypes import CPtrType
from . import Future
@@ -27,7 +28,7 @@ from . import Pythran
from .Errors import error, warning
from .PyrexTypes import py_object_type
from ..Utils import open_new_file, replace_suffix, decode_filename
-from .Code import UtilityCode
+from .Code import UtilityCode, IncludeCode
from .StringEncoding import EncodedString
from .Pythran import has_np_pythran
@@ -85,15 +86,15 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self.scope.utility_code_list.extend(scope.utility_code_list)
+ for inc in scope.c_includes.values():
+ self.scope.process_include(inc)
+
def extend_if_not_in(L1, L2):
for x in L2:
if x not in L1:
L1.append(x)
- extend_if_not_in(self.scope.include_files, scope.include_files)
extend_if_not_in(self.scope.included_files, scope.included_files)
- extend_if_not_in(self.scope.python_include_files,
- scope.python_include_files)
if merge_scope:
# Ensure that we don't generate import code for these entries!
@@ -364,6 +365,10 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("")
code.putln("/* Implementation of '%s' */" % env.qualified_name)
+ code = globalstate['late_includes']
+ code.putln("/* Late includes */")
+ self.generate_includes(env, modules, code, early=False)
+
code = globalstate['all_the_rest']
self.generate_cached_builtins_decls(env, code)
@@ -615,8 +620,9 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("")
code.putln("#define PY_SSIZE_T_CLEAN")
- for filename in env.python_include_files:
- code.putln('#include "%s"' % filename)
+ for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
+ if inc.location == inc.INITIAL:
+ inc.write(code)
code.putln("#ifndef Py_PYTHON_H")
code.putln(" #error Python headers needed to compile C extensions, "
"please install development version of Python.")
@@ -636,6 +642,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
self._put_setup_code(code, "CppInitCode")
else:
self._put_setup_code(code, "CInitCode")
+ self._put_setup_code(code, "PythonCompatibility")
self._put_setup_code(code, "MathInitCode")
if options.c_line_in_traceback:
@@ -655,7 +662,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("#define %s" % Naming.h_guard_prefix + self.api_name(env))
code.putln("#define %s" % Naming.api_guard_prefix + self.api_name(env))
- self.generate_includes(env, cimported_modules, code)
+ code.putln("/* Early includes */")
+ self.generate_includes(env, cimported_modules, code, late=False)
code.putln("")
code.putln("#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)")
code.putln("#define CYTHON_WITHOUT_ASSERTIONS")
@@ -729,16 +737,17 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln(" #define DL_IMPORT(_T) _T")
code.putln("#endif")
- def generate_includes(self, env, cimported_modules, code):
+ def generate_includes(self, env, cimported_modules, code, early=True, late=True):
includes = []
- for filename in env.include_files:
- byte_decoded_filenname = str(filename)
- if byte_decoded_filenname[0] == '<' and byte_decoded_filenname[-1] == '>':
- code.putln('#include %s' % byte_decoded_filenname)
- else:
- code.putln('#include "%s"' % byte_decoded_filenname)
-
- code.putln_openmp("#include ")
+ for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
+ if inc.location == inc.EARLY:
+ if early:
+ inc.write(code)
+ elif inc.location == inc.LATE:
+ if late:
+ inc.write(code)
+ if early:
+ code.putln_openmp("#include ")
def generate_filename_table(self, code):
from os.path import isabs, basename
@@ -1871,16 +1880,19 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
# If that raises an AttributeError, call the __getattr__ if defined.
#
# In both cases, defined can be in this class, or any base class.
- def lookup_here_or_base(n, type=None):
+ def lookup_here_or_base(n, tp=None, extern_return=None):
# Recursive lookup
- if type is None:
- type = scope.parent_type
- r = type.scope.lookup_here(n)
- if r is None and \
- type.base_type is not None:
- return lookup_here_or_base(n, type.base_type)
- else:
- return r
+ if tp is None:
+ tp = scope.parent_type
+ r = tp.scope.lookup_here(n)
+ if r is None:
+ if tp.is_external and extern_return is not None:
+ return extern_return
+ if tp.base_type is not None:
+ return lookup_here_or_base(n, tp.base_type)
+ return r
+
+ has_instance_dict = lookup_here_or_base("__dict__", extern_return="extern")
getattr_entry = lookup_here_or_base("__getattr__")
getattribute_entry = lookup_here_or_base("__getattribute__")
code.putln("")
@@ -1892,8 +1904,20 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
"PyObject *v = %s(o, n);" % (
getattribute_entry.func_cname))
else:
+ if not has_instance_dict and scope.parent_type.is_final_type:
+ # Final with no dict => use faster type attribute lookup.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObject_GenericGetAttrNoDict", "ObjectHandling.c"))
+ generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict"
+ elif not has_instance_dict or has_instance_dict == "extern":
+ # No dict in the known ancestors, but don't know about extern ancestors or subtypes.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObject_GenericGetAttr", "ObjectHandling.c"))
+ generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttr"
+ else:
+ generic_getattr_cfunc = "PyObject_GenericGetAttr"
code.putln(
- "PyObject *v = PyObject_GenericGetAttr(o, n);")
+ "PyObject *v = %s(o, n);" % generic_getattr_cfunc)
if getattr_entry is not None:
code.putln(
"if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {")
@@ -2243,19 +2267,23 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("return -1;")
code.putln("}")
code.putln("")
- code.putln(UtilityCode.load_cached("ImportStar", "ImportExport.c").impl)
+ code.putln(UtilityCode.load_as_string("ImportStar", "ImportExport.c")[1])
code.exit_cfunc_scope() # done with labels
def generate_module_init_func(self, imported_modules, env, code):
+ subfunction = self.mod_init_subfunction(self.scope, code)
+
code.enter_cfunc_scope(self.scope)
code.putln("")
- header2 = "PyMODINIT_FUNC init%s(void)" % env.module_name
- header3 = "PyMODINIT_FUNC %s(void)" % self.mod_init_func_cname('PyInit', env)
+ code.putln(UtilityCode.load_as_string("PyModInitFuncType", "ModuleSetupCode.c")[0])
+ header2 = "__Pyx_PyMODINIT_FUNC init%s(void)" % env.module_name
+ header3 = "__Pyx_PyMODINIT_FUNC %s(void)" % self.mod_init_func_cname('PyInit', env)
code.putln("#if PY_MAJOR_VERSION < 3")
- code.putln("%s; /*proto*/" % header2)
+ # Optimise for small code size as the module init function is only executed once.
+ code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header2)
code.putln(header2)
code.putln("#else")
- code.putln("%s; /*proto*/" % header3)
+ code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header3)
code.putln(header3)
# CPython 3.5+ supports multi-phase module initialisation (gives access to __spec__, __file__, etc.)
@@ -2269,7 +2297,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("")
# main module init code lives in Py_mod_exec function, not in PyInit function
- code.putln("static int %s(PyObject *%s)" % (
+ code.putln("static int %s(PyObject *%s) CYTHON_SMALL_CODE " % (
self.mod_init_func_cname(Naming.pymodule_exec_func_cname, env),
Naming.pymodinit_module_arg))
code.putln("#endif") # PEP489
@@ -2294,21 +2322,20 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
Naming.module_cname,
Naming.pymodinit_module_arg,
))
+ code.putln("#elif PY_MAJOR_VERSION >= 3")
+ # Hack: enforce single initialisation also on reimports under different names on Python 3 (with PEP 3121/489).
+ code.putln("if (%s) return __Pyx_NewRef(%s);" % (
+ Naming.module_cname,
+ Naming.module_cname,
+ ))
code.putln("#endif")
if profile or linetrace:
tempdecl_code.put_trace_declarations()
code.put_trace_frame_init()
- code.putln("#if CYTHON_REFNANNY")
- code.putln("__Pyx_RefNanny = __Pyx_RefNannyImportAPI(\"refnanny\");")
- code.putln("if (!__Pyx_RefNanny) {")
- code.putln(" PyErr_Clear();")
- code.putln(" __Pyx_RefNanny = __Pyx_RefNannyImportAPI(\"Cython.Runtime.refnanny\");")
- code.putln(" if (!__Pyx_RefNanny)")
- code.putln(" Py_FatalError(\"failed to import 'refnanny' module\");")
- code.putln("}")
- code.putln("#endif")
+ refnanny_import_code = UtilityCode.load_as_string("ImportRefnannyAPI", "ModuleSetupCode.c")[1]
+ code.putln(refnanny_import_code.rstrip())
code.put_setup_refcount_context(header3)
env.use_utility_code(UtilityCode.load("CheckBinaryVersion", "ModuleSetupCode.c"))
@@ -2366,30 +2393,32 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.putln("/*--- Constants init code ---*/")
code.put_error_if_neg(self.pos, "__Pyx_InitCachedConstants()")
- code.putln("/*--- Global init code ---*/")
- self.generate_global_init_code(env, code)
+ code.putln("/*--- Global type/function init code ---*/")
+
+ with subfunction("Global init code") as inner_code:
+ self.generate_global_init_code(env, inner_code)
- code.putln("/*--- Variable export code ---*/")
- self.generate_c_variable_export_code(env, code)
+ with subfunction("Variable export code") as inner_code:
+ self.generate_c_variable_export_code(env, inner_code)
- code.putln("/*--- Function export code ---*/")
- self.generate_c_function_export_code(env, code)
+ with subfunction("Function export code") as inner_code:
+ self.generate_c_function_export_code(env, inner_code)
- code.putln("/*--- Type init code ---*/")
- self.generate_type_init_code(env, code)
+ with subfunction("Type init code") as inner_code:
+ self.generate_type_init_code(env, inner_code)
- code.putln("/*--- Type import code ---*/")
- for module in imported_modules:
- self.generate_type_import_code_for_module(module, env, code)
+ with subfunction("Type import code") as inner_code:
+ for module in imported_modules:
+ self.generate_type_import_code_for_module(module, env, inner_code)
- code.putln("/*--- Variable import code ---*/")
- for module in imported_modules:
- self.generate_c_variable_import_code_for_module(module, env, code)
+ with subfunction("Variable import code") as inner_code:
+ for module in imported_modules:
+ self.generate_c_variable_import_code_for_module(module, env, inner_code)
- code.putln("/*--- Function import code ---*/")
- for module in imported_modules:
- self.specialize_fused_types(module)
- self.generate_c_function_import_code_for_module(module, env, code)
+ with subfunction("Function import code") as inner_code:
+ for module in imported_modules:
+ self.specialize_fused_types(module)
+ self.generate_c_function_import_code_for_module(module, env, inner_code)
code.putln("/*--- Execution code ---*/")
code.mark_pos(None)
@@ -2454,6 +2483,71 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
code.exit_cfunc_scope()
+ def mod_init_subfunction(self, scope, orig_code):
+ """
+ Return a context manager that allows diverting the module init code generation
+ into a separate function and instead inserts a call to it.
+
+ Can be reused sequentially to create multiple functions.
+ The functions get inserted at the point where the context manager was created.
+ The call gets inserted where the context manager is used (on entry).
+ """
+ prototypes = orig_code.insertion_point()
+ prototypes.putln("")
+ function_code = orig_code.insertion_point()
+ function_code.putln("")
+
+ class ModInitSubfunction(object):
+ def __init__(self, code_type):
+ cname = '_'.join(code_type.lower().split())
+ assert re.match("^[a-z0-9_]+$", cname)
+ self.cfunc_name = "__Pyx_modinit_%s" % cname
+ self.description = code_type
+ self.tempdecl_code = None
+ self.call_code = None
+
+ def __enter__(self):
+ self.call_code = orig_code.insertion_point()
+ code = function_code
+ code.enter_cfunc_scope(scope)
+ prototypes.putln("static int %s(void); /*proto*/" % self.cfunc_name)
+ code.putln("static int %s(void) {" % self.cfunc_name)
+ code.put_declare_refcount_context()
+ self.tempdecl_code = code.insertion_point()
+ code.put_setup_refcount_context(self.cfunc_name)
+ # Leave a grepable marker that makes it easy to find the generator source.
+ code.putln("/*--- %s ---*/" % self.description)
+ return code
+
+ def __exit__(self, *args):
+ code = function_code
+ code.put_finish_refcount_context()
+ code.putln("return 0;")
+
+ self.tempdecl_code.put_temp_declarations(code.funcstate)
+ self.tempdecl_code = None
+
+ needs_error_handling = code.label_used(code.error_label)
+ if needs_error_handling:
+ code.put_label(code.error_label)
+ for cname, type in code.funcstate.all_managed_temps():
+ code.put_xdecref(cname, type)
+ code.put_finish_refcount_context()
+ code.putln("return -1;")
+ code.putln("}")
+ code.exit_cfunc_scope()
+ code.putln("")
+
+ if needs_error_handling:
+ self.call_code.use_label(orig_code.error_label)
+ self.call_code.putln("if (unlikely(%s() != 0)) goto %s;" % (
+ self.cfunc_name, orig_code.error_label))
+ else:
+ self.call_code.putln("(void)%s();" % self.cfunc_name)
+ self.call_code = None
+
+ return ModInitSubfunction
+
def generate_module_import_setup(self, env, code):
module_path = env.directives['set_initial_path']
if module_path == 'SOURCEFILE':
@@ -2863,8 +2957,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
else:
self.generate_base_type_import_code(env, entry, code)
self.generate_exttype_vtable_init_code(entry, code)
- self.generate_type_ready_code(env, entry, code)
- self.generate_typeptr_assignment_code(entry, code)
+ if entry.type.early_init:
+ self.generate_type_ready_code(entry, code)
def generate_base_type_import_code(self, env, entry, code):
base_type = entry.type.base_type
@@ -2938,98 +3032,8 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
not type.is_external or type.is_subclassed,
error_code))
- def generate_type_ready_code(self, env, entry, code):
- # Generate a call to PyType_Ready for an extension
- # type defined in this module.
- type = entry.type
- typeobj_cname = type.typeobj_cname
- scope = type.scope
- if scope: # could be None if there was an error
- if entry.visibility != 'extern':
- for slot in TypeSlots.slot_table:
- slot.generate_dynamic_init_code(scope, code)
- code.putln(
- "if (PyType_Ready(&%s) < 0) %s" % (
- typeobj_cname,
- code.error_goto(entry.pos)))
- # Don't inherit tp_print from builtin types, restoring the
- # behavior of using tp_repr or tp_str instead.
- code.putln("%s.tp_print = 0;" % typeobj_cname)
- # Fix special method docstrings. This is a bit of a hack, but
- # unless we let PyType_Ready create the slot wrappers we have
- # a significant performance hit. (See trac #561.)
- for func in entry.type.scope.pyfunc_entries:
- is_buffer = func.name in ('__getbuffer__', '__releasebuffer__')
- if (func.is_special and Options.docstrings and
- func.wrapperbase_cname and not is_buffer):
- slot = TypeSlots.method_name_to_slot[func.name]
- preprocessor_guard = slot.preprocessor_guard_code()
- if preprocessor_guard:
- code.putln(preprocessor_guard)
- code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
- code.putln("{")
- code.putln(
- 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)&%s, "%s"); %s' % (
- typeobj_cname,
- func.name,
- code.error_goto_if_null('wrapper', entry.pos)))
- code.putln(
- "if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
- code.putln(
- "%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
- func.wrapperbase_cname))
- code.putln(
- "%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
- code.putln(
- "((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
- func.wrapperbase_cname))
- code.putln("}")
- code.putln("}")
- code.putln('#endif')
- if preprocessor_guard:
- code.putln('#endif')
- if type.vtable_cname:
- code.putln(
- "if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
- typeobj_cname,
- type.vtabptr_cname,
- code.error_goto(entry.pos)))
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
- if not type.scope.is_internal and not type.scope.directives['internal']:
- # scope.is_internal is set for types defined by
- # Cython (such as closures), the 'internal'
- # directive is set by users
- code.putln(
- 'if (PyObject_SetAttrString(%s, "%s", (PyObject *)&%s) < 0) %s' % (
- Naming.module_cname,
- scope.class_name,
- typeobj_cname,
- code.error_goto(entry.pos)))
- weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
- if weakref_entry:
- if weakref_entry.type is py_object_type:
- tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
- if type.typedef_flag:
- objstruct = type.objstruct_cname
- else:
- objstruct = "struct %s" % type.objstruct_cname
- code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
- tp_weaklistoffset,
- tp_weaklistoffset,
- objstruct,
- weakref_entry.cname))
- else:
- error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
- if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None:
- # Unfortunately, we cannot reliably detect whether a
- # superclass defined __reduce__ at compile time, so we must
- # do so at runtime.
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
- code.putln('if (__Pyx_setup_reduce((PyObject*)&%s) < 0) %s' % (
- typeobj_cname,
- code.error_goto(entry.pos)))
+ def generate_type_ready_code(self, entry, code):
+ Nodes.CClassDefNode.generate_type_ready_code(entry, code)
def generate_exttype_vtable_init_code(self, entry, code):
# Generate code to initialise the C method table of an
@@ -3060,15 +3064,6 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode):
cast,
meth_entry.func_cname))
- def generate_typeptr_assignment_code(self, entry, code):
- # Generate code to initialise the typeptr of an extension
- # type defined in this module to point to its type object.
- type = entry.type
- if type.typeobj_cname:
- code.putln(
- "%s = &%s;" % (
- type.typeptr_cname, type.typeobj_cname))
-
def generate_cfunction_declaration(entry, env, code, definition):
from_cy_utility = entry.used and entry.utility_code_definition
if entry.used and entry.inline_func_in_pxd or (not entry.in_cinclude and (
diff --git a/Cython/Compiler/Nodes.py b/Cython/Compiler/Nodes.py
index 0758894..e0d8dc7 100644
--- a/Cython/Compiler/Nodes.py
+++ b/Cython/Compiler/Nodes.py
@@ -68,8 +68,9 @@ def embed_position(pos, docstring):
return doc
-def analyse_type_annotation(annotation, env):
+def analyse_type_annotation(annotation, env, assigned_value=None):
base_type = None
+ is_ambiguous = False
explicit_pytype = explicit_ctype = False
if annotation.is_dict_literal:
warning(annotation.pos,
@@ -88,6 +89,13 @@ def analyse_type_annotation(annotation, env):
warning(annotation.pos, "Duplicate type declarations found in signature annotation")
arg_type = annotation.analyse_as_type(env)
if annotation.is_name and not annotation.cython_attribute and annotation.name in ('int', 'long', 'float'):
+ # Map builtin numeric Python types to C types in safe cases.
+ if assigned_value is not None and arg_type is not None and not arg_type.is_pyobject:
+ assigned_type = assigned_value.infer_type(env)
+ if assigned_type and assigned_type.is_pyobject:
+ # C type seems unsafe, e.g. due to 'None' default value => ignore annotation type
+ is_ambiguous = True
+ arg_type = None
# ignore 'int' and require 'cython.int' to avoid unsafe integer declarations
if arg_type in (PyrexTypes.c_long_type, PyrexTypes.c_int_type, PyrexTypes.c_float_type):
arg_type = PyrexTypes.c_double_type if annotation.name == 'float' else py_object_type
@@ -100,6 +108,8 @@ def analyse_type_annotation(annotation, env):
"Python type declaration in signature annotation does not refer to a Python type")
base_type = CAnalysedBaseTypeNode(
annotation.pos, type=arg_type, is_arg=True)
+ elif is_ambiguous:
+ warning(annotation.pos, "Ambiguous types in annotation, ignoring")
else:
warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
return base_type, arg_type
@@ -460,19 +470,30 @@ class StatNode(Node):
class CDefExternNode(StatNode):
- # include_file string or None
- # body StatNode
+ # include_file string or None
+ # verbatim_include string or None
+ # body StatListNode
child_attrs = ["body"]
def analyse_declarations(self, env):
- if self.include_file:
- env.add_include_file(self.include_file)
old_cinclude_flag = env.in_cinclude
env.in_cinclude = 1
self.body.analyse_declarations(env)
env.in_cinclude = old_cinclude_flag
+ if self.include_file or self.verbatim_include:
+ # Determine whether include should be late
+ stats = self.body.stats
+ if not env.directives['preliminary_late_includes_cy28']:
+ late = False
+ elif not stats:
+ # Special case: empty 'cdef extern' blocks are early
+ late = False
+ else:
+ late = all(isinstance(node, CVarDefNode) for node in stats)
+ env.add_include_file(self.include_file, self.verbatim_include, late)
+
def analyse_expressions(self, env):
return self
@@ -776,7 +797,7 @@ class CFuncDeclaratorNode(CDeclaratorNode):
scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)
for arg in func_type.args[len(func_type.args) - self.optional_arg_count:]:
- scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=1)
+ scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=True, allow_memoryview=True)
struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)
@@ -890,7 +911,7 @@ class CArgDeclNode(Node):
annotation = self.annotation
if not annotation:
return None
- base_type, arg_type = analyse_type_annotation(annotation, env)
+ base_type, arg_type = analyse_type_annotation(annotation, env, assigned_value=self.default)
if base_type is not None:
self.base_type = base_type
return arg_type
@@ -1127,7 +1148,7 @@ class TemplatedTypeNode(CBaseTypeNode):
type = template_node.analyse_as_type(env)
if type is None:
error(template_node.pos, "unknown type in template argument")
- return error_type
+ type = error_type
template_types.append(type)
self.type = base_type.specialize_here(self.pos, template_types)
@@ -1844,6 +1865,10 @@ class FuncDefNode(StatNode, BlockNode):
code_object = self.code_object.calculate_result_code(code) if self.code_object else None
code.put_trace_frame_init(code_object)
+ # ----- Special check for getbuffer
+ if is_getbuffer_slot:
+ self.getbuffer_check(code)
+
# ----- set up refnanny
if use_refnanny:
tempvardecl_code.put_declare_refcount_context()
@@ -1917,7 +1942,7 @@ class FuncDefNode(StatNode, BlockNode):
code.put_var_incref(entry)
# Note: defaults are always incref-ed. For def functions, we
- # we aquire arguments from object converstion, so we have
+ # we acquire arguments from object conversion, so we have
# new references. If we are a cdef function, we need to
# incref our arguments
elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1:
@@ -2145,7 +2170,10 @@ class FuncDefNode(StatNode, BlockNode):
error(arg.pos, "Invalid use of 'void'")
elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
error(arg.pos, "Argument type '%s' is incomplete" % arg.type)
- return env.declare_arg(arg.name, arg.type, arg.pos)
+ entry = env.declare_arg(arg.name, arg.type, arg.pos)
+ if arg.annotation:
+ entry.annotation = arg.annotation
+ return entry
def generate_arg_type_test(self, arg, code):
# Generate type test for one argument.
@@ -2191,31 +2219,59 @@ class FuncDefNode(StatNode, BlockNode):
#
# Special code for the __getbuffer__ function
#
- def getbuffer_init(self, code):
- info = self.local_scope.arg_entries[1].cname
- # Python 3.0 betas have a bug in memoryview which makes it call
- # getbuffer with a NULL parameter. For now we work around this;
- # the following block should be removed when this bug is fixed.
- code.putln("if (%s != NULL) {" % info)
- code.putln("%s->obj = Py_None; __Pyx_INCREF(Py_None);" % info)
- code.put_giveref("%s->obj" % info) # Do not refnanny object within structs
+ def _get_py_buffer_info(self):
+ py_buffer = self.local_scope.arg_entries[1]
+ try:
+ # Check builtin definition of struct Py_buffer
+ obj_type = py_buffer.type.base_type.scope.entries['obj'].type
+ except (AttributeError, KeyError):
+ # User code redeclared struct Py_buffer
+ obj_type = None
+ return py_buffer, obj_type
+
+ # Old Python 3 used to support write-locks on buffer-like objects by
+ # calling PyObject_GetBuffer() with a view==NULL parameter. This obscure
+ # feature is obsolete, it was almost never used (only one instance in
+ # `Modules/posixmodule.c` in Python 3.1) and it is now officially removed
+ # (see bpo-14203). We add an extra check here to prevent legacy code from
+ # trying to use the feature and prevent segmentation faults.
+ def getbuffer_check(self, code):
+ py_buffer, _ = self._get_py_buffer_info()
+ view = py_buffer.cname
+ code.putln("if (%s == NULL) {" % view)
+ code.putln("PyErr_SetString(PyExc_BufferError, "
+ "\"PyObject_GetBuffer: view==NULL argument is obsolete\");")
+ code.putln("return -1;")
code.putln("}")
+ def getbuffer_init(self, code):
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.put_init_to_py_none("%s->obj" % view, obj_type)
+ code.put_giveref("%s->obj" % view) # Do not refnanny object within structs
+ else:
+ code.putln("%s->obj = NULL;" % view)
+
def getbuffer_error_cleanup(self, code):
- info = self.local_scope.arg_entries[1].cname
- code.putln("if (%s != NULL && %s->obj != NULL) {"
- % (info, info))
- code.put_gotref("%s->obj" % info)
- code.putln("__Pyx_DECREF(%s->obj); %s->obj = NULL;"
- % (info, info))
- code.putln("}")
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.putln("if (%s->obj != NULL) {" % view)
+ code.put_gotref("%s->obj" % view)
+ code.put_decref_clear("%s->obj" % view, obj_type)
+ code.putln("}")
+ else:
+ code.putln("Py_CLEAR(%s->obj);" % view)
def getbuffer_normal_cleanup(self, code):
- info = self.local_scope.arg_entries[1].cname
- code.putln("if (%s != NULL && %s->obj == Py_None) {" % (info, info))
- code.put_gotref("Py_None")
- code.putln("__Pyx_DECREF(Py_None); %s->obj = NULL;" % info)
- code.putln("}")
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.putln("if (%s->obj == Py_None) {" % view)
+ code.put_gotref("%s->obj" % view)
+ code.put_decref_clear("%s->obj" % view, obj_type)
+ code.putln("}")
def get_preprocessor_guard(self):
if not self.entry.is_special:
@@ -2733,6 +2789,7 @@ class DefNode(FuncDefNode):
name_declarator, type = formal_arg.analyse(scope, nonempty=1)
cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name,
cname=None,
+ annotation=formal_arg.annotation,
type=py_object_type,
pos=formal_arg.pos))
cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type,
@@ -3135,7 +3192,10 @@ class DefNode(FuncDefNode):
arg_code_list.append(arg_decl_code(self.star_arg))
if self.starstar_arg:
arg_code_list.append(arg_decl_code(self.starstar_arg))
- arg_code = ', '.join(arg_code_list)
+ if arg_code_list:
+ arg_code = ', '.join(arg_code_list)
+ else:
+ arg_code = 'void' # No arguments
dc = self.return_type.declaration_code(self.entry.pyfunc_cname)
decls_code = code.globalstate['decls']
@@ -3695,18 +3755,12 @@ class DefNodeWrapper(FuncDefNode):
entry = arg.entry
code.putln("%s = %s;" % (entry.cname, item))
else:
- func = arg.type.from_py_function
- if func:
+ if arg.type.from_py_function:
if arg.default:
# C-typed default arguments must be handled here
code.putln('if (%s) {' % item)
- rhs = "%s(%s)" % (func, item)
- if arg.type.is_enum:
- rhs = arg.type.cast_code(rhs)
- code.putln("%s = %s; %s" % (
- arg.entry.cname,
- rhs,
- code.error_goto_if(arg.type.error_condition(arg.entry.cname), arg.pos)))
+ code.putln(arg.type.from_py_call_code(
+ item, arg.entry.cname, arg.pos, code))
if arg.default:
code.putln('} else {')
code.putln("%s = %s;" % (
@@ -3807,11 +3861,11 @@ class DefNodeWrapper(FuncDefNode):
code.putln('switch (pos_args) {')
for i, arg in enumerate(all_args[:last_required_arg+1]):
if max_positional_args > 0 and i <= max_positional_args:
+ if i != 0:
+ code.putln('CYTHON_FALLTHROUGH;')
if self.star_arg and i == max_positional_args:
code.putln('default:')
else:
- if i != 0:
- code.putln('CYTHON_FALLTHROUGH;')
code.putln('case %2d:' % i)
pystring_cname = code.intern_identifier(arg.name)
if arg.default:
@@ -3820,12 +3874,12 @@ class DefNodeWrapper(FuncDefNode):
continue
code.putln('if (kw_args > 0) {')
# don't overwrite default argument
- code.putln('PyObject* value = PyDict_GetItem(%s, %s);' % (
+ code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, %s);' % (
Naming.kwds_cname, pystring_cname))
code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
code.putln('}')
else:
- code.putln('if (likely((values[%d] = PyDict_GetItem(%s, %s)) != 0)) kw_args--;' % (
+ code.putln('if (likely((values[%d] = __Pyx_PyDict_GetItemStr(%s, %s)) != 0)) kw_args--;' % (
i, Naming.kwds_cname, pystring_cname))
if i < min_positional_args:
if i == 0:
@@ -3911,7 +3965,7 @@ class DefNodeWrapper(FuncDefNode):
else:
code.putln('if (kw_args == 1) {')
code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
- code.putln('PyObject* value = PyDict_GetItem(%s, *%s[index]);' % (
+ code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, *%s[index]);' % (
Naming.kwds_cname, Naming.pykwdlist_cname))
code.putln('if (value) { values[index] = value; kw_args--; }')
if len(optional_args) > 1:
@@ -3947,17 +4001,14 @@ class DefNodeWrapper(FuncDefNode):
def generate_arg_conversion_from_pyobject(self, arg, code):
new_type = arg.type
- func = new_type.from_py_function
# copied from CoerceFromPyTypeNode
- if func:
- lhs = arg.entry.cname
- rhs = "%s(%s)" % (func, arg.hdr_cname)
- if new_type.is_enum:
- rhs = PyrexTypes.typecast(new_type, PyrexTypes.c_long_type, rhs)
- code.putln("%s = %s; %s" % (
- lhs,
- rhs,
- code.error_goto_if(new_type.error_condition(arg.entry.cname), arg.pos)))
+ if new_type.from_py_function:
+ code.putln(new_type.from_py_call_code(
+ arg.hdr_cname,
+ arg.entry.cname,
+ arg.pos,
+ code,
+ ))
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % new_type)
@@ -3998,6 +4049,7 @@ class GeneratorDefNode(DefNode):
is_generator = True
is_coroutine = False
+ is_iterable_coroutine = False
is_asyncgen = False
gen_type_name = 'Generator'
needs_closure = True
@@ -4022,9 +4074,10 @@ class GeneratorDefNode(DefNode):
code.putln('{')
code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New('
- '(__pyx_coroutine_body_t) %s, (PyObject *) %s, %s, %s, %s); %s' % (
+ '(__pyx_coroutine_body_t) %s, %s, (PyObject *) %s, %s, %s, %s); %s' % (
self.gen_type_name,
- body_cname, Naming.cur_scope_cname, name, qualname, module_name,
+ body_cname, self.code_object.calculate_result_code(code) if self.code_object else 'NULL',
+ Naming.cur_scope_cname, name, qualname, module_name,
code.error_goto_if_null('gen', self.pos)))
code.put_decref(Naming.cur_scope_cname, py_object_type)
if self.requires_classobj:
@@ -4049,6 +4102,11 @@ class AsyncDefNode(GeneratorDefNode):
is_coroutine = True
+class IterableAsyncDefNode(AsyncDefNode):
+ gen_type_name = 'IterableCoroutine'
+ is_iterable_coroutine = True
+
+
class AsyncGenNode(AsyncDefNode):
gen_type_name = 'AsyncGen'
is_asyncgen = True
@@ -4119,6 +4177,9 @@ class GeneratorBodyDefNode(DefNode):
linetrace = code.globalstate.directives['linetrace']
if profile or linetrace:
tempvardecl_code.put_trace_declarations()
+ code.funcstate.can_trace = True
+ code_object = self.code_object.calculate_result_code(code) if self.code_object else None
+ code.put_trace_frame_init(code_object)
# ----- Resume switch point.
code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
@@ -4157,6 +4218,9 @@ class GeneratorBodyDefNode(DefNode):
# FIXME: this silences a potential "unused" warning => try to avoid unused closures in more cases
code.putln("CYTHON_MAYBE_UNUSED_VAR(%s);" % Naming.cur_scope_cname)
+ if profile or linetrace:
+ code.funcstate.can_trace = False
+
code.mark_pos(self.pos)
code.putln("")
code.putln("/* function exit code */")
@@ -4164,6 +4228,9 @@ class GeneratorBodyDefNode(DefNode):
# on normal generator termination, we do not take the exception propagation
# path: no traceback info is required and not creating it is much faster
if not self.is_inlined and not self.body.is_terminator:
+ if self.is_async_gen_body:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
code.putln('PyErr_SetNone(%s);' % (
'__Pyx_PyExc_StopAsyncIteration' if self.is_async_gen_body else 'PyExc_StopIteration'))
# ----- Error cleanup
@@ -4224,7 +4291,7 @@ class GeneratorBodyDefNode(DefNode):
class OverrideCheckNode(StatNode):
# A Node for dispatching to the def method if it
- # is overriden.
+ # is overridden.
#
# py_func
#
@@ -4400,36 +4467,13 @@ class PyClassDefNode(ClassDefNode):
if self.is_py3_style_class:
error(self.classobj.pos, "Python3 style class could not be represented as C class")
return
- bases = self.classobj.bases.args
- if len(bases) == 0:
- base_class_name = None
- base_class_module = None
- elif len(bases) == 1:
- base = bases[0]
- path = []
- from .ExprNodes import AttributeNode, NameNode
- while isinstance(base, AttributeNode):
- path.insert(0, base.attribute)
- base = base.obj
- if isinstance(base, NameNode):
- path.insert(0, base.name)
- base_class_name = path[-1]
- if len(path) > 1:
- base_class_module = u'.'.join(path[:-1])
- else:
- base_class_module = None
- else:
- error(self.classobj.bases.args.pos, "Invalid base class")
- else:
- error(self.classobj.bases.args.pos, "C class may only have one base class")
- return None
+ from . import ExprNodes
return CClassDefNode(self.pos,
visibility='private',
module_name=None,
class_name=self.name,
- base_class_module=base_class_module,
- base_class_name=base_class_name,
+ bases=self.classobj.bases or ExprNodes.TupleNode(self.pos, args=[]),
decorators=self.decorators,
body=self.body,
in_pxd=False,
@@ -4521,8 +4565,7 @@ class CClassDefNode(ClassDefNode):
# module_name string or None For import of extern type objects
# class_name string Unqualified name of class
# as_name string or None Name to declare as in this scope
- # base_class_module string or None Module containing the base class
- # base_class_name string or None Name of the base class
+ # bases TupleNode Base class(es)
# objstruct_name string or None Specified C name of object struct
# typeobj_name string or None Specified C name of type object
# in_pxd boolean Is in a .pxd file
@@ -4602,44 +4645,34 @@ class CClassDefNode(ClassDefNode):
self.module.has_extern_class = 1
env.add_imported_module(self.module)
- if self.base_class_name:
- if self.base_class_module:
- base_class_scope = env.find_imported_module(self.base_class_module.split('.'), self.pos)
- if not base_class_scope:
- error(self.pos, "'%s' is not a cimported module" % self.base_class_module)
- return
+ if self.bases.args:
+ base = self.bases.args[0]
+ base_type = base.analyse_as_type(env)
+ if base_type in (PyrexTypes.c_int_type, PyrexTypes.c_long_type, PyrexTypes.c_float_type):
+ # Use the Python rather than C variant of these types.
+ base_type = env.lookup(base_type.sign_and_name()).type
+ if base_type is None:
+ error(base.pos, "First base of '%s' is not an extension type" % self.class_name)
+ elif base_type == PyrexTypes.py_object_type:
+ base_class_scope = None
+ elif not base_type.is_extension_type and \
+ not (base_type.is_builtin_type and base_type.objstruct_cname):
+ error(base.pos, "'%s' is not an extension type" % base_type)
+ elif not base_type.is_complete():
+ error(base.pos, "Base class '%s' of type '%s' is incomplete" % (
+ base_type.name, self.class_name))
+ elif base_type.scope and base_type.scope.directives and \
+ base_type.is_final_type:
+ error(base.pos, "Base class '%s' of type '%s' is final" % (
+ base_type, self.class_name))
+ elif base_type.is_builtin_type and \
+ base_type.name in ('tuple', 'str', 'bytes'):
+ error(base.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
+ % base_type.name)
else:
- base_class_scope = env
- if self.base_class_name == 'object':
- # extension classes are special and don't need to inherit from object
- if base_class_scope is None or base_class_scope.lookup('object') is None:
- self.base_class_name = None
- self.base_class_module = None
- base_class_scope = None
- if base_class_scope:
- base_class_entry = base_class_scope.find(self.base_class_name, self.pos)
- if base_class_entry:
- if not base_class_entry.is_type:
- error(self.pos, "'%s' is not a type name" % self.base_class_name)
- elif not base_class_entry.type.is_extension_type and \
- not (base_class_entry.type.is_builtin_type and
- base_class_entry.type.objstruct_cname):
- error(self.pos, "'%s' is not an extension type" % self.base_class_name)
- elif not base_class_entry.type.is_complete():
- error(self.pos, "Base class '%s' of type '%s' is incomplete" % (
- self.base_class_name, self.class_name))
- elif base_class_entry.type.scope and base_class_entry.type.scope.directives and \
- base_class_entry.type.is_final_type:
- error(self.pos, "Base class '%s' of type '%s' is final" % (
- self.base_class_name, self.class_name))
- elif base_class_entry.type.is_builtin_type and \
- base_class_entry.type.name in ('tuple', 'str', 'bytes'):
- error(self.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
- % base_class_entry.type.name)
- else:
- self.base_type = base_class_entry.type
- if env.directives.get('freelist', 0) > 0:
- warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
+ self.base_type = base_type
+ if env.directives.get('freelist', 0) > 0 and base_type != PyrexTypes.py_object_type:
+ warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
has_body = self.body is not None
if has_body and self.base_type and not self.base_type.scope:
@@ -4699,6 +4732,28 @@ class CClassDefNode(ClassDefNode):
else:
scope.implemented = 1
+ if len(self.bases.args) > 1:
+ if not has_body or self.in_pxd:
+ error(self.bases.args[1].pos, "Only declare first base in declaration.")
+ # At runtime, we check that the other bases are heap types
+ # and that a __dict__ is added if required.
+ for other_base in self.bases.args[1:]:
+ if other_base.analyse_as_type(env):
+ error(other_base.pos, "Only one extension type base class allowed.")
+ self.entry.type.early_init = 0
+ from . import ExprNodes
+ self.type_init_args = ExprNodes.TupleNode(
+ self.pos,
+ args=[ExprNodes.IdentifierStringNode(self.pos, value=self.class_name),
+ self.bases,
+ ExprNodes.DictNode(self.pos, key_value_pairs=[])])
+ elif self.base_type:
+ self.entry.type.early_init = self.base_type.is_external or self.base_type.early_init
+ self.type_init_args = None
+ else:
+ self.entry.type.early_init = 1
+ self.type_init_args = None
+
env.allocate_vtable_names(self.entry)
for thunk in self.entry.type.defered_declarations:
@@ -4708,6 +4763,8 @@ class CClassDefNode(ClassDefNode):
if self.body:
scope = self.entry.type.scope
self.body = self.body.analyse_expressions(scope)
+ if self.type_init_args:
+ self.type_init_args.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
@@ -4721,8 +4778,172 @@ class CClassDefNode(ClassDefNode):
code.mark_pos(self.pos)
if self.body:
self.body.generate_execution_code(code)
+ if not self.entry.type.early_init:
+ if self.type_init_args:
+ self.type_init_args.generate_evaluation_code(code)
+ bases = "PyTuple_GET_ITEM(%s, 1)" % self.type_init_args.result()
+ first_base = "((PyTypeObject*)PyTuple_GET_ITEM(%s, 0))" % bases
+ # Let Python do the base types compatibility checking.
+ trial_type = code.funcstate.allocate_temp(PyrexTypes.py_object_type, True)
+ code.putln("%s = PyType_Type.tp_new(&PyType_Type, %s, NULL);" % (
+ trial_type, self.type_init_args.result()))
+ code.putln(code.error_goto_if_null(trial_type, self.pos))
+ code.put_gotref(trial_type)
+ code.putln("if (((PyTypeObject*) %s)->tp_base != %s) {" % (
+ trial_type, first_base))
+ code.putln("PyErr_Format(PyExc_TypeError, \"best base '%s' must be equal to first base '%s'\",")
+ code.putln(" ((PyTypeObject*) %s)->tp_base->tp_name, %s->tp_name);" % (
+ trial_type, first_base))
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+ code.funcstate.release_temp(trial_type)
+ code.put_incref(bases, PyrexTypes.py_object_type)
+ code.put_giveref(bases)
+ code.putln("%s.tp_bases = %s;" % (self.entry.type.typeobj_cname, bases))
+ code.put_decref_clear(trial_type, PyrexTypes.py_object_type)
+ self.type_init_args.generate_disposal_code(code)
+ self.type_init_args.free_temps(code)
+
+ self.generate_type_ready_code(self.entry, code, True)
+
+ # Also called from ModuleNode for early init types.
+ @staticmethod
+ def generate_type_ready_code(entry, code, heap_type_bases=False):
+ # Generate a call to PyType_Ready for an extension
+ # type defined in this module.
+ type = entry.type
+ typeobj_cname = type.typeobj_cname
+ scope = type.scope
+ if not scope: # could be None if there was an error
+ return
+ if entry.visibility != 'extern':
+ for slot in TypeSlots.slot_table:
+ slot.generate_dynamic_init_code(scope, code)
+ if heap_type_bases:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('PyType_Ready', 'ExtensionTypes.c'))
+ readyfunc = "__Pyx_PyType_Ready"
+ else:
+ readyfunc = "PyType_Ready"
+ code.putln(
+ "if (%s(&%s) < 0) %s" % (
+ readyfunc,
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ # Don't inherit tp_print from builtin types, restoring the
+ # behavior of using tp_repr or tp_str instead.
+ code.putln("%s.tp_print = 0;" % typeobj_cname)
+
+ # Use specialised attribute lookup for types with generic lookup but no instance dict.
+ getattr_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_getattro')
+ dictoffset_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_dictoffset')
+ if getattr_slot_func == '0' and dictoffset_slot_func == '0':
+ if type.is_final_type:
+ py_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict" # grepable
+ utility_func = "PyObject_GenericGetAttrNoDict"
+ else:
+ py_cfunc = "__Pyx_PyObject_GenericGetAttr"
+ utility_func = "PyObject_GenericGetAttr"
+ code.globalstate.use_utility_code(UtilityCode.load_cached(utility_func, "ObjectHandling.c"))
+
+ code.putln("if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) &&"
+ " likely(!%s.tp_dictoffset && %s.tp_getattro == PyObject_GenericGetAttr)) {" % (
+ typeobj_cname, typeobj_cname))
+ code.putln("%s.tp_getattro = %s;" % (
+ typeobj_cname, py_cfunc))
+ code.putln("}")
+
+ # Fix special method docstrings. This is a bit of a hack, but
+ # unless we let PyType_Ready create the slot wrappers we have
+ # a significant performance hit. (See trac #561.)
+ for func in entry.type.scope.pyfunc_entries:
+ is_buffer = func.name in ('__getbuffer__', '__releasebuffer__')
+ if (func.is_special and Options.docstrings and
+ func.wrapperbase_cname and not is_buffer):
+ slot = TypeSlots.method_name_to_slot.get(func.name)
+ preprocessor_guard = slot.preprocessor_guard_code() if slot else None
+ if preprocessor_guard:
+ code.putln(preprocessor_guard)
+ code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
+ code.putln("{")
+ code.putln(
+ 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)&%s, "%s"); %s' % (
+ typeobj_cname,
+ func.name,
+ code.error_goto_if_null('wrapper', entry.pos)))
+ code.putln(
+ "if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
+ code.putln(
+ "%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
+ func.wrapperbase_cname))
+ code.putln(
+ "%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
+ code.putln(
+ "((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
+ func.wrapperbase_cname))
+ code.putln("}")
+ code.putln("}")
+ code.putln('#endif')
+ if preprocessor_guard:
+ code.putln('#endif')
+ if type.vtable_cname:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
+ code.putln(
+ "if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
+ typeobj_cname,
+ type.vtabptr_cname,
+ code.error_goto(entry.pos)))
+ if heap_type_bases:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('MergeVTables', 'ImportExport.c'))
+ code.putln("if (__Pyx_MergeVtables(&%s) < 0) %s" % (
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ if not type.scope.is_internal and not type.scope.directives['internal']:
+ # scope.is_internal is set for types defined by
+ # Cython (such as closures), the 'internal'
+ # directive is set by users
+ code.putln(
+ 'if (PyObject_SetAttrString(%s, "%s", (PyObject *)&%s) < 0) %s' % (
+ Naming.module_cname,
+ scope.class_name,
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
+ if weakref_entry:
+ if weakref_entry.type is py_object_type:
+ tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
+ if type.typedef_flag:
+ objstruct = type.objstruct_cname
+ else:
+ objstruct = "struct %s" % type.objstruct_cname
+ code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
+ tp_weaklistoffset,
+ tp_weaklistoffset,
+ objstruct,
+ weakref_entry.cname))
+ else:
+ error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
+ if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None:
+ # Unfortunately, we cannot reliably detect whether a
+ # superclass defined __reduce__ at compile time, so we must
+ # do so at runtime.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
+ code.putln('if (__Pyx_setup_reduce((PyObject*)&%s) < 0) %s' % (
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ # Generate code to initialise the typeptr of an extension
+ # type defined in this module to point to its type object.
+ if type.typeobj_cname:
+ code.putln(
+ "%s = &%s;" % (
+ type.typeptr_cname, type.typeobj_cname))
def annotate(self, code):
+ if self.type_init_args:
+ self.type_init_args.annotate(code)
if self.body:
self.body.annotate(code)
@@ -4827,6 +5048,8 @@ class ExprStatNode(StatNode):
def analyse_expressions(self, env):
self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr = self.expr.analyse_expressions(env)
+ # Repeat in case of node replacement.
+ self.expr.result_is_used = False # hint that .result() may safely be left empty
return self
def nogil_check(self, env):
@@ -4837,9 +5060,13 @@ class ExprStatNode(StatNode):
def generate_execution_code(self, code):
code.mark_pos(self.pos)
+ self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr.generate_evaluation_code(code)
if not self.expr.is_temp and self.expr.result():
- code.putln("%s;" % self.expr.result())
+ result = self.expr.result()
+ if not self.expr.type.is_void:
+ result = "(void)(%s)" % result
+ code.putln("%s;" % result)
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
@@ -5665,19 +5892,23 @@ class ReturnStatNode(StatNode):
if not self.return_type:
# error reported earlier
return
+
+ value = self.value
if self.return_type.is_pyobject:
- code.put_xdecref(Naming.retval_cname,
- self.return_type)
+ code.put_xdecref(Naming.retval_cname, self.return_type)
+ if value and value.is_none:
+ # Use specialised default handling for "return None".
+ value = None
- if self.value:
- self.value.generate_evaluation_code(code)
+ if value:
+ value.generate_evaluation_code(code)
if self.return_type.is_memoryviewslice:
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=Naming.retval_cname,
lhs_type=self.return_type,
- lhs_pos=self.value.pos,
- rhs=self.value,
+ lhs_pos=value.pos,
+ rhs=value,
code=code,
have_gil=self.in_nogil_context)
elif self.in_generator:
@@ -5686,19 +5917,21 @@ class ReturnStatNode(StatNode):
UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c"))
code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % (
Naming.retval_cname,
- self.value.py_result()))
- self.value.generate_disposal_code(code)
+ value.py_result()))
+ value.generate_disposal_code(code)
else:
- self.value.make_owned_reference(code)
+ value.make_owned_reference(code)
code.putln("%s = %s;" % (
Naming.retval_cname,
- self.value.result_as(self.return_type)))
- self.value.generate_post_assignment_code(code)
- self.value.free_temps(code)
+ value.result_as(self.return_type)))
+ value.generate_post_assignment_code(code)
+ value.free_temps(code)
else:
if self.return_type.is_pyobject:
if self.in_generator:
if self.in_async_gen:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
code.put("PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); ")
code.putln("%s = NULL;" % Naming.retval_cname)
else:
@@ -5944,9 +6177,13 @@ class IfStatNode(StatNode):
code.mark_pos(self.pos)
end_label = code.new_label()
last = len(self.if_clauses)
- if not self.else_clause:
+ if self.else_clause:
+ # If the 'else' clause is 'unlikely', then set the preceding 'if' clause to 'likely' to reflect that.
+ self._set_branch_hint(self.if_clauses[-1], self.else_clause, inverse=True)
+ else:
last -= 1 # avoid redundant goto at end of last if-clause
for i, if_clause in enumerate(self.if_clauses):
+ self._set_branch_hint(if_clause, if_clause.body)
if_clause.generate_execution_code(code, end_label, is_last=i == last)
if self.else_clause:
code.mark_pos(self.else_clause.pos)
@@ -5955,6 +6192,21 @@ class IfStatNode(StatNode):
code.putln("}")
code.put_label(end_label)
+ def _set_branch_hint(self, clause, statements_node, inverse=False):
+ if not statements_node.is_terminator:
+ return
+ if not isinstance(statements_node, StatListNode) or not statements_node.stats:
+ return
+ # Anything that unconditionally raises exceptions should be considered unlikely.
+ if isinstance(statements_node.stats[-1], (RaiseStatNode, ReraiseStatNode)):
+ if len(statements_node.stats) > 1:
+ # Allow simple statements before the 'raise', but no conditions, loops, etc.
+ non_branch_nodes = (ExprStatNode, AssignmentNode, DelStatNode, GlobalNode, NonlocalNode)
+ for node in statements_node.stats[:-1]:
+ if not isinstance(node, non_branch_nodes):
+ return
+ clause.branch_hint = 'likely' if inverse else 'unlikely'
+
def generate_function_definitions(self, env, code):
for clause in self.if_clauses:
clause.generate_function_definitions(env, code)
@@ -5975,6 +6227,7 @@ class IfClauseNode(Node):
# body StatNode
child_attrs = ["condition", "body"]
+ branch_hint = None
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
@@ -5987,7 +6240,10 @@ class IfClauseNode(Node):
def generate_execution_code(self, code, end_label, is_last):
self.condition.generate_evaluation_code(code)
code.mark_pos(self.pos)
- code.putln("if (%s) {" % self.condition.result())
+ condition = self.condition.result()
+ if self.branch_hint:
+ condition = '%s(%s)' % (self.branch_hint, condition)
+ code.putln("if (%s) {" % condition)
self.condition.generate_disposal_code(code)
self.condition.free_temps(code)
self.body.generate_execution_code(code)
@@ -6230,6 +6486,66 @@ class DictIterationNextNode(Node):
var.release(code)
+class SetIterationNextNode(Node):
+ # Helper node for calling _PySet_NextEntry() inside of a WhileStatNode
+ # and checking the set size for changes. Created in Optimize.py.
+ child_attrs = ['set_obj', 'expected_size', 'pos_index_var',
+ 'coerced_value_var', 'value_target', 'is_set_flag']
+
+ coerced_value_var = value_ref = None
+
+ def __init__(self, set_obj, expected_size, pos_index_var, value_target, is_set_flag):
+ Node.__init__(
+ self, set_obj.pos,
+ set_obj=set_obj,
+ expected_size=expected_size,
+ pos_index_var=pos_index_var,
+ value_target=value_target,
+ is_set_flag=is_set_flag,
+ is_temp=True,
+ type=PyrexTypes.c_bint_type)
+
+ def analyse_expressions(self, env):
+ from . import ExprNodes
+ self.set_obj = self.set_obj.analyse_types(env)
+ self.expected_size = self.expected_size.analyse_types(env)
+ self.pos_index_var = self.pos_index_var.analyse_types(env)
+ self.value_target = self.value_target.analyse_target_types(env)
+ self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
+ self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
+ self.is_set_flag = self.is_set_flag.analyse_types(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.set_obj.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("set_iter", "Optimize.c"))
+ self.set_obj.generate_evaluation_code(code)
+
+ value_ref = self.value_ref
+ value_ref.allocate(code)
+
+ result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
+ code.putln("%s = __Pyx_set_iter_next(%s, %s, &%s, &%s, %s);" % (
+ result_temp,
+ self.set_obj.py_result(),
+ self.expected_size.result(),
+ self.pos_index_var.result(),
+ value_ref.result(),
+ self.is_set_flag.result()
+ ))
+ code.putln("if (unlikely(%s == 0)) break;" % result_temp)
+ code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
+ code.funcstate.release_temp(result_temp)
+
+ # evaluate all coercions before the assignments
+ code.put_gotref(value_ref.result())
+ self.coerced_value_var.generate_evaluation_code(code)
+ self.value_target.generate_assignment_code(self.coerced_value_var, code)
+ value_ref.release(code)
+
+
def ForStatNode(pos, **kw):
if 'iterator' in kw:
if kw['iterator'].is_async:
@@ -6416,10 +6732,27 @@ class ForFromStatNode(LoopNode, StatNode):
"Consider switching the directions of the relations.", 2)
self.step = self.step.analyse_types(env)
- if self.target.type.is_numeric:
- loop_type = self.target.type
+ self.set_up_loop(env)
+ target_type = self.target.type
+ if not (target_type.is_pyobject or target_type.is_numeric):
+ error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
+
+ self.body = self.body.analyse_expressions(env)
+ if self.else_clause:
+ self.else_clause = self.else_clause.analyse_expressions(env)
+ return self
+
+ def set_up_loop(self, env):
+ from . import ExprNodes
+
+ target_type = self.target.type
+ if target_type.is_numeric:
+ loop_type = target_type
else:
- loop_type = PyrexTypes.c_int_type
+ if target_type.is_enum:
+ warning(self.target.pos,
+ "Integer loops over enum values are fragile. Please cast to a safe integer type instead.")
+ loop_type = PyrexTypes.c_long_type if target_type.is_pyobject else PyrexTypes.c_int_type
if not self.bound1.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
if not self.bound2.type.is_pyobject:
@@ -6435,10 +6768,7 @@ class ForFromStatNode(LoopNode, StatNode):
if not self.step.is_literal:
self.step = self.step.coerce_to_temp(env)
- target_type = self.target.type
- if not (target_type.is_pyobject or target_type.is_numeric):
- error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
- if target_type.is_numeric:
+ if target_type.is_numeric or target_type.is_enum:
self.is_py_target = False
if isinstance(self.target, ExprNodes.BufferIndexNode):
raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.")
@@ -6448,12 +6778,7 @@ class ForFromStatNode(LoopNode, StatNode):
self.is_py_target = True
c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
self.loopvar_node = c_loopvar_node
- self.py_loopvar_node = \
- ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
- self.body = self.body.analyse_expressions(env)
- if self.else_clause:
- self.else_clause = self.else_clause.analyse_expressions(env)
- return self
+ self.py_loopvar_node = ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
@@ -6465,7 +6790,10 @@ class ForFromStatNode(LoopNode, StatNode):
if self.step is not None:
self.step.generate_evaluation_code(code)
step = self.step.result()
- incop = "%s=%s" % (incop[0], step)
+ incop = "%s=%s" % (incop[0], step) # e.g. '++' => '+= STEP'
+ else:
+ step = '1'
+
from . import ExprNodes
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.allocate(code)
@@ -6474,15 +6802,13 @@ class ForFromStatNode(LoopNode, StatNode):
loopvar_type = PyrexTypes.c_long_type if self.target.type.is_enum else self.target.type
- if from_range:
+ if from_range and not self.is_py_target:
loopvar_name = code.funcstate.allocate_temp(loopvar_type, False)
else:
loopvar_name = self.loopvar_node.result()
if loopvar_type.is_int and not loopvar_type.signed and self.relation2[0] == '>':
# Handle the case where the endpoint of an unsigned int iteration
# is within step of 0.
- if not self.step:
- step = 1
code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % (
loopvar_name,
self.bound1.result(), offset, step,
@@ -6497,10 +6823,7 @@ class ForFromStatNode(LoopNode, StatNode):
coerced_loopvar_node = self.py_loopvar_node
if coerced_loopvar_node is None and from_range:
- loopvar_cvalue = loopvar_name
- if self.target.type.is_enum:
- loopvar_cvalue = '(%s)%s' % (self.target.type.declaration_code(''), loopvar_cvalue)
- coerced_loopvar_node = ExprNodes.RawCNameExprNode(self.target.pos, loopvar_type, loopvar_cvalue)
+ coerced_loopvar_node = ExprNodes.RawCNameExprNode(self.target.pos, loopvar_type, loopvar_name)
if coerced_loopvar_node is not None:
coerced_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(coerced_loopvar_node, code)
@@ -6508,7 +6831,7 @@ class ForFromStatNode(LoopNode, StatNode):
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
- if self.py_loopvar_node:
+ if not from_range and self.py_loopvar_node:
# This mess is to make for..from loops with python targets behave
# exactly like those with C targets with regards to re-assignment
# of the loop variable.
@@ -6543,12 +6866,12 @@ class ForFromStatNode(LoopNode, StatNode):
code.putln("}")
- if self.py_loopvar_node:
+ if not from_range and self.py_loopvar_node:
# This is potentially wasteful, but we don't want the semantics to
# depend on whether or not the loop is a python type.
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
- if from_range:
+ if from_range and not self.is_py_target:
code.funcstate.release_temp(loopvar_name)
break_label = code.break_label
@@ -6950,19 +7273,42 @@ class ExceptClauseNode(Node):
def generate_handling_code(self, code, end_label):
code.mark_pos(self.pos)
+
if self.pattern:
- code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
+ has_non_literals = not all(
+ pattern.is_literal or pattern.is_simple() and not pattern.is_temp
+ for pattern in self.pattern)
+
+ if has_non_literals:
+ # For non-trivial exception check expressions, hide the live exception from C-API calls.
+ exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ for _ in range(3)]
+ code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
+ code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % tuple(exc_vars))
+ code.globalstate.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
+ exc_test_func = "__Pyx_PyErr_GivenExceptionMatches(%s, %%s)" % exc_vars[0]
+ else:
+ exc_vars = ()
+ code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
+ exc_test_func = "__Pyx_PyErr_ExceptionMatches(%s)"
+
exc_tests = []
for pattern in self.pattern:
pattern.generate_evaluation_code(code)
- exc_tests.append("__Pyx_PyErr_ExceptionMatches(%s)" % pattern.py_result())
+ exc_tests.append(exc_test_func % pattern.py_result())
- match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
- code.putln(
- "%s = %s;" % (match_flag, ' || '.join(exc_tests)))
+ match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = %s;" % (match_flag, ' || '.join(exc_tests)))
for pattern in self.pattern:
pattern.generate_disposal_code(code)
pattern.free_temps(code)
+
+ if has_non_literals:
+ code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(exc_vars))
+ code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
+ for temp in exc_vars:
+ code.funcstate.release_temp(temp)
+
code.putln(
"if (%s) {" %
match_flag)
@@ -6981,8 +7327,7 @@ class ExceptClauseNode(Node):
code.putln("}")
return
- exc_vars = [code.funcstate.allocate_temp(py_object_type,
- manage_ref=True)
+ exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
for _ in range(3)]
code.put_add_traceback(self.function_name)
# We always have to fetch the exception value even if
@@ -7774,11 +8119,17 @@ class ParallelStatNode(StatNode, ParallelNode):
if self.kwargs:
# Try to find num_threads and chunksize keyword arguments
pairs = []
+ seen = set()
for dictitem in self.kwargs.key_value_pairs:
+ if dictitem.key.value in seen:
+ error(self.pos, "Duplicate keyword argument found: %s" % dictitem.key.value)
+ seen.add(dictitem.key.value)
if dictitem.key.value == 'num_threads':
- self.num_threads = dictitem.value
+ if not dictitem.value.is_none:
+ self.num_threads = dictitem.value
elif self.is_prange and dictitem.key.value == 'chunksize':
- self.chunksize = dictitem.value
+ if not dictitem.value.is_none:
+ self.chunksize = dictitem.value
else:
pairs.append(dictitem)
@@ -7818,7 +8169,7 @@ class ParallelStatNode(StatNode, ParallelNode):
self.num_threads.compile_time_value(env) <= 0):
error(self.pos, "argument to num_threads must be greater than 0")
- if not self.num_threads.is_simple():
+ if not self.num_threads.is_simple() or self.num_threads.type.is_pyobject:
self.num_threads = self.num_threads.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
return self
diff --git a/Cython/Compiler/Optimize.py b/Cython/Compiler/Optimize.py
index b451ec3..66c1801 100644
--- a/Cython/Compiler/Optimize.py
+++ b/Cython/Compiler/Optimize.py
@@ -1,5 +1,6 @@
from __future__ import absolute_import
+import re
import sys
import copy
import codecs
@@ -8,14 +9,16 @@ import itertools
from . import TypeSlots
from .ExprNodes import not_a_constant
import cython
-cython.declare(UtilityCode=object, EncodedString=object, bytes_literal=object,
+cython.declare(UtilityCode=object, EncodedString=object, bytes_literal=object, encoded_string=object,
Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object,
UtilNodes=object, _py_int_types=object)
if sys.version_info[0] >= 3:
_py_int_types = int
+ _py_string_types = (bytes, str)
else:
_py_int_types = (int, long)
+ _py_string_types = (bytes, unicode)
from . import Nodes
from . import ExprNodes
@@ -26,8 +29,8 @@ from . import UtilNodes
from . import Options
from .Code import UtilityCode, TempitaUtilityCode
-from .StringEncoding import EncodedString, bytes_literal
-from .Errors import error
+from .StringEncoding import EncodedString, bytes_literal, encoded_string
+from .Errors import error, warning
from .ParseTreeTransforms import SkipDeclarations
try:
@@ -186,38 +189,61 @@ class IterationTransform(Visitor.EnvTransform):
self.visitchildren(node)
return self._optimise_for_loop(node, node.iterator.sequence)
- def _optimise_for_loop(self, node, iterator, reversed=False):
- if iterator.type is Builtin.dict_type:
+ def _optimise_for_loop(self, node, iterable, reversed=False):
+ annotation_type = None
+ if (iterable.is_name or iterable.is_attribute) and iterable.entry and iterable.entry.annotation:
+ annotation = iterable.entry.annotation
+ if annotation.is_subscript:
+ annotation = annotation.base # container base type
+ # FIXME: generalise annotation evaluation => maybe provide a "qualified name" also for imported names?
+ if annotation.is_name:
+ if annotation.entry and annotation.entry.qualified_name == 'typing.Dict':
+ annotation_type = Builtin.dict_type
+ elif annotation.name == 'Dict':
+ annotation_type = Builtin.dict_type
+ if annotation.entry and annotation.entry.qualified_name in ('typing.Set', 'typing.FrozenSet'):
+ annotation_type = Builtin.set_type
+ elif annotation.name in ('Set', 'FrozenSet'):
+ annotation_type = Builtin.set_type
+
+ if Builtin.dict_type in (iterable.type, annotation_type):
# like iterating over dict.keys()
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_dict_iteration(
- node, dict_obj=iterator, method=None, keys=True, values=False)
+ node, dict_obj=iterable, method=None, keys=True, values=False)
+
+ if (Builtin.set_type in (iterable.type, annotation_type) or
+ Builtin.frozenset_type in (iterable.type, annotation_type)):
+ if reversed:
+ # CPython raises an error here: not a sequence
+ return node
+ return self._transform_set_iteration(node, iterable)
# C array (slice) iteration?
- if iterator.type.is_ptr or iterator.type.is_array:
- return self._transform_carray_iteration(node, iterator, reversed=reversed)
- if iterator.type is Builtin.bytes_type:
- return self._transform_bytes_iteration(node, iterator, reversed=reversed)
- if iterator.type is Builtin.unicode_type:
- return self._transform_unicode_iteration(node, iterator, reversed=reversed)
+ if iterable.type.is_ptr or iterable.type.is_array:
+ return self._transform_carray_iteration(node, iterable, reversed=reversed)
+ if iterable.type is Builtin.bytes_type:
+ return self._transform_bytes_iteration(node, iterable, reversed=reversed)
+ if iterable.type is Builtin.unicode_type:
+ return self._transform_unicode_iteration(node, iterable, reversed=reversed)
# the rest is based on function calls
- if not isinstance(iterator, ExprNodes.SimpleCallNode):
+ if not isinstance(iterable, ExprNodes.SimpleCallNode):
return node
- if iterator.args is None:
- arg_count = iterator.arg_tuple and len(iterator.arg_tuple.args) or 0
+ if iterable.args is None:
+ arg_count = iterable.arg_tuple and len(iterable.arg_tuple.args) or 0
else:
- arg_count = len(iterator.args)
- if arg_count and iterator.self is not None:
+ arg_count = len(iterable.args)
+ if arg_count and iterable.self is not None:
arg_count -= 1
- function = iterator.function
+ function = iterable.function
# dict iteration?
if function.is_attribute and not reversed and not arg_count:
- base_obj = iterator.self or function.obj
+ base_obj = iterable.self or function.obj
method = function.attribute
# in Py3, items() is equivalent to Py2's iteritems()
is_safe_iter = self.global_scope().context.language_level >= 3
@@ -245,25 +271,35 @@ class IterationTransform(Visitor.EnvTransform):
node, base_obj, method, keys, values)
# enumerate/reversed ?
- if iterator.self is None and function.is_name and \
+ if iterable.self is None and function.is_name and \
function.entry and function.entry.is_builtin:
if function.name == 'enumerate':
if reversed:
# CPython raises an error here: not a sequence
return node
- return self._transform_enumerate_iteration(node, iterator)
+ return self._transform_enumerate_iteration(node, iterable)
elif function.name == 'reversed':
if reversed:
# CPython raises an error here: not a sequence
return node
- return self._transform_reversed_iteration(node, iterator)
+ return self._transform_reversed_iteration(node, iterable)
# range() iteration?
- if Options.convert_range and (node.target.type.is_int or node.target.type.is_enum):
- if iterator.self is None and function.is_name and \
- function.entry and function.entry.is_builtin and \
- function.name in ('range', 'xrange'):
- return self._transform_range_iteration(node, iterator, reversed=reversed)
+ if Options.convert_range and arg_count >= 1 and (
+ iterable.self is None and
+ function.is_name and function.name in ('range', 'xrange') and
+ function.entry and function.entry.is_builtin):
+ if node.target.type.is_int or node.target.type.is_enum:
+ return self._transform_range_iteration(node, iterable, reversed=reversed)
+ if node.target.type.is_pyobject:
+ # Assume that small integer ranges (C long >= 32bit) are best handled in C as well.
+ for arg in (iterable.arg_tuple.args if iterable.args is None else iterable.args):
+ if isinstance(arg, ExprNodes.IntNode):
+ if arg.has_constant_result() and -2**30 <= arg.constant_result < 2**30:
+ continue
+ break
+ else:
+ return self._transform_range_iteration(node, iterable, reversed=reversed)
return node
@@ -768,6 +804,7 @@ class IterationTransform(Visitor.EnvTransform):
step=step, body=node.body,
else_clause=node.else_clause,
from_range=True)
+ for_node.set_up_loop(self.current_env())
if bound2_is_temp:
for_node = UtilNodes.LetNode(bound2, for_node)
@@ -946,6 +983,85 @@ class IterationTransform(Visitor.EnvTransform):
PyrexTypes.CFuncTypeArg("p_is_dict", PyrexTypes.c_int_ptr_type, None),
])
+ PySet_Iterator_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("set", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("is_set", PyrexTypes.c_int_type, None),
+ PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("p_is_set", PyrexTypes.c_int_ptr_type, None),
+ ])
+
+ def _transform_set_iteration(self, node, set_obj):
+ temps = []
+ temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
+ temps.append(temp)
+ set_temp = temp.ref(set_obj.pos)
+ temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+ temps.append(temp)
+ pos_temp = temp.ref(node.pos)
+
+ if isinstance(node.body, Nodes.StatListNode):
+ body = node.body
+ else:
+ body = Nodes.StatListNode(pos = node.body.pos,
+ stats = [node.body])
+
+ # keep original length to guard against set modification
+ set_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+ temps.append(set_len_temp)
+ set_len_temp_addr = ExprNodes.AmpersandNode(
+ node.pos, operand=set_len_temp.ref(set_obj.pos),
+ type=PyrexTypes.c_ptr_type(set_len_temp.type))
+ temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
+ temps.append(temp)
+ is_set_temp = temp.ref(node.pos)
+ is_set_temp_addr = ExprNodes.AmpersandNode(
+ node.pos, operand=is_set_temp,
+ type=PyrexTypes.c_ptr_type(temp.type))
+
+ value_target = node.target
+ iter_next_node = Nodes.SetIterationNextNode(
+ set_temp, set_len_temp.ref(set_obj.pos), pos_temp, value_target, is_set_temp)
+ iter_next_node = iter_next_node.analyse_expressions(self.current_env())
+ body.stats[0:0] = [iter_next_node]
+
+ def flag_node(value):
+ value = value and 1 or 0
+ return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)
+
+ result_code = [
+ Nodes.SingleAssignmentNode(
+ node.pos,
+ lhs=pos_temp,
+ rhs=ExprNodes.IntNode(node.pos, value='0', constant_result=0)),
+ Nodes.SingleAssignmentNode(
+ set_obj.pos,
+ lhs=set_temp,
+ rhs=ExprNodes.PythonCapiCallNode(
+ set_obj.pos,
+ "__Pyx_set_iterator",
+ self.PySet_Iterator_func_type,
+ utility_code=UtilityCode.load_cached("set_iter", "Optimize.c"),
+ args=[set_obj, flag_node(set_obj.type is Builtin.set_type),
+ set_len_temp_addr, is_set_temp_addr,
+ ],
+ is_temp=True,
+ )),
+ Nodes.WhileStatNode(
+ node.pos,
+ condition=None,
+ body=body,
+ else_clause=node.else_clause,
+ )
+ ]
+
+ return UtilNodes.TempsBlockNode(
+ node.pos, temps=temps,
+ body=Nodes.StatListNode(
+ node.pos,
+ stats = result_code
+ ))
+
class SwitchTransform(Visitor.EnvTransform):
"""
@@ -1909,16 +2025,11 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
"""
### cleanup to avoid redundant coercions to/from Python types
- def _visit_PyTypeTestNode(self, node):
- # disabled - appears to break assignments in some cases, and
- # also drops a None check, which might still be required
+ def visit_PyTypeTestNode(self, node):
"""Flatten redundant type checks after tree changes.
"""
- old_arg = node.arg
self.visitchildren(node)
- if old_arg is node.arg or node.arg.type != node.type:
- return node
- return node.arg
+ return node.reanalyse()
def _visit_TypecastNode(self, node):
# disabled - the user may have had a reason to put a type
@@ -1933,11 +2044,18 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
def visit_ExprStatNode(self, node):
"""
- Drop useless coercions.
+ Drop dead code and useless coercions.
"""
self.visitchildren(node)
if isinstance(node.expr, ExprNodes.CoerceToPyTypeNode):
node.expr = node.expr.arg
+ expr = node.expr
+ if expr is None or expr.is_none or expr.is_literal:
+ # Expression was removed or is dead code => remove ExprStatNode as well.
+ return None
+ if expr.is_name and expr.entry and (expr.entry.is_local or expr.entry.is_arg):
+ # Ignore dead references to local variables etc.
+ return None
return node
def visit_CoerceToBooleanNode(self, node):
@@ -2155,7 +2273,8 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
attribute=attr_name,
is_called=True).analyse_as_type_attribute(self.current_env())
if method is None:
- return node
+ return self._optimise_generic_builtin_method_call(
+ node, attr_name, function, arg_list, is_unbound_method)
args = node.args
if args is None and node.arg_tuple:
args = node.arg_tuple.args
@@ -2171,6 +2290,62 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
### builtin types
+ def _optimise_generic_builtin_method_call(self, node, attr_name, function, arg_list, is_unbound_method):
+ """
+ Try to inject an unbound method call for a call to a method of a known builtin type.
+ This enables caching the underlying C function of the method at runtime.
+ """
+ arg_count = len(arg_list)
+ if is_unbound_method or arg_count >= 3 or not (function.is_attribute and function.is_py_attr):
+ return node
+ if not function.obj.type.is_builtin_type:
+ return node
+ if function.obj.type.name in ('basestring', 'type'):
+ # these allow different actual types => unsafe
+ return node
+ return ExprNodes.CachedBuiltinMethodCallNode(
+ node, function.obj, attr_name, arg_list)
+
+ PyObject_Unicode_func_type = PyrexTypes.CFuncType(
+ Builtin.unicode_type, [
+ PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
+ ])
+
+ def _handle_simple_function_unicode(self, node, function, pos_args):
+ """Optimise single argument calls to unicode().
+ """
+ if len(pos_args) != 1:
+ if len(pos_args) == 0:
+ return ExprNodes.UnicodeNode(node.pos, value=EncodedString(), constant_result=u'')
+ return node
+ arg = pos_args[0]
+ if arg.type is Builtin.unicode_type:
+ if not arg.may_be_none():
+ return arg
+ cname = "__Pyx_PyUnicode_Unicode"
+ utility_code = UtilityCode.load_cached('PyUnicode_Unicode', 'StringTools.c')
+ else:
+ cname = "__Pyx_PyObject_Unicode"
+ utility_code = UtilityCode.load_cached('PyObject_Unicode', 'StringTools.c')
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, cname, self.PyObject_Unicode_func_type,
+ args=pos_args,
+ is_temp=node.is_temp,
+ utility_code=utility_code,
+ py_name="unicode")
+
+ def visit_FormattedValueNode(self, node):
+ """Simplify or avoid plain string formatting of a unicode value.
+ This seems misplaced here, but plain unicode formatting is essentially
+ a call to the unicode() builtin, which is optimised right above.
+ """
+ self.visitchildren(node)
+ if node.value.type is Builtin.unicode_type and not node.c_format_spec and not node.format_spec:
+ if not node.conversion_char or node.conversion_char == 's':
+ # value is definitely a unicode string and we don't format it any special
+ return self._handle_simple_function_unicode(node, None, [node.value])
+ return node
+
PyDict_Copy_func_type = PyrexTypes.CFuncType(
Builtin.dict_type, [
PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None)
@@ -2392,6 +2567,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
_map_to_capi_len_function = {
Builtin.unicode_type: "__Pyx_PyUnicode_GET_LENGTH",
Builtin.bytes_type: "PyBytes_GET_SIZE",
+ Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
Builtin.list_type: "PyList_GET_SIZE",
Builtin.tuple_type: "PyTuple_GET_SIZE",
Builtin.set_type: "PySet_GET_SIZE",
@@ -2625,7 +2801,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
PyTypeObjectPtr = PyrexTypes.CPtrType(
cython_scope.lookup('PyTypeObject').type)
pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
- PyrexTypes.py_object_type, [
+ ext_type, [
PyrexTypes.CFuncTypeArg("type", PyTypeObjectPtr, None),
PyrexTypes.CFuncTypeArg("args", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("kwargs", PyrexTypes.py_object_type, None),
@@ -2638,6 +2814,7 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
node.pos, slot_func_cname,
pyx_tp_new_kwargs_func_type,
args=[type_arg, args_tuple, kwargs],
+ may_return_none=False,
is_temp=True)
else:
# arbitrary variable, needs a None check for safety
@@ -2685,6 +2862,69 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
utility_code=load_c_utility('append')
)
+ def _handle_simple_method_list_extend(self, node, function, args, is_unbound_method):
+ """Replace list.extend([...]) for short sequence literals values by sequential appends
+ to avoid creating an intermediate sequence argument.
+ """
+ if len(args) != 2:
+ return node
+ obj, value = args
+ if not value.is_sequence_constructor:
+ return node
+ items = list(value.args)
+ if value.mult_factor is not None or len(items) > 8:
+ # Appending wins for short sequences but slows down when multiple resize operations are needed.
+ # This seems to be a good enough limit that avoids repeated resizing.
+ if False and isinstance(value, ExprNodes.ListNode):
+ # One would expect that tuples are more efficient here, but benchmarking with
+ # Py3.5 and Py3.7 suggests that they are not. Probably worth revisiting at some point.
+ # Might be related to the usage of PySequence_FAST() in CPython's list.extend(),
+ # which is probably tuned more towards lists than tuples (and rightly so).
+ tuple_node = args[1].as_tuple().analyse_types(self.current_env(), skip_children=True)
+ Visitor.recursively_replace_node(node, args[1], tuple_node)
+ return node
+ wrapped_obj = self._wrap_self_arg(obj, function, is_unbound_method, 'extend')
+ if not items:
+ # Empty sequences are not likely to occur, but why waste a call to list.extend() for them?
+ wrapped_obj.result_is_used = node.result_is_used
+ return wrapped_obj
+ cloned_obj = obj = wrapped_obj
+ if len(items) > 1 and not obj.is_simple():
+ cloned_obj = UtilNodes.LetRefNode(obj)
+ # Use ListComp_Append() for all but the last item and finish with PyList_Append()
+ # to shrink the list storage size at the very end if necessary.
+ temps = []
+ arg = items[-1]
+ if not arg.is_simple():
+ arg = UtilNodes.LetRefNode(arg)
+ temps.append(arg)
+ new_node = ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_PyList_Append", self.PyObject_Append_func_type,
+ args=[cloned_obj, arg],
+ is_temp=True,
+ utility_code=load_c_utility("ListAppend"))
+ for arg in items[-2::-1]:
+ if not arg.is_simple():
+ arg = UtilNodes.LetRefNode(arg)
+ temps.append(arg)
+ new_node = ExprNodes.binop_node(
+ node.pos, '|',
+ ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_ListComp_Append", self.PyObject_Append_func_type,
+ args=[cloned_obj, arg], py_name="extend",
+ is_temp=True,
+ utility_code=load_c_utility("ListCompAppend")),
+ new_node,
+ type=PyrexTypes.c_returncode_type,
+ )
+ new_node.result_is_used = node.result_is_used
+ if cloned_obj is not obj:
+ temps.append(cloned_obj)
+ for temp in temps:
+ new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
+ new_node.result_is_used = node.result_is_used
+ return new_node
+
PyByteArray_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
@@ -2890,6 +3130,28 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
may_return_none=True,
utility_code=load_c_utility('dict_setdefault'))
+ PyDict_Pop_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
+ ])
+
+ def _handle_simple_method_dict_pop(self, node, function, args, is_unbound_method):
+ """Replace dict.pop() by a call to _PyDict_Pop().
+ """
+ if len(args) == 2:
+ args.append(ExprNodes.NullNode(node.pos))
+ elif len(args) != 3:
+ self._error_wrong_arg_count('dict.pop', node, args, "2 or 3")
+ return node
+
+ return self._substitute_method_call(
+ node, function,
+ "__Pyx_PyDict_Pop", self.PyDict_Pop_func_type,
+ 'pop', is_unbound_method, args,
+ utility_code=load_c_utility('py_dict_pop'))
+
Pyx_PyInt_BinopInt_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None),
@@ -3639,18 +3901,8 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
may_return_none=ExprNodes.PythonCapiCallNode.may_return_none,
with_none_check=True):
args = list(args)
- if with_none_check and args and not args[0].is_literal:
- self_arg = args[0]
- if is_unbound_method:
- self_arg = self_arg.as_none_safe_node(
- "descriptor '%s' requires a '%s' object but received a 'NoneType'",
- format_args=[attr_name, function.obj.name])
- else:
- self_arg = self_arg.as_none_safe_node(
- "'NoneType' object has no attribute '%{0}s'".format('.30' if len(attr_name) <= 30 else ''),
- error = "PyExc_AttributeError",
- format_args = [attr_name])
- args[0] = self_arg
+ if with_none_check and args:
+ args[0] = self._wrap_self_arg(args[0], function, is_unbound_method, attr_name)
if is_temp is None:
is_temp = node.is_temp
return ExprNodes.PythonCapiCallNode(
@@ -3662,6 +3914,20 @@ class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
result_is_used = node.result_is_used,
)
+ def _wrap_self_arg(self, self_arg, function, is_unbound_method, attr_name):
+ if self_arg.is_literal:
+ return self_arg
+ if is_unbound_method:
+ self_arg = self_arg.as_none_safe_node(
+ "descriptor '%s' requires a '%s' object but received a 'NoneType'",
+ format_args=[attr_name, self_arg.type.name])
+ else:
+ self_arg = self_arg.as_none_safe_node(
+ "'NoneType' object has no attribute '%{0}s'".format('.30' if len(attr_name) <= 30 else ''),
+ error="PyExc_AttributeError",
+ format_args=[attr_name])
+ return self_arg
+
def _inject_int_default_argument(self, node, args, arg_index, type, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
@@ -3930,8 +4196,42 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
if isinstance(node.operand1, ExprNodes.IntNode) and \
node.operand2.is_sequence_constructor:
return self._calculate_constant_seq(node, node.operand2, node.operand1)
+ if node.operand1.is_string_literal:
+ return self._multiply_string(node, node.operand1, node.operand2)
+ elif node.operand2.is_string_literal:
+ return self._multiply_string(node, node.operand2, node.operand1)
return self.visit_BinopNode(node)
+ def _multiply_string(self, node, string_node, multiplier_node):
+ multiplier = multiplier_node.constant_result
+ if not isinstance(multiplier, _py_int_types):
+ return node
+ if not (node.has_constant_result() and isinstance(node.constant_result, _py_string_types)):
+ return node
+ if len(node.constant_result) > 256:
+ # Too long for static creation, leave it to runtime. (-> arbitrary limit)
+ return node
+
+ build_string = encoded_string
+ if isinstance(string_node, ExprNodes.BytesNode):
+ build_string = bytes_literal
+ elif isinstance(string_node, ExprNodes.StringNode):
+ if string_node.unicode_value is not None:
+ string_node.unicode_value = encoded_string(
+ string_node.unicode_value * multiplier,
+ string_node.unicode_value.encoding)
+ elif isinstance(string_node, ExprNodes.UnicodeNode):
+ if string_node.bytes_value is not None:
+ string_node.bytes_value = bytes_literal(
+ string_node.bytes_value * multiplier,
+ string_node.bytes_value.encoding)
+ else:
+ assert False, "unknown string node type: %s" % type(string_node)
+ string_node.value = build_string(
+ string_node.value * multiplier,
+ string_node.value.encoding)
+ return string_node
+
def _calculate_constant_seq(self, node, sequence_node, factor):
if factor.constant_result != 1 and sequence_node.args:
if isinstance(factor.constant_result, _py_int_types) and factor.constant_result <= 0:
@@ -3951,6 +4251,78 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
sequence_node.mult_factor = factor
return sequence_node
+ def visit_ModNode(self, node):
+ self.visitchildren(node)
+ if isinstance(node.operand1, ExprNodes.UnicodeNode) and isinstance(node.operand2, ExprNodes.TupleNode):
+ if not node.operand2.mult_factor:
+ fstring = self._build_fstring(node.operand1.pos, node.operand1.value, node.operand2.args)
+ if fstring is not None:
+ return fstring
+ return self.visit_BinopNode(node)
+
+ _parse_string_format_regex = (
+ u'(%(?:' # %...
+ u'(?:[0-9]+|[ ])?' # width (optional) or space prefix fill character (optional)
+ u'(?:[.][0-9]+)?' # precision (optional)
+ u')?.)' # format type (or something different for unsupported formats)
+ )
+
+ def _build_fstring(self, pos, ustring, format_args):
+ # Issues formatting warnings instead of errors since we really only catch a few errors by accident.
+ args = iter(format_args)
+ substrings = []
+ can_be_optimised = True
+ for s in re.split(self._parse_string_format_regex, ustring):
+ if not s:
+ continue
+ if s == u'%%':
+ substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(u'%'), constant_result=u'%'))
+ continue
+ if s[0] != u'%':
+ if s[-1] == u'%':
+ warning(pos, "Incomplete format: '...%s'" % s[-3:], level=1)
+ can_be_optimised = False
+ substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(s), constant_result=s))
+ continue
+ format_type = s[-1]
+ try:
+ arg = next(args)
+ except StopIteration:
+ warning(pos, "Too few arguments for format placeholders", level=1)
+ can_be_optimised = False
+ break
+ if format_type in u'srfdoxX':
+ format_spec = s[1:]
+ if format_type in u'doxX' and u'.' in format_spec:
+ # Precision is not allowed for integers in format(), but ok in %-formatting.
+ can_be_optimised = False
+ elif format_type in u'rs':
+ format_spec = format_spec[:-1]
+ substrings.append(ExprNodes.FormattedValueNode(
+ arg.pos, value=arg,
+ conversion_char=format_type if format_type in u'rs' else None,
+ format_spec=ExprNodes.UnicodeNode(
+ pos, value=EncodedString(format_spec), constant_result=format_spec)
+ if format_spec else None,
+ ))
+ else:
+ # keep it simple for now ...
+ can_be_optimised = False
+
+ if not can_be_optimised:
+ # Print all warnings we can find before finally giving up here.
+ return None
+
+ try:
+ next(args)
+ except StopIteration: pass
+ else:
+ warning(pos, "Too many arguments for format placeholders", level=1)
+ return None
+
+ node = ExprNodes.JoinedStrNode(pos, values=substrings)
+ return self.visit_JoinedStrNode(node)
+
def visit_FormattedValueNode(self, node):
self.visitchildren(node)
conversion_char = node.conversion_char or 's'
@@ -4285,7 +4657,7 @@ class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
visit_Node = Visitor.VisitorTransform.recurse_to_children
-class FinalOptimizePhase(Visitor.CythonTransform, Visitor.NodeRefCleanupMixin):
+class FinalOptimizePhase(Visitor.EnvTransform, Visitor.NodeRefCleanupMixin):
"""
This visitor handles several commuting optimizations, and is run
just before the C code generation phase.
@@ -4294,8 +4666,11 @@ class FinalOptimizePhase(Visitor.CythonTransform, Visitor.NodeRefCleanupMixin):
- eliminate None assignment and refcounting for first assignment.
- isinstance -> typecheck for cdef types
- eliminate checks for None and/or types that became redundant after tree changes
+ - eliminate useless string formatting steps
- replace Python function calls that look like method calls by a faster PyMethodCallNode
"""
+ in_loop = False
+
def visit_SingleAssignmentNode(self, node):
"""Avoid redundant initialisation of local variables before their
first assignment.
@@ -4322,8 +4697,10 @@ class FinalOptimizePhase(Visitor.CythonTransform, Visitor.NodeRefCleanupMixin):
function.type = function.entry.type
PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type)
node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
- elif (self.current_directives.get("optimize.unpack_method_calls")
- and node.is_temp and function.type.is_pyobject):
+ elif (node.is_temp and function.type.is_pyobject and self.current_directives.get(
+ "optimize.unpack_method_calls_in_pyinit"
+ if not self.in_loop and self.current_env().is_module_scope
+ else "optimize.unpack_method_calls")):
# optimise simple Python methods calls
if isinstance(node.arg_tuple, ExprNodes.TupleNode) and not (
node.arg_tuple.mult_factor or (node.arg_tuple.is_literal and node.arg_tuple.args)):
@@ -4354,6 +4731,11 @@ class FinalOptimizePhase(Visitor.CythonTransform, Visitor.NodeRefCleanupMixin):
node, function=function, arg_tuple=node.arg_tuple, type=node.type))
return node
+ def visit_NumPyMethodCallNode(self, node):
+ # Exclude from replacement above.
+ self.visitchildren(node)
+ return node
+
def visit_PyTypeTestNode(self, node):
"""Remove tests for alternatively allowed None values from
type tests when we know that the argument cannot be None
@@ -4374,6 +4756,16 @@ class FinalOptimizePhase(Visitor.CythonTransform, Visitor.NodeRefCleanupMixin):
return node.arg
return node
+ def visit_LoopNode(self, node):
+ """Remember when we enter a loop as some expensive optimisations might still be worth it there.
+ """
+ old_val = self.in_loop
+ self.in_loop = True
+ self.visitchildren(node)
+ self.in_loop = old_val
+ return node
+
+
class ConsolidateOverflowCheck(Visitor.CythonTransform):
"""
This class facilitates the sharing of overflow checking among all nodes
diff --git a/Cython/Compiler/Options.py b/Cython/Compiler/Options.py
index aae39e9..33c6c95 100644
--- a/Cython/Compiler/Options.py
+++ b/Cython/Compiler/Options.py
@@ -171,6 +171,8 @@ _directive_defaults = {
'language_level': 2,
'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere.
'py2_import': False, # For backward compatibility of Cython's source code in Py3 source mode
+ 'preliminary_late_includes_cy28': False, # Temporary directive in 0.28, to be removed in a later version (see GH#2079).
+ 'iterable_coroutine': False, # Make async coroutines backwards compatible with the old asyncio yield-from syntax.
'c_string_type': 'bytes',
'c_string_encoding': '',
'type_version_tag': True, # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types
@@ -194,6 +196,7 @@ _directive_defaults = {
# optimizations
'optimize.inline_defnode_calls': True,
'optimize.unpack_method_calls': True, # increases code size when True
+ 'optimize.unpack_method_calls_in_pyinit': False, # uselessly increases code size when True
'optimize.use_switch': True,
# remove unreachable code
@@ -319,6 +322,7 @@ directive_scopes = { # defaults to available everywhere
'old_style_globals': ('module',),
'np_pythran': ('module',),
'fast_gil': ('module',),
+ 'iterable_coroutine': ('module', 'function'),
}
diff --git a/Cython/Compiler/ParseTreeTransforms.py b/Cython/Compiler/ParseTreeTransforms.py
index 6bf2d85..fb8eb07 100644
--- a/Cython/Compiler/ParseTreeTransforms.py
+++ b/Cython/Compiler/ParseTreeTransforms.py
@@ -15,6 +15,7 @@ from . import ExprNodes
from . import Nodes
from . import Options
from . import Builtin
+from . import Errors
from .Visitor import VisitorTransform, TreeVisitor
from .Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform
@@ -632,7 +633,7 @@ class TrackNumpyAttributes(VisitorTransform, SkipDeclarations):
visit_Node = VisitorTransform.recurse_to_children
-class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
+class InterpretCompilerDirectives(CythonTransform):
"""
After parsing, directives can be stored in a number of places:
- #cython-comments at the top of the file (stored in ModuleNode)
@@ -857,6 +858,11 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
node.cython_attribute = directive
return node
+ def visit_NewExprNode(self, node):
+ self.visit(node.cppclass)
+ self.visitchildren(node)
+ return node
+
def try_to_parse_directives(self, node):
# If node is the contents of an directive (in a with statement or
# decorator), returns a list of (directivename, value) pairs.
@@ -987,7 +993,7 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
def visit_CVarDefNode(self, node):
directives = self._extract_directives(node, 'function')
if not directives:
- return node
+ return self.visit_Node(node)
for name, value in directives.items():
if name == 'locals':
node.directive_locals = value
@@ -1027,7 +1033,8 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
directives = []
realdecs = []
both = []
- for dec in node.decorators:
+ # Decorators coming first take precedence.
+ for dec in node.decorators[::-1]:
new_directives = self.try_to_parse_directives(dec.decorator)
if new_directives is not None:
for directive in new_directives:
@@ -1037,15 +1044,17 @@ class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
directives.append(directive)
if directive[0] == 'staticmethod':
both.append(dec)
+ # Adapt scope type based on decorators that change it.
+ if directive[0] == 'cclass' and scope_name == 'class':
+ scope_name = 'cclass'
else:
realdecs.append(dec)
- if realdecs and isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode)):
+ if realdecs and (scope_name == 'cclass' or
+ isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode))):
raise PostParseError(realdecs[0].pos, "Cdef functions/classes cannot take arbitrary decorators.")
- else:
- node.decorators = realdecs + both
+ node.decorators = realdecs[::-1] + both[::-1]
# merge or override repeated directives
optdict = {}
- directives.reverse() # Decorators coming first take precedence
for directive in directives:
name, value = directive
if name in optdict:
@@ -1871,7 +1880,7 @@ if VALUE is not None:
def visit_FuncDefNode(self, node):
"""
- Analyse a function and its body, as that hasn't happend yet. Also
+ Analyse a function and its body, as that hasn't happened yet. Also
analyse the directive_locals set by @cython.locals().
Then, if we are a function with fused arguments, replace the function
@@ -1934,6 +1943,8 @@ if VALUE is not None:
binding = self.current_directives.get('binding')
rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding)
node.code_object = rhs.code_object
+ if node.is_generator:
+ node.gbody.code_object = node.code_object
if env.is_py_class_scope:
rhs.binding = True
@@ -2060,7 +2071,7 @@ if VALUE is not None:
# Some nodes are no longer needed after declaration
# analysis and can be dropped. The analysis was performed
- # on these nodes in a seperate recursive process from the
+ # on these nodes in a separate recursive process from the
# enclosing function or module, so we can simply drop them.
def visit_CDeclaratorNode(self, node):
# necessary to ensure that all CNameDeclaratorNodes are visited.
@@ -2585,10 +2596,13 @@ class MarkClosureVisitor(CythonTransform):
collector.visitchildren(node)
if node.is_async_def:
- coroutine_type = Nodes.AsyncGenNode if collector.has_yield else Nodes.AsyncDefNode
+ coroutine_type = Nodes.AsyncDefNode
if collector.has_yield:
+ coroutine_type = Nodes.AsyncGenNode
for yield_expr in collector.yields + collector.returns:
yield_expr.in_async_gen = True
+ elif self.current_directives['iterable_coroutine']:
+ coroutine_type = Nodes.IterableAsyncDefNode
elif collector.has_await:
found = next(y for y in collector.yields if y.is_await)
error(found.pos, "'await' not allowed in generators (use 'yield')")
@@ -3152,8 +3166,9 @@ class ReplaceFusedTypeChecks(VisitorTransform):
return self.transform(node)
def visit_PrimaryCmpNode(self, node):
- type1 = node.operand1.analyse_as_type(self.local_scope)
- type2 = node.operand2.analyse_as_type(self.local_scope)
+ with Errors.local_errors(ignore=True):
+ type1 = node.operand1.analyse_as_type(self.local_scope)
+ type2 = node.operand2.analyse_as_type(self.local_scope)
if type1 and type2:
false_node = ExprNodes.BoolNode(node.pos, value=False)
diff --git a/Cython/Compiler/Parsing.py b/Cython/Compiler/Parsing.py
index 5d6b11f..088e862 100644
--- a/Cython/Compiler/Parsing.py
+++ b/Cython/Compiler/Parsing.py
@@ -2481,9 +2481,12 @@ def p_c_simple_base_type(s, self_flag, nonempty, templates = None):
error(pos, "Expected an identifier, found '%s'" % s.sy)
if s.systring == 'const':
s.next()
- base_type = p_c_base_type(s,
- self_flag = self_flag, nonempty = nonempty, templates = templates)
- return Nodes.CConstTypeNode(pos, base_type = base_type)
+ base_type = p_c_base_type(s, self_flag=self_flag, nonempty=nonempty, templates=templates)
+ if isinstance(base_type, Nodes.MemoryViewSliceTypeNode):
+ # reverse order to avoid having to write "(const int)[:]"
+ base_type.base_type_node = Nodes.CConstTypeNode(pos, base_type=base_type.base_type_node)
+ return base_type
+ return Nodes.CConstTypeNode(pos, base_type=base_type)
if looking_at_base_type(s):
#print "p_c_simple_base_type: looking_at_base_type at", s.position()
is_basic = 1
@@ -2710,6 +2713,7 @@ special_basic_c_types = cython.declare(dict, {
"ssize_t" : (2, 0),
"size_t" : (0, 0),
"ptrdiff_t" : (2, 0),
+ "Py_tss_t" : (1, 0),
})
sign_and_longness_words = cython.declare(
@@ -3081,9 +3085,13 @@ def p_cdef_extern_block(s, pos, ctx):
ctx.namespace = p_string_literal(s, 'u')[2]
if p_nogil(s):
ctx.nogil = 1
- body = p_suite(s, ctx)
+
+ # Use "docstring" as verbatim string to include
+ verbatim_include, body = p_suite_with_docstring(s, ctx, True)
+
return Nodes.CDefExternNode(pos,
include_file = include_file,
+ verbatim_include = verbatim_include,
body = body,
namespace = ctx.namespace)
@@ -3435,19 +3443,15 @@ def p_c_class_definition(s, pos, ctx):
as_name = class_name
objstruct_name = None
typeobj_name = None
- base_class_module = None
- base_class_name = None
+ bases = None
if s.sy == '(':
- s.next()
- base_class_path = [p_ident(s)]
- while s.sy == '.':
- s.next()
- base_class_path.append(p_ident(s))
- if s.sy == ',':
- s.error("C class may only have one base class", fatal=False)
- s.expect(')')
- base_class_module = ".".join(base_class_path[:-1])
- base_class_name = base_class_path[-1]
+ positional_args, keyword_args = p_call_parse_args(s, allow_genexp=False)
+ if keyword_args:
+ s.error("C classes cannot take keyword bases.")
+ bases, _ = p_call_build_packed_args(pos, positional_args, keyword_args)
+ if bases is None:
+ bases = ExprNodes.TupleNode(pos, args=[])
+
if s.sy == '[':
if ctx.visibility not in ('public', 'extern') and not ctx.api:
error(s.position(), "Name options only allowed for 'public', 'api', or 'extern' C class")
@@ -3487,8 +3491,7 @@ def p_c_class_definition(s, pos, ctx):
module_name = ".".join(module_path),
class_name = class_name,
as_name = as_name,
- base_class_module = base_class_module,
- base_class_name = base_class_name,
+ bases = bases,
objstruct_name = objstruct_name,
typeobj_name = typeobj_name,
in_pxd = ctx.level == 'module_pxd',
diff --git a/Cython/Compiler/PyrexTypes.py b/Cython/Compiler/PyrexTypes.py
index 4cede00..be28e48 100644
--- a/Cython/Compiler/PyrexTypes.py
+++ b/Cython/Compiler/PyrexTypes.py
@@ -192,7 +192,8 @@ class PyrexType(BaseType):
# is_pythran_expr boolean Is Pythran expr
# is_numpy_buffer boolean Is Numpy array buffer
# has_attributes boolean Has C dot-selectable attributes
- # default_value string Initial value
+ # default_value string Initial value that can be assigned before first user assignment.
+ # declaration_value string The value statically assigned on declaration (if any).
# entry Entry The Entry for this type
#
# declaration_code(entity_code,
@@ -254,6 +255,7 @@ class PyrexType(BaseType):
is_numpy_buffer = 0
has_attributes = 0
default_value = ""
+ declaration_value = ""
def resolve(self):
# If a typedef, returns the base type.
@@ -314,6 +316,21 @@ class PyrexType(BaseType):
def needs_nonecheck(self):
return 0
+ def _assign_from_py_code(self, source_code, result_code, error_pos, code,
+ from_py_function=None, error_condition=None, extra_args=None):
+ args = ', ' + ', '.join('%s' % arg for arg in extra_args) if extra_args else ''
+ convert_call = "%s(%s%s)" % (
+ from_py_function or self.from_py_function,
+ source_code,
+ args,
+ )
+ if self.is_enum:
+ convert_call = typecast(self, c_long_type, convert_call)
+ return '%s = %s; %s' % (
+ result_code,
+ convert_call,
+ code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
+
def public_decl(base_code, dll_linkage):
if dll_linkage:
@@ -491,12 +508,11 @@ class CTypedefType(BaseType):
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
- if from_py_function is None:
- from_py_function = self.from_py_function
- if error_condition is None:
- error_condition = self.error_condition(result_code)
return self.typedef_base_type.from_py_call_code(
- source_code, result_code, error_pos, code, from_py_function, error_condition)
+ source_code, result_code, error_pos, code,
+ from_py_function or self.from_py_function,
+ error_condition or self.error_condition(result_code)
+ )
def overflow_check_binop(self, binop, env, const_rhs=False):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
@@ -619,6 +635,7 @@ class MemoryViewSliceType(PyrexType):
def same_as_resolved_type(self, other_type):
return ((other_type.is_memoryviewslice and
+ #self.writable_needed == other_type.writable_needed and # FIXME: should be only uni-directional
self.dtype.same_as(other_type.dtype) and
self.axes == other_type.axes) or
other_type is error_type)
@@ -765,7 +782,21 @@ class MemoryViewSliceType(PyrexType):
src = self
- if src.dtype != dst.dtype:
+ #if not copying and self.writable_needed and not dst.writable_needed:
+ # return False
+
+ src_dtype, dst_dtype = src.dtype, dst.dtype
+ if dst_dtype.is_const:
+ # Requesting read-only views is always ok => consider only the non-const base type.
+ dst_dtype = dst_dtype.const_base_type
+ if src_dtype.is_const:
+ # When assigning between read-only views, compare only the non-const base types.
+ src_dtype = src_dtype.const_base_type
+ elif copying and src_dtype.is_const:
+ # Copying by value => ignore const on source.
+ src_dtype = src_dtype.const_base_type
+
+ if src_dtype != dst_dtype:
return False
if src.ndim != dst.ndim:
@@ -883,11 +914,12 @@ class MemoryViewSliceType(PyrexType):
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
- return '%s = %s(%s); %s' % (
- result_code,
- from_py_function or self.from_py_function,
- source_code,
- code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
+ # NOTE: auto-detection of readonly buffers is disabled:
+ # writable = self.writable_needed or not self.dtype.is_const
+ writable = not self.dtype.is_const
+ return self._assign_from_py_code(
+ source_code, result_code, error_pos, code, from_py_function, error_condition,
+ extra_args=['PyBUF_WRITABLE' if writable else '0'])
def create_to_py_utility_code(self, env):
self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env)
@@ -915,25 +947,29 @@ class MemoryViewSliceType(PyrexType):
if self.dtype.is_pyobject:
utility_name = "MemviewObjectToObject"
else:
- to_py = self.dtype.create_to_py_utility_code(env)
- from_py = self.dtype.create_from_py_utility_code(env)
- if not (to_py or from_py):
- return "NULL", "NULL"
+ self.dtype.create_to_py_utility_code(env)
+ to_py_function = self.dtype.to_py_function
- if not self.dtype.to_py_function:
- get_function = "NULL"
+ from_py_function = None
+ if not self.dtype.is_const:
+ self.dtype.create_from_py_utility_code(env)
+ from_py_function = self.dtype.from_py_function
- if not self.dtype.from_py_function:
+ if not (to_py_function or from_py_function):
+ return "NULL", "NULL"
+ if not to_py_function:
+ get_function = "NULL"
+ if not from_py_function:
set_function = "NULL"
utility_name = "MemviewDtypeToObject"
error_condition = (self.dtype.error_condition('value') or
'PyErr_Occurred()')
context.update(
- to_py_function = self.dtype.to_py_function,
- from_py_function = self.dtype.from_py_function,
- dtype = self.dtype.empty_declaration_code(),
- error_condition = error_condition,
+ to_py_function=to_py_function,
+ from_py_function=from_py_function,
+ dtype=self.dtype.empty_declaration_code(),
+ error_condition=error_condition,
)
utility = TempitaUtilityCode.load_cached(
@@ -1083,6 +1119,7 @@ class PyObjectType(PyrexType):
name = "object"
is_pyobject = 1
default_value = "0"
+ declaration_value = "0"
buffer_defaults = None
is_extern = False
is_subclassed = False
@@ -1302,10 +1339,12 @@ class PyExtensionType(PyObjectType):
# vtabstruct_cname string Name of C method table struct
# vtabptr_cname string Name of pointer to C method table
# vtable_cname string Name of C method table definition
+ # early_init boolean Whether to initialize early (as opposed to during module execution).
# defered_declarations [thunk] Used to declare class hierarchies in order
is_extension_type = 1
has_attributes = 1
+ early_init = 1
objtypedef_cname = None
@@ -1465,11 +1504,10 @@ class CType(PyrexType):
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
- return '%s = %s(%s); %s' % (
- result_code,
- from_py_function or self.from_py_function,
- source_code,
- code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
+ return self._assign_from_py_code(
+ source_code, result_code, error_pos, code, from_py_function, error_condition)
+
+
class PythranExpr(CType):
# Pythran object of a given type
@@ -1487,25 +1525,32 @@ class PythranExpr(CType):
self.from_py_function = "from_python<%s>" % (self.pythran_type)
self.scope = None
- def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0):
- assert pyrex == 0
- return "%s %s" % (self.name, entity_code)
+ def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
+ assert not pyrex
+ return "%s %s" % (self.cname, entity_code)
def attributes_known(self):
if self.scope is None:
from . import Symtab
- self.scope = scope = Symtab.CClassScope(
- '',
- None,
- visibility="extern")
+ # FIXME: fake C scope, might be better represented by a struct or C++ class scope
+ self.scope = scope = Symtab.CClassScope('', None, visibility="extern")
scope.parent_type = self
scope.directives = {}
- # rank 3 == long
- scope.declare_var("shape", CPtrType(CIntType(3)), None, cname="_shape", is_cdef=True)
- scope.declare_var("ndim", CIntType(3), None, cname="value", is_cdef=True)
+ scope.declare_var("shape", CPtrType(c_long_type), None, cname="_shape", is_cdef=True)
+ scope.declare_var("ndim", c_long_type, None, cname="value", is_cdef=True)
return True
+ def __eq__(self, other):
+ return isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type
+
+ def __ne__(self, other):
+ return not (isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type)
+
+ def __hash__(self):
+ return hash(self.pythran_type)
+
+
class CConstType(BaseType):
is_const = 1
@@ -1720,15 +1765,13 @@ class ForbidUseClass:
ForbidUse = ForbidUseClass()
-class CIntType(CNumericType):
-
- is_int = 1
- typedef_flag = 0
+class CIntLike(object):
+ """Mixin for shared behaviour of C integers and enums.
+ """
to_py_function = None
from_py_function = None
to_pyunicode_utility = None
default_format_spec = 'd'
- exception_value = -1
def can_coerce_to_pyobject(self, env):
return True
@@ -1736,6 +1779,24 @@ class CIntType(CNumericType):
def can_coerce_from_pyobject(self, env):
return True
+ def create_to_py_utility_code(self, env):
+ if type(self).to_py_function is None:
+ self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "CIntToPy", "TypeConversion.c",
+ context={"TYPE": self.empty_declaration_code(),
+ "TO_PY_FUNCTION": self.to_py_function}))
+ return True
+
+ def create_from_py_utility_code(self, env):
+ if type(self).from_py_function is None:
+ self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "CIntFromPy", "TypeConversion.c",
+ context={"TYPE": self.empty_declaration_code(),
+ "FROM_PY_FUNCTION": self.from_py_function}))
+ return True
+
@staticmethod
def _parse_format(format_spec):
padding = ' '
@@ -1778,23 +1839,12 @@ class CIntType(CNumericType):
format_type, width, padding_char = self._parse_format(format_spec)
return "%s(%s, %d, '%s', '%s')" % (utility_code_name, cvalue, width, padding_char, format_type)
- def create_to_py_utility_code(self, env):
- if type(self).to_py_function is None:
- self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
- env.use_utility_code(TempitaUtilityCode.load_cached(
- "CIntToPy", "TypeConversion.c",
- context={"TYPE": self.empty_declaration_code(),
- "TO_PY_FUNCTION": self.to_py_function}))
- return True
- def create_from_py_utility_code(self, env):
- if type(self).from_py_function is None:
- self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
- env.use_utility_code(TempitaUtilityCode.load_cached(
- "CIntFromPy", "TypeConversion.c",
- context={"TYPE": self.empty_declaration_code(),
- "FROM_PY_FUNCTION": self.from_py_function}))
- return True
+class CIntType(CIntLike, CNumericType):
+
+ is_int = 1
+ typedef_flag = 0
+ exception_value = -1
def get_to_py_type_conversion(self):
if self.rank < list(rank_to_type_name).index('int'):
@@ -2211,6 +2261,25 @@ complex_ops = {
}
+class CPyTSSTType(CType):
+ #
+ # PEP-539 "Py_tss_t" type
+ #
+
+ declaration_value = "Py_tss_NEEDS_INIT"
+
+ def __repr__(self):
+ return ""
+
+ def declaration_code(self, entity_code,
+ for_display=0, dll_linkage=None, pyrex=0):
+ if pyrex or for_display:
+ base_code = "Py_tss_t"
+ else:
+ base_code = public_decl("Py_tss_t", dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+
class CPointerBaseType(CType):
# common base type for pointer/array types
#
@@ -2401,6 +2470,7 @@ class CArrayType(CPointerBaseType):
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
+ assert not error_condition, '%s: %s' % (error_pos, error_condition)
call_code = "%s(%s, %s, %s)" % (
from_py_function or self.from_py_function,
source_code, result_code, self.size)
@@ -3145,15 +3215,18 @@ class CFuncTypeArg(BaseType):
or_none = False
accept_none = True
accept_builtin_subtypes = False
+ annotation = None
subtypes = ['type']
- def __init__(self, name, type, pos, cname=None):
+ def __init__(self, name, type, pos, cname=None, annotation=None):
self.name = name
if cname is not None:
self.cname = cname
else:
self.cname = Naming.var_prefix + name
+ if annotation is not None:
+ self.annotation = annotation
self.type = type
self.pos = pos
self.needs_type_test = False # TODO: should these defaults be set in analyse_types()?
@@ -3167,6 +3240,7 @@ class CFuncTypeArg(BaseType):
def specialize(self, values):
return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname)
+
class ToPyStructUtilityCode(object):
requires = None
@@ -3718,6 +3792,8 @@ class CppClassType(CType):
return True
elif other_type.is_cpp_class:
return other_type.is_subclass(self)
+ elif other_type.is_string and self.cname in cpp_string_conversions:
+ return True
def attributes_known(self):
return self.scope is not None
@@ -3788,7 +3864,7 @@ def is_optional_template_param(type):
return isinstance(type, TemplatePlaceholderType) and type.optional
-class CEnumType(CType):
+class CEnumType(CIntLike, CType):
# name string
# cname string or None
# typedef_flag boolean
@@ -3836,38 +3912,6 @@ class CEnumType(CType):
self.name, self.cname, self.typedef_flag, namespace)
return self
- def can_coerce_to_pyobject(self, env):
- return True
-
- def can_coerce_from_pyobject(self, env):
- return True
-
- def create_to_py_utility_code(self, env):
- self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
- env.use_utility_code(TempitaUtilityCode.load_cached(
- "CIntToPy", "TypeConversion.c",
- context={"TYPE": self.empty_declaration_code(),
- "TO_PY_FUNCTION": self.to_py_function}))
- return True
-
- def create_from_py_utility_code(self, env):
- self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
- env.use_utility_code(TempitaUtilityCode.load_cached(
- "CIntFromPy", "TypeConversion.c",
- context={"TYPE": self.empty_declaration_code(),
- "FROM_PY_FUNCTION": self.from_py_function}))
- return True
-
- def from_py_call_code(self, source_code, result_code, error_pos, code,
- from_py_function=None, error_condition=None):
- rhs = "%s(%s)" % (
- from_py_function or self.from_py_function,
- source_code)
- return '%s = %s;%s' % (
- result_code,
- typecast(self, c_long_type, rhs),
- ' %s' % code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
-
def create_type_wrapper(self, env):
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
@@ -4089,6 +4133,9 @@ c_gilstate_type = CEnumType("PyGILState_STATE", "PyGILState_STATE", True)
c_threadstate_type = CStructOrUnionType("PyThreadState", "struct", None, 1, "PyThreadState")
c_threadstate_ptr_type = CPtrType(c_threadstate_type)
+# PEP-539 "Py_tss_t" type
+c_pytss_t_type = CPyTSSTType()
+
# the Py_buffer type is defined in Builtin.py
c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer")
c_py_buffer_ptr_type = CPtrType(c_py_buffer_type)
@@ -4154,6 +4201,7 @@ modifiers_and_name_to_type = {
#
(1, 0, "void"): c_void_type,
+ (1, 0, "Py_tss_t"): c_pytss_t_type,
(1, 0, "bint"): c_bint_type,
(0, 0, "Py_UNICODE"): c_py_unicode_type,
diff --git a/Cython/Compiler/Pythran.py b/Cython/Compiler/Pythran.py
index d2c3573..cf0e324 100644
--- a/Cython/Compiler/Pythran.py
+++ b/Cython/Compiler/Pythran.py
@@ -66,7 +66,8 @@ def _index_access(index_code, indices):
return ('[%s]' if len(indices) == 1 else '(%s)') % indexing
-def _index_type_code(idx):
+def _index_type_code(index_with_type):
+ idx, index_type = index_with_type
if idx.is_slice:
if idx.step.is_none:
func = "contiguous_slice"
@@ -76,11 +77,11 @@ def _index_type_code(idx):
n = 3
return "pythonic::types::%s(%s)" % (
func, ",".join(["0"]*n))
- elif idx.type.is_int:
- return "std::declval<%s>()" % idx.type.sign_and_name()
- elif idx.type.is_pythran_expr:
- return "std::declval<%s>()" % idx.type.pythran_type
- raise ValueError("unsupported indexing type %s!" % idx.type)
+ elif index_type.is_int:
+ return "std::declval<%s>()" % index_type.sign_and_name()
+ elif index_type.is_pythran_expr:
+ return "std::declval<%s>()" % index_type.pythran_type
+ raise ValueError("unsupported indexing type %s!" % index_type)
def _index_code(idx):
diff --git a/Cython/Compiler/Scanning.py b/Cython/Compiler/Scanning.py
index eabe676..9e07aa0 100644
--- a/Cython/Compiler/Scanning.py
+++ b/Cython/Compiler/Scanning.py
@@ -168,7 +168,7 @@ class SourceDescriptor(object):
if self._escaped_description is None:
esc_desc = \
self.get_description().encode('ASCII', 'replace').decode("ASCII")
- # Use foreward slashes on Windows since these paths
+ # Use forward slashes on Windows since these paths
# will be used in the #line directives in the C/C++ files.
self._escaped_description = esc_desc.replace('\\', '/')
return self._escaped_description
diff --git a/Cython/Compiler/StringEncoding.py b/Cython/Compiler/StringEncoding.py
index 608f371..af0b411 100644
--- a/Cython/Compiler/StringEncoding.py
+++ b/Cython/Compiler/StringEncoding.py
@@ -191,6 +191,14 @@ def bytes_literal(s, encoding):
return s
+def encoded_string(s, encoding):
+ assert isinstance(s, (_unicode, bytes))
+ s = EncodedString(s)
+ if encoding is not None:
+ s.encoding = encoding
+ return s
+
+
char_from_escape_sequence = {
r'\a' : u'\a',
r'\b' : u'\b',
diff --git a/Cython/Compiler/Symtab.py b/Cython/Compiler/Symtab.py
index ab43c70..21e89e7 100644
--- a/Cython/Compiler/Symtab.py
+++ b/Cython/Compiler/Symtab.py
@@ -35,13 +35,13 @@ iso_c99_keywords = set(
def c_safe_identifier(cname):
# There are some C limitations on struct entry names.
- if ((cname[:2] == '__'
- and not (cname.startswith(Naming.pyrex_prefix)
- or cname in ('__weakref__', '__dict__')))
- or cname in iso_c99_keywords):
+ if ((cname[:2] == '__' and not (cname.startswith(Naming.pyrex_prefix)
+ or cname in ('__weakref__', '__dict__')))
+ or cname in iso_c99_keywords):
cname = Naming.pyrex_prefix + cname
return cname
+
class BufferAux(object):
writable_needed = False
@@ -60,6 +60,7 @@ class Entry(object):
# cname string C name of entity
# type PyrexType Type of entity
# doc string Doc string
+ # annotation ExprNode PEP 484/526 annotation
# init string Initial value
# visibility 'private' or 'public' or 'extern'
# is_builtin boolean Is an entry in the Python builtins dict
@@ -121,7 +122,7 @@ class Entry(object):
#
# buffer_aux BufferAux or None Extra information needed for buffer variables
# inline_func_in_pxd boolean Hacky special case for inline function in pxd file.
- # Ideally this should not be necesarry.
+ # Ideally this should not be necessary.
# might_overflow boolean In an arithmetic expression that could cause
# overflow (used for type inference).
# utility_code_definition For some Cython builtins, the utility code
@@ -138,6 +139,7 @@ class Entry(object):
inline_func_in_pxd = False
borrowed = 0
init = ""
+ annotation = None
visibility = 'private'
is_builtin = 0
is_cglobal = 0
@@ -216,9 +218,12 @@ class Entry(object):
def __repr__(self):
return "%s(<%x>, name=%s, type=%s)" % (type(self).__name__, id(self), self.name, self.type)
+ def already_declared_here(self):
+ error(self.pos, "Previous declaration is here")
+
def redeclared(self, pos):
error(pos, "'%s' does not match previous declaration" % self.name)
- error(self.pos, "Previous declaration is here")
+ self.already_declared_here()
def all_alternatives(self):
return [self] + self.overloaded_alternatives
@@ -441,17 +446,33 @@ class Scope(object):
warning(pos, "'%s' is a reserved name in C." % cname, -1)
entries = self.entries
if name and name in entries and not shadow:
- old_type = entries[name].type
- if self.is_cpp_class_scope and type.is_cfunction and old_type.is_cfunction and type != old_type:
- # C++ method overrides are ok
+ old_entry = entries[name]
+
+ # Reject redeclared C++ functions only if they have the same type signature.
+ cpp_override_allowed = False
+ if type.is_cfunction and old_entry.type.is_cfunction and self.is_cpp():
+ for alt_entry in old_entry.all_alternatives():
+ if type == alt_entry.type:
+ if name == '' and not type.args:
+ # Cython pre-declares the no-args constructor - allow later user definitions.
+ cpp_override_allowed = True
+ break
+ else:
+ cpp_override_allowed = True
+
+ if cpp_override_allowed:
+ # C++ function/method overrides with different signatures are ok.
pass
elif self.is_cpp_class_scope and entries[name].is_inherited:
# Likewise ignore inherited classes.
pass
elif visibility == 'extern':
- warning(pos, "'%s' redeclared " % name, 0)
+ # Silenced outside of "cdef extern" blocks, until we have a safe way to
+ # prevent pxd-defined cpdef functions from ending up here.
+ warning(pos, "'%s' redeclared " % name, 1 if self.in_cinclude else 0)
elif visibility != 'ignore':
error(pos, "'%s' redeclared " % name)
+ entries[name].already_declared_here()
entry = Entry(name, cname, type, pos = pos)
entry.in_cinclude = self.in_cinclude
entry.create_wrapper = create_wrapper
@@ -583,6 +604,7 @@ class Scope(object):
else:
if not (entry.is_type and entry.type.is_cpp_class):
error(pos, "'%s' redeclared " % name)
+ entry.already_declared_here()
return None
elif scope and entry.type.scope:
warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
@@ -593,11 +615,13 @@ class Scope(object):
if base_classes:
if entry.type.base_classes and entry.type.base_classes != base_classes:
error(pos, "Base type does not match previous declaration")
+ entry.already_declared_here()
else:
entry.type.base_classes = base_classes
if templates or entry.type.templates:
if templates != entry.type.templates:
error(pos, "Template parameters do not match previous declaration")
+ entry.already_declared_here()
def declare_inherited_attributes(entry, base_classes):
for base_class in base_classes:
@@ -1066,8 +1090,8 @@ class ModuleScope(Scope):
# doc string Module doc string
# doc_cname string C name of module doc string
# utility_code_list [UtilityCode] Queuing utility codes for forwarding to Code.py
- # python_include_files [string] Standard Python headers to be included
- # include_files [string] Other C headers to be included
+ # c_includes {key: IncludeCode} C headers or verbatim code to be generated
+ # See process_include() for more documentation
# string_to_entry {string : Entry} Map string const to entry
# identifier_to_entry {string : Entry} Map identifier string const to entry
# context Context
@@ -1110,8 +1134,7 @@ class ModuleScope(Scope):
self.doc_cname = Naming.moddoc_cname
self.utility_code_list = []
self.module_entries = {}
- self.python_include_files = ["Python.h"]
- self.include_files = []
+ self.c_includes = {}
self.type_names = dict(outer_scope.type_names)
self.pxd_file_loaded = 0
self.cimported_modules = []
@@ -1125,6 +1148,7 @@ class ModuleScope(Scope):
for var_name in ['__builtins__', '__name__', '__file__', '__doc__', '__path__',
'__spec__', '__loader__', '__package__', '__cached__']:
self.declare_var(EncodedString(var_name), py_object_type, None)
+ self.process_include(Code.IncludeCode("Python.h", initial=True))
def qualifying_scope(self):
return self.parent_module
@@ -1247,15 +1271,50 @@ class ModuleScope(Scope):
module = module.lookup_submodule(submodule)
return module
- def add_include_file(self, filename):
- if filename not in self.python_include_files \
- and filename not in self.include_files:
- self.include_files.append(filename)
+ def add_include_file(self, filename, verbatim_include=None, late=False):
+ """
+ Add `filename` as include file. Add `verbatim_include` as
+ verbatim text in the C file.
+ Both `filename` and `verbatim_include` can be `None` or empty.
+ """
+ inc = Code.IncludeCode(filename, verbatim_include, late=late)
+ self.process_include(inc)
+
+ def process_include(self, inc):
+ """
+ Add `inc`, which is an instance of `IncludeCode`, to this
+ `ModuleScope`. This either adds a new element to the
+ `c_includes` dict or it updates an existing entry.
+
+ In detail: the values of the dict `self.c_includes` are
+ instances of `IncludeCode` containing the code to be put in the
+ generated C file. The keys of the dict are needed to ensure
+ uniqueness in two ways: if an include file is specified in
+ multiple "cdef extern" blocks, only one `#include` statement is
+ generated. Second, the same include might occur multiple times
+ if we find it through multiple "cimport" paths. So we use the
+ generated code (of the form `#include "header.h"`) as dict key.
+
+ If verbatim code does not belong to any include file (i.e. it
+ was put in a `cdef extern from *` block), then we use a unique
+ dict key: namely, the `sortkey()`.
+
+ One `IncludeCode` object can contain multiple pieces of C code:
+ one optional "main piece" for the include file and several other
+ pieces for the verbatim code. The `IncludeCode.dict_update`
+ method merges the pieces of two different `IncludeCode` objects
+ if needed.
+ """
+ key = inc.mainpiece()
+ if key is None:
+ key = inc.sortkey()
+ inc.dict_update(self.c_includes, key)
+ inc = self.c_includes[key]
def add_imported_module(self, scope):
if scope not in self.cimported_modules:
- for filename in scope.include_files:
- self.add_include_file(filename)
+ for inc in scope.c_includes.values():
+ self.process_include(inc)
self.cimported_modules.append(scope)
for m in scope.cimported_modules:
self.add_imported_module(m)
@@ -1341,8 +1400,8 @@ class ModuleScope(Scope):
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
if is_cdef:
entry.is_cglobal = 1
- if entry.type.is_pyobject:
- entry.init = 0
+ if entry.type.declaration_value:
+ entry.init = entry.type.declaration_value
self.var_entries.append(entry)
else:
entry.is_pyglobal = 1
@@ -1673,8 +1732,8 @@ class LocalScope(Scope):
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
- if type.is_pyobject:
- entry.init = "0"
+ if entry.type.declaration_value:
+ entry.init = entry.type.declaration_value
entry.is_local = 1
entry.in_with_gil_block = self._in_with_gil_block
@@ -1694,6 +1753,7 @@ class LocalScope(Scope):
orig_entry = self.lookup_here(name)
if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
error(pos, "'%s' redeclared as nonlocal" % name)
+ orig_entry.already_declared_here()
else:
entry = self.lookup(name)
if entry is None or not entry.from_closure:
@@ -1825,7 +1885,7 @@ class StructOrUnionScope(Scope):
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0,
- allow_pyobject = 0):
+ allow_pyobject=False, allow_memoryview=False):
# Add an entry for an attribute.
if not cname:
cname = name
@@ -1837,11 +1897,12 @@ class StructOrUnionScope(Scope):
entry.is_variable = 1
self.var_entries.append(entry)
if type.is_pyobject and not allow_pyobject:
- error(pos,
- "C struct/union member cannot be a Python object")
+ error(pos, "C struct/union member cannot be a Python object")
+ elif type.is_memoryviewslice and not allow_memoryview:
+ # Memory views wrap their buffer owner as a Python object.
+ error(pos, "C struct/union member cannot be a memory view")
if visibility != 'private':
- error(pos,
- "C struct/union member cannot be declared %s" % visibility)
+ error(pos, "C struct/union member cannot be declared %s" % visibility)
return entry
def declare_cfunction(self, name, type, pos,
@@ -1926,6 +1987,7 @@ class PyClassScope(ClassScope):
orig_entry = self.lookup_here(name)
if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
error(pos, "'%s' redeclared as nonlocal" % name)
+ orig_entry.already_declared_here()
else:
entry = self.lookup(name)
if entry is None:
@@ -2329,6 +2391,12 @@ class CppClassScope(Scope):
cname = "%s__dealloc__%s" % (Naming.func_prefix, class_name)
name = ''
type.return_type = PyrexTypes.CVoidType()
+ if name in ('', '') and type.nogil:
+ for base in self.type.base_classes:
+ base_entry = base.scope.lookup(name)
+ if base_entry and not base_entry.type.nogil:
+ error(pos, "Constructor cannot be called without GIL unless all base constructors can also be called without GIL")
+ error(base_entry.pos, "Base constructor defined here.")
prev_entry = self.lookup_here(name)
entry = self.declare_var(name, type, pos,
defining=defining,
@@ -2353,7 +2421,7 @@ class CppClassScope(Scope):
# to work with this type.
for base_entry in \
base_scope.inherited_var_entries + base_scope.var_entries:
- #contructor/destructor is not inherited
+ #constructor/destructor is not inherited
if base_entry.name in ("", ""):
continue
#print base_entry.name, self.entries
diff --git a/Cython/Compiler/Tests/TestTreeFragment.py b/Cython/Compiler/Tests/TestTreeFragment.py
index 3352c71..3f15b74 100644
--- a/Cython/Compiler/Tests/TestTreeFragment.py
+++ b/Cython/Compiler/Tests/TestTreeFragment.py
@@ -45,7 +45,7 @@ class TestTreeFragments(CythonTest):
T = F.substitute({"v" : NameNode(pos=None, name="a")})
v = F.root.stats[1].rhs.operand2.operand1
a = T.stats[1].rhs.operand2.operand1
- self.assertEquals(v.pos, a.pos)
+ self.assertEqual(v.pos, a.pos)
def test_temps(self):
TemplateTransform.temp_name_counter = 0
diff --git a/Cython/Compiler/Tests/TestTreePath.py b/Cython/Compiler/Tests/TestTreePath.py
index 9b8ca38..bee53b3 100644
--- a/Cython/Compiler/Tests/TestTreePath.py
+++ b/Cython/Compiler/Tests/TestTreePath.py
@@ -20,75 +20,75 @@ class TestTreePath(TransformTest):
def test_node_path(self):
t = self._build_tree()
- self.assertEquals(2, len(find_all(t, "//DefNode")))
- self.assertEquals(2, len(find_all(t, "//NameNode")))
- self.assertEquals(1, len(find_all(t, "//ReturnStatNode")))
- self.assertEquals(1, len(find_all(t, "//DefNode//ReturnStatNode")))
+ self.assertEqual(2, len(find_all(t, "//DefNode")))
+ self.assertEqual(2, len(find_all(t, "//NameNode")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode")))
+ self.assertEqual(1, len(find_all(t, "//DefNode//ReturnStatNode")))
def test_node_path_star(self):
t = self._build_tree()
- self.assertEquals(10, len(find_all(t, "//*")))
- self.assertEquals(8, len(find_all(t, "//DefNode//*")))
- self.assertEquals(0, len(find_all(t, "//NameNode//*")))
+ self.assertEqual(10, len(find_all(t, "//*")))
+ self.assertEqual(8, len(find_all(t, "//DefNode//*")))
+ self.assertEqual(0, len(find_all(t, "//NameNode//*")))
def test_node_path_attribute(self):
t = self._build_tree()
- self.assertEquals(2, len(find_all(t, "//NameNode/@name")))
- self.assertEquals(['fun', 'decorator'], find_all(t, "//NameNode/@name"))
+ self.assertEqual(2, len(find_all(t, "//NameNode/@name")))
+ self.assertEqual(['fun', 'decorator'], find_all(t, "//NameNode/@name"))
def test_node_path_attribute_dotted(self):
t = self._build_tree()
- self.assertEquals(1, len(find_all(t, "//ReturnStatNode/@value.name")))
- self.assertEquals(['fun'], find_all(t, "//ReturnStatNode/@value.name"))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode/@value.name")))
+ self.assertEqual(['fun'], find_all(t, "//ReturnStatNode/@value.name"))
def test_node_path_child(self):
t = self._build_tree()
- self.assertEquals(1, len(find_all(t, "//DefNode/ReturnStatNode/NameNode")))
- self.assertEquals(1, len(find_all(t, "//ReturnStatNode/NameNode")))
+ self.assertEqual(1, len(find_all(t, "//DefNode/ReturnStatNode/NameNode")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode/NameNode")))
def test_node_path_node_predicate(self):
t = self._build_tree()
- self.assertEquals(0, len(find_all(t, "//DefNode[.//ForInStatNode]")))
- self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode]")))
- self.assertEquals(1, len(find_all(t, "//ReturnStatNode[./NameNode]")))
- self.assertEquals(Nodes.ReturnStatNode,
- type(find_first(t, "//ReturnStatNode[./NameNode]")))
+ self.assertEqual(0, len(find_all(t, "//DefNode[.//ForInStatNode]")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode[./NameNode]")))
+ self.assertEqual(Nodes.ReturnStatNode,
+ type(find_first(t, "//ReturnStatNode[./NameNode]")))
def test_node_path_node_predicate_step(self):
t = self._build_tree()
- self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode]")))
- self.assertEquals(8, len(find_all(t, "//DefNode[.//NameNode]//*")))
- self.assertEquals(1, len(find_all(t, "//DefNode[.//NameNode]//ReturnStatNode")))
- self.assertEquals(Nodes.ReturnStatNode,
- type(find_first(t, "//DefNode[.//NameNode]//ReturnStatNode")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
+ self.assertEqual(8, len(find_all(t, "//DefNode[.//NameNode]//*")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode]//ReturnStatNode")))
+ self.assertEqual(Nodes.ReturnStatNode,
+ type(find_first(t, "//DefNode[.//NameNode]//ReturnStatNode")))
def test_node_path_attribute_exists(self):
t = self._build_tree()
- self.assertEquals(2, len(find_all(t, "//NameNode[@name]")))
- self.assertEquals(ExprNodes.NameNode,
- type(find_first(t, "//NameNode[@name]")))
+ self.assertEqual(2, len(find_all(t, "//NameNode[@name]")))
+ self.assertEqual(ExprNodes.NameNode,
+ type(find_first(t, "//NameNode[@name]")))
def test_node_path_attribute_exists_not(self):
t = self._build_tree()
- self.assertEquals(0, len(find_all(t, "//NameNode[not(@name)]")))
- self.assertEquals(2, len(find_all(t, "//NameNode[not(@honking)]")))
+ self.assertEqual(0, len(find_all(t, "//NameNode[not(@name)]")))
+ self.assertEqual(2, len(find_all(t, "//NameNode[not(@honking)]")))
def test_node_path_and(self):
t = self._build_tree()
- self.assertEquals(1, len(find_all(t, "//DefNode[.//ReturnStatNode and .//NameNode]")))
- self.assertEquals(0, len(find_all(t, "//NameNode[@honking and @name]")))
- self.assertEquals(0, len(find_all(t, "//NameNode[@name and @honking]")))
- self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode[@name] and @name]")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode and .//NameNode]")))
+ self.assertEqual(0, len(find_all(t, "//NameNode[@honking and @name]")))
+ self.assertEqual(0, len(find_all(t, "//NameNode[@name and @honking]")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name] and @name]")))
def test_node_path_attribute_string_predicate(self):
t = self._build_tree()
- self.assertEquals(1, len(find_all(t, "//NameNode[@name = 'decorator']")))
+ self.assertEqual(1, len(find_all(t, "//NameNode[@name = 'decorator']")))
def test_node_path_recursive_predicate(self):
t = self._build_tree()
- self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode[@name]]")))
- self.assertEquals(1, len(find_all(t, "//DefNode[.//NameNode[@name = 'decorator']]")))
- self.assertEquals(1, len(find_all(t, "//DefNode[.//ReturnStatNode[./NameNode[@name = 'fun']]/NameNode]")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name]]")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode[@name = 'decorator']]")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode[./NameNode[@name = 'fun']]/NameNode]")))
if __name__ == '__main__':
unittest.main()
diff --git a/Cython/Compiler/Tests/TestUtilityLoad.py b/Cython/Compiler/Tests/TestUtilityLoad.py
index a7ec3ef..3d1906c 100644
--- a/Cython/Compiler/Tests/TestUtilityLoad.py
+++ b/Cython/Compiler/Tests/TestUtilityLoad.py
@@ -23,27 +23,27 @@ class TestUtilityLoader(unittest.TestCase):
def test_load_as_string(self):
got = strip_2tup(self.cls.load_as_string(self.name))
- self.assertEquals(got, self.expected)
+ self.assertEqual(got, self.expected)
got = strip_2tup(self.cls.load_as_string(self.name, self.filename))
- self.assertEquals(got, self.expected)
+ self.assertEqual(got, self.expected)
def test_load(self):
utility = self.cls.load(self.name)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEquals(got, self.expected)
+ self.assertEqual(got, self.expected)
required, = utility.requires
got = strip_2tup((required.proto, required.impl))
- self.assertEquals(got, self.required)
+ self.assertEqual(got, self.required)
utility = self.cls.load(self.name, from_file=self.filename)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEquals(got, self.expected)
+ self.assertEqual(got, self.expected)
utility = self.cls.load_cached(self.name, from_file=self.filename)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEquals(got, self.expected)
+ self.assertEqual(got, self.expected)
class TestTempitaUtilityLoader(TestUtilityLoader):
@@ -60,20 +60,20 @@ class TestTempitaUtilityLoader(TestUtilityLoader):
def test_load_as_string(self):
got = strip_2tup(self.cls.load_as_string(self.name, context=self.context))
- self.assertEquals(got, self.expected_tempita)
+ self.assertEqual(got, self.expected_tempita)
def test_load(self):
utility = self.cls.load(self.name, context=self.context)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEquals(got, self.expected_tempita)
+ self.assertEqual(got, self.expected_tempita)
required, = utility.requires
got = strip_2tup((required.proto, required.impl))
- self.assertEquals(got, self.required_tempita)
+ self.assertEqual(got, self.required_tempita)
utility = self.cls.load(self.name, from_file=self.filename, context=self.context)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEquals(got, self.expected_tempita)
+ self.assertEqual(got, self.expected_tempita)
class TestCythonUtilityLoader(TestTempitaUtilityLoader):
diff --git a/Cython/Compiler/TreeFragment.py b/Cython/Compiler/TreeFragment.py
index 0d37741..9f3e89e 100644
--- a/Cython/Compiler/TreeFragment.py
+++ b/Cython/Compiler/TreeFragment.py
@@ -24,13 +24,13 @@ from . import UtilNodes
class StringParseContext(Main.Context):
- def __init__(self, name, include_directories=None, compiler_directives=None):
+ def __init__(self, name, include_directories=None, compiler_directives=None, cpp=False):
if include_directories is None:
include_directories = []
if compiler_directives is None:
compiler_directives = {}
Main.Context.__init__(self, include_directories, compiler_directives,
- create_testscope=False)
+ create_testscope=False, cpp=cpp)
self.module_name = name
def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1, absolute_fallback=True):
diff --git a/Cython/Compiler/TypeInference.py b/Cython/Compiler/TypeInference.py
index c09220c..c7ffee7 100644
--- a/Cython/Compiler/TypeInference.py
+++ b/Cython/Compiler/TypeInference.py
@@ -378,7 +378,7 @@ class SimpleAssignmentTypeInferer(object):
self.set_entry_type(entry, py_object_type)
return
- # Set of assignemnts
+ # Set of assignments
assignments = set()
assmts_resolved = set()
dependencies = {}
@@ -415,6 +415,24 @@ class SimpleAssignmentTypeInferer(object):
entry = node.entry
return spanning_type(types, entry.might_overflow, entry.pos, scope)
+ def inferred_types(entry):
+ has_none = False
+ has_pyobjects = False
+ types = []
+ for assmt in entry.cf_assignments:
+ if assmt.rhs.is_none:
+ has_none = True
+ else:
+ rhs_type = assmt.inferred_type
+ if rhs_type and rhs_type.is_pyobject:
+ has_pyobjects = True
+ types.append(rhs_type)
+ # Ignore None assignments as long as there are concrete Python type assignments.
+ # but include them if None is the only assigned Python object.
+ if has_none and not has_pyobjects:
+ types.append(py_object_type)
+ return types
+
def resolve_assignments(assignments):
resolved = set()
for assmt in assignments:
@@ -467,7 +485,7 @@ class SimpleAssignmentTypeInferer(object):
continue
entry_type = py_object_type
if assmts_resolved.issuperset(entry.cf_assignments):
- types = [assmt.inferred_type for assmt in entry.cf_assignments]
+ types = inferred_types(entry)
if types and all(types):
entry_type = spanning_type(
types, entry.might_overflow, entry.pos, scope)
@@ -477,8 +495,9 @@ class SimpleAssignmentTypeInferer(object):
def reinfer():
dirty = False
for entry in inferred:
- types = [assmt.infer_type()
- for assmt in entry.cf_assignments]
+ for assmt in entry.cf_assignments:
+ assmt.infer_type()
+ types = inferred_types(entry)
new_type = spanning_type(types, entry.might_overflow, entry.pos, scope)
if new_type != entry.type:
self.set_entry_type(entry, new_type)
@@ -544,6 +563,8 @@ def safe_spanning_type(types, might_overflow, pos, scope):
# find_spanning_type() only returns 'bint' for clean boolean
# operations without other int types, so this is safe, too
return result_type
+ elif result_type.is_pythran_expr:
+ return result_type
elif result_type.is_ptr:
# Any pointer except (signed|unsigned|) char* can't implicitly
# become a PyObject, and inferring char* is now accepted, too.
diff --git a/Cython/Compiler/TypeSlots.py b/Cython/Compiler/TypeSlots.py
index ae0257f..7cf6cc4 100644
--- a/Cython/Compiler/TypeSlots.py
+++ b/Cython/Compiler/TypeSlots.py
@@ -612,6 +612,20 @@ def get_slot_function(scope, slot):
return slot_code
return None
+
+def get_slot_by_name(slot_name):
+ # For now, only search the type struct, no referenced sub-structs.
+ for slot in slot_table:
+ if slot.slot_name == slot_name:
+ return slot
+ assert False, "Slot not found: %s" % slot_name
+
+
+def get_slot_code_by_name(scope, slot_name):
+ slot = get_slot_by_name(slot_name)
+ return slot.slot_code(scope)
+
+
#------------------------------------------------------------------------------------------
#
# Signatures for generic Python functions and methods.
@@ -678,8 +692,7 @@ delattrofunc = Signature("TO", 'r')
cmpfunc = Signature("TO", "i") # typedef int (*cmpfunc)(PyObject *, PyObject *);
reprfunc = Signature("T", "O") # typedef PyObject *(*reprfunc)(PyObject *);
hashfunc = Signature("T", "h") # typedef Py_hash_t (*hashfunc)(PyObject *);
- # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
-richcmpfunc = Signature("OOi", "O") # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
+richcmpfunc = Signature("TOi", "O") # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
getiterfunc = Signature("T", "O") # typedef PyObject *(*getiterfunc) (PyObject *);
iternextfunc = Signature("T", "O") # typedef PyObject *(*iternextfunc) (PyObject *);
descrgetfunc = Signature("TOO", "O") # typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *);
diff --git a/Cython/Compiler/UtilNodes.py b/Cython/Compiler/UtilNodes.py
index 0dd5119..c41748a 100644
--- a/Cython/Compiler/UtilNodes.py
+++ b/Cython/Compiler/UtilNodes.py
@@ -1,7 +1,7 @@
#
# Nodes used as utilities and support for transforms etc.
# These often make up sets including both Nodes and ExprNodes
-# so it is convenient to have them in a seperate module.
+# so it is convenient to have them in a separate module.
#
from __future__ import absolute_import
@@ -267,6 +267,9 @@ class EvalWithTempExprNode(ExprNodes.ExprNode, LetNodeMixin):
def infer_type(self, env):
return self.subexpression.infer_type(env)
+ def may_be_none(self):
+ return self.subexpression.may_be_none()
+
def result(self):
return self.subexpression.result()
diff --git a/Cython/Compiler/UtilityCode.py b/Cython/Compiler/UtilityCode.py
index 1e39da0..98e9ab5 100644
--- a/Cython/Compiler/UtilityCode.py
+++ b/Cython/Compiler/UtilityCode.py
@@ -8,11 +8,10 @@ from . import Code
class NonManglingModuleScope(Symtab.ModuleScope):
- cpp = False
-
def __init__(self, prefix, *args, **kw):
self.prefix = prefix
self.cython_scope = None
+ self.cpp = kw.pop('cpp', False)
Symtab.ModuleScope.__init__(self, *args, **kw)
def add_imported_entry(self, name, entry, pos):
@@ -44,7 +43,7 @@ class CythonUtilityCodeContext(StringParseContext):
if self.scope is None:
self.scope = NonManglingModuleScope(
- self.prefix, module_name, parent_module=None, context=self)
+ self.prefix, module_name, parent_module=None, context=self, cpp=self.cpp)
return self.scope
@@ -119,7 +118,8 @@ class CythonUtilityCode(Code.UtilityCodeBase):
from . import Pipeline, ParseTreeTransforms
context = CythonUtilityCodeContext(
- self.name, compiler_directives=self.compiler_directives)
+ self.name, compiler_directives=self.compiler_directives,
+ cpp=cython_scope.is_cpp() if cython_scope else False)
context.prefix = self.prefix
context.cython_scope = cython_scope
#context = StringParseContext(self.name)
@@ -223,7 +223,7 @@ class CythonUtilityCode(Code.UtilityCodeBase):
for dep in self.requires:
if dep.is_cython_utility:
- dep.declare_in_scope(dest_scope)
+ dep.declare_in_scope(dest_scope, cython_scope=cython_scope)
return original_scope
diff --git a/Cython/Compiler/Visitor.py b/Cython/Compiler/Visitor.py
index 7e4b331..cfc027a 100644
--- a/Cython/Compiler/Visitor.py
+++ b/Cython/Compiler/Visitor.py
@@ -581,15 +581,23 @@ class MethodDispatcherTransform(EnvTransform):
# into a C function call (defined in the builtin scope)
if not function.entry:
return node
+ entry = function.entry
is_builtin = (
- function.entry.is_builtin or
- function.entry is self.current_env().builtin_scope().lookup_here(function.name))
+ entry.is_builtin or
+ entry is self.current_env().builtin_scope().lookup_here(function.name))
if not is_builtin:
if function.cf_state and function.cf_state.is_single:
# we know the value of the variable
# => see if it's usable instead
return self._delegate_to_assigned_value(
node, function, arg_list, kwargs)
+ if arg_list and entry.is_cmethod and entry.scope and entry.scope.parent_type.is_builtin_type:
+ if entry.scope.parent_type is arg_list[0].type:
+ # Optimised (unbound) method of a builtin type => try to "de-optimise".
+ return self._dispatch_to_method_handler(
+ entry.name, self_arg=None, is_unbound_method=True,
+ type_name=entry.scope.parent_type.name,
+ node=node, function=function, arg_list=arg_list, kwargs=kwargs)
return node
function_handler = self._find_handler(
"function_%s" % function.name, kwargs)
@@ -615,8 +623,7 @@ class MethodDispatcherTransform(EnvTransform):
obj_type = self_arg.type
is_unbound_method = False
if obj_type.is_builtin_type:
- if (obj_type is Builtin.type_type and self_arg.is_name and
- arg_list and arg_list[0].type.is_pyobject):
+ if obj_type is Builtin.type_type and self_arg.is_name and arg_list and arg_list[0].type.is_pyobject:
# calling an unbound method like 'list.append(L,x)'
# (ignoring 'type.mro()' here ...)
type_name = self_arg.name
diff --git a/Cython/Coverage.py b/Cython/Coverage.py
index 6fa0fe4..6b9c473 100644
--- a/Cython/Coverage.py
+++ b/Cython/Coverage.py
@@ -100,6 +100,8 @@ class Plugin(CoveragePlugin):
if not c_file:
return None # unknown file
rel_file_path, code = self._parse_lines(c_file, filename)
+ if code is None:
+ return None # no source found
return CythonModuleReporter(c_file, filename, rel_file_path, code)
def _find_source_files(self, filename):
diff --git a/Cython/Debugger/libcython.py b/Cython/Debugger/libcython.py
index ff6d58e..42546ef 100644
--- a/Cython/Debugger/libcython.py
+++ b/Cython/Debugger/libcython.py
@@ -488,7 +488,7 @@ class SourceFileDescriptor(object):
class CyGDBError(gdb.GdbError):
"""
- Base class for Cython-command related erorrs
+ Base class for Cython-command related errors
"""
def __init__(self, *args):
diff --git a/Cython/Debugger/libpython.py b/Cython/Debugger/libpython.py
index 26f3f69..dc3e44a 100644
--- a/Cython/Debugger/libpython.py
+++ b/Cython/Debugger/libpython.py
@@ -1941,7 +1941,6 @@ PyLocals()
##################################################################
import re
-import atexit
import warnings
import tempfile
import textwrap
@@ -2023,14 +2022,13 @@ class _LoggingState(object):
"""
def __init__(self):
- self.fd, self.filename = tempfile.mkstemp()
- self.file = os.fdopen(self.fd, 'r+')
+ f = tempfile.NamedTemporaryFile('r+')
+ self.file = f
+ self.filename = f.name
+ self.fd = f.fileno()
_execute("set logging file %s" % self.filename)
self.file_position_stack = []
- atexit.register(os.close, self.fd)
- atexit.register(os.remove, self.filename)
-
def __enter__(self):
if not self.file_position_stack:
_execute("set logging redirect on")
@@ -2596,7 +2594,7 @@ class PythonCodeExecutor(object):
inferior.
Of course, executing any code in the inferior may be dangerous and may
- leave the debuggee in an unsafe state or terminate it alltogether.
+ leave the debuggee in an unsafe state or terminate it altogether.
"""
if '\0' in code:
raise gdb.GdbError("String contains NUL byte.")
diff --git a/Cython/Distutils/build_ext.py b/Cython/Distutils/build_ext.py
index 0f3ef63..598bb4a 100644
--- a/Cython/Distutils/build_ext.py
+++ b/Cython/Distutils/build_ext.py
@@ -14,9 +14,11 @@ else:
class new_build_ext(_build_ext, object):
def finalize_options(self):
if self.distribution.ext_modules:
+ nthreads = getattr(self, 'parallel', None) # -j option in Py3.5+
+ nthreads = int(nthreads) if nthreads else None
from Cython.Build.Dependencies import cythonize
self.distribution.ext_modules[:] = cythonize(
- self.distribution.ext_modules)
+ self.distribution.ext_modules, nthreads=nthreads, force=self.force)
super(new_build_ext, self).finalize_options()
# This will become new_build_ext in the future.
diff --git a/Cython/Includes/Deprecated/python2.5.pxd b/Cython/Includes/Deprecated/python2.5.pxd
index f9a25c6..0a5036c 100644
--- a/Cython/Includes/Deprecated/python2.5.pxd
+++ b/Cython/Includes/Deprecated/python2.5.pxd
@@ -213,7 +213,7 @@ cdef extern from "Python.h":
object PyList_AsTuple (object)
int PyList_Check (object) # Always succeeds.
int PyList_CheckExact (object) # Always succeeds.
- int PyList_GET_SIZE (object) # Always suceeds.
+ int PyList_GET_SIZE (object) # Always succeeds.
object PyList_GetSlice (object, Py_ssize_t, Py_ssize_t)
int PyList_Insert (object, Py_ssize_t, object) except -1
object PyList_New (Py_ssize_t)
diff --git a/Cython/Includes/cpython/__init__.pxd b/Cython/Includes/cpython/__init__.pxd
index 27c1160..c81f4e6 100644
--- a/Cython/Includes/cpython/__init__.pxd
+++ b/Cython/Includes/cpython/__init__.pxd
@@ -10,13 +10,13 @@
# Read http://docs.python.org/api/refcounts.html which is so
# important I've copied it below.
#
-# For all the declaration below, whenver the Py_ function returns
+# For all the declaration below, whenever the Py_ function returns
# a *new reference* to a PyObject*, the return type is "object".
# When the function returns a borrowed reference, the return
# type is PyObject*. When Cython sees "object" as a return type
# it doesn't increment the reference count. When it sees PyObject*
# in order to use the result you must explicitly cast to <object>,
-# and when you do that Cython increments the reference count wether
+# and when you do that Cython increments the reference count whether
# you want it to or not, forcing you to an explicit DECREF (or leak memory).
# To avoid this we make the above convention. Note, you can
# always locally override this convention by putting something like
diff --git a/Cython/Includes/cpython/array.pxd b/Cython/Includes/cpython/array.pxd
index 20d73b4..d985e37 100644
--- a/Cython/Includes/cpython/array.pxd
+++ b/Cython/Includes/cpython/array.pxd
@@ -92,7 +92,7 @@ cdef extern from *: # Hard-coded utility code hack.
def __getbuffer__(self, Py_buffer* info, int flags):
# This implementation of getbuffer is geared towards Cython
- # requirements, and does not yet fullfill the PEP.
+ # requirements, and does not yet fulfill the PEP.
# In particular strided access is always provided regardless
# of flags
item_count = Py_SIZE(self)
@@ -143,7 +143,7 @@ cdef inline array copy(array self):
return op
cdef inline int extend_buffer(array self, char* stuff, Py_ssize_t n) except -1:
- """ efficent appending of new stuff of same type
+ """ efficient appending of new stuff of same type
(e.g. of same array type)
n: number of elements (not number of bytes!) """
cdef Py_ssize_t itemsize = self.ob_descr.itemsize
diff --git a/Cython/Includes/cpython/object.pxd b/Cython/Includes/cpython/object.pxd
index 093a43e..2d8deca 100644
--- a/Cython/Includes/cpython/object.pxd
+++ b/Cython/Includes/cpython/object.pxd
@@ -10,26 +10,27 @@ cdef extern from "Python.h":
ctypedef object (*unaryfunc)(object)
ctypedef object (*binaryfunc)(object, object)
ctypedef object (*ternaryfunc)(object, object, object)
- ctypedef int (*inquiry)(object)
- ctypedef Py_ssize_t (*lenfunc)(object)
+ ctypedef int (*inquiry)(object) except -1
+ ctypedef Py_ssize_t (*lenfunc)(object) except -1
ctypedef object (*ssizeargfunc)(object, Py_ssize_t)
ctypedef object (*ssizessizeargfunc)(object, Py_ssize_t, Py_ssize_t)
- ctypedef int (*ssizeobjargproc)(object, Py_ssize_t, object)
- ctypedef int (*ssizessizeobjargproc)(object, Py_ssize_t, Py_ssize_t, object)
- ctypedef int (*objobjargproc)(object, object, object)
- ctypedef int (*objobjproc)(object, object)
+ ctypedef int (*ssizeobjargproc)(object, Py_ssize_t, object) except -1
+ ctypedef int (*ssizessizeobjargproc)(object, Py_ssize_t, Py_ssize_t, object) except -1
+ ctypedef int (*objobjargproc)(object, object, object) except -1
+ ctypedef int (*objobjproc)(object, object) except -1
- ctypedef Py_hash_t (*hashfunc)(object)
+ ctypedef Py_hash_t (*hashfunc)(object) except -1
ctypedef object (*reprfunc)(object)
- ctypedef int (*cmpfunc)(object, object)
+ ctypedef int (*cmpfunc)(object, object) except -2
ctypedef object (*richcmpfunc)(object, object, int)
# The following functions use 'PyObject*' as first argument instead of 'object' to prevent
# accidental reference counting when calling them during a garbage collection run.
ctypedef void (*destructor)(PyObject*)
- ctypedef int (*visitproc)(PyObject*, void *)
- ctypedef int (*traverseproc)(PyObject*, visitproc, void*)
+ ctypedef int (*visitproc)(PyObject*, void *) except -1
+ ctypedef int (*traverseproc)(PyObject*, visitproc, void*) except -1
+ ctypedef void (*freefunc)(void*)
ctypedef object (*descrgetfunc)(object, object, object)
ctypedef int (*descrsetfunc)(object, object, object) except -1
@@ -46,6 +47,7 @@ cdef extern from "Python.h":
destructor tp_dealloc
traverseproc tp_traverse
inquiry tp_clear
+ freefunc tp_free
ternaryfunc tp_call
hashfunc tp_hash
diff --git a/Cython/Includes/cpython/pythread.pxd b/Cython/Includes/cpython/pythread.pxd
index e6c6395..392bef7 100644
--- a/Cython/Includes/cpython/pythread.pxd
+++ b/Cython/Includes/cpython/pythread.pxd
@@ -6,9 +6,11 @@ cdef extern from "pythread.h":
ctypedef void *PyThread_type_sema
void PyThread_init_thread()
- long PyThread_start_new_thread(void (*)(void *), void *)
+ long PyThread_start_new_thread(void (*)(void *), void *) # FIXME: legacy
+ #unsigned long PyThread_start_new_thread(void (*)(void *), void *) # returned 'long' before Py3.7
void PyThread_exit_thread()
- long PyThread_get_thread_ident()
+ long PyThread_get_thread_ident() # FIXME: legacy
+ #unsigned long PyThread_get_thread_ident() # returned 'long' before Py3.7
PyThread_type_lock PyThread_allocate_lock()
void PyThread_free_lock(PyThread_type_lock)
@@ -29,7 +31,7 @@ cdef extern from "pythread.h":
size_t PyThread_get_stacksize()
int PyThread_set_stacksize(size_t)
- # Thread Local Storage (TLS) API
+ # Thread Local Storage (TLS) API deprecated in CPython 3.7+
int PyThread_create_key()
void PyThread_delete_key(int)
int PyThread_set_key_value(int, void *)
@@ -38,3 +40,14 @@ cdef extern from "pythread.h":
# Cleanup after a fork
void PyThread_ReInitTLS()
+
+ # Thread Specific Storage (TSS) API in CPython 3.7+ (also backported)
+ #ctypedef struct Py_tss_t: pass # Cython built-in type
+ Py_tss_t Py_tss_NEEDS_INIT # Not normally useful: Cython auto-initialises declared "Py_tss_t" variables.
+ Py_tss_t * PyThread_tss_alloc()
+ void PyThread_tss_free(Py_tss_t *key)
+ int PyThread_tss_is_created(Py_tss_t *key)
+ int PyThread_tss_create(Py_tss_t *key)
+ void PyThread_tss_delete(Py_tss_t *key)
+ int PyThread_tss_set(Py_tss_t *key, void *value)
+ void * PyThread_tss_get(Py_tss_t *key)
diff --git a/Cython/Includes/libc/limits.pxd b/Cython/Includes/libc/limits.pxd
index 18ec0fd..39d10a1 100644
--- a/Cython/Includes/libc/limits.pxd
+++ b/Cython/Includes/libc/limits.pxd
@@ -1,29 +1,28 @@
# 5.2.4.2.1 Sizes of integer types
cdef extern from "<limits.h>":
+ const int CHAR_BIT
+ const int MB_LEN_MAX
- enum: CHAR_BIT
- enum: MB_LEN_MAX
+ const char CHAR_MIN
+ const char CHAR_MAX
- enum: CHAR_MIN
- enum: CHAR_MAX
+ const signed char SCHAR_MIN
+ const signed char SCHAR_MAX
+ const unsigned char UCHAR_MAX
- enum: SCHAR_MIN
- enum: SCHAR_MAX
- enum: UCHAR_MAX
+ const short SHRT_MIN
+ const short SHRT_MAX
+ const unsigned short USHRT_MAX
- enum: SHRT_MIN
- enum: SHRT_MAX
- enum: USHRT_MAX
+ const int INT_MIN
+ const int INT_MAX
+ const unsigned int UINT_MAX
- enum: INT_MIN
- enum: INT_MAX
- enum: UINT_MAX
+ const long LONG_MIN
+ const long LONG_MAX
+ const unsigned long ULONG_MAX
- enum: LONG_MIN
- enum: LONG_MAX
- enum: ULONG_MAX
-
- enum: LLONG_MIN
- enum: LLONG_MAX
- enum: ULLONG_MAX
+ const long long LLONG_MIN
+ const long long LLONG_MAX
+ const unsigned long long ULLONG_MAX
diff --git a/Cython/Includes/libc/signal.pxd b/Cython/Includes/libc/signal.pxd
index 1122d20..5d34935 100644
--- a/Cython/Includes/libc/signal.pxd
+++ b/Cython/Includes/libc/signal.pxd
@@ -6,13 +6,6 @@ cdef extern from "<signal.h>" nogil:
ctypedef int sig_atomic_t
- enum: SIGABRT
- enum: SIGFPE
- enum: SIGILL
- enum: SIGINT
- enum: SIGSEGV
- enum: SIGTERM
-
sighandler_t SIG_DFL
sighandler_t SIG_IGN
sighandler_t SIG_ERR
@@ -20,49 +13,52 @@ cdef extern from "<signal.h>" nogil:
sighandler_t signal (int signum, sighandler_t action)
int raise_"raise" (int signum)
-
-cdef extern from "<signal.h>" nogil:
-
- # Program Error
- enum: SIGFPE
- enum: SIGILL
- enum: SIGSEGV
- enum: SIGBUS
- enum: SIGABRT
- enum: SIGIOT
- enum: SIGTRAP
- enum: SIGEMT
- enum: SIGSYS
- # Termination
- enum: SIGTERM
- enum: SIGINT
- enum: SIGQUIT
- enum: SIGKILL
- enum: SIGHUP
- # Alarm
- enum: SIGALRM
- enum: SIGVTALRM
- enum: SIGPROF
- # Asynchronous I/O
- enum: SIGIO
- enum: SIGURG
- enum: SIGPOLL
- # Job Control
- enum: SIGCHLD
- enum: SIGCLD
- enum: SIGCONT
- enum: SIGSTOP
- enum: SIGTSTP
- enum: SIGTTIN
- enum: SIGTTOU
- # Operation Error
- enum: SIGPIPE
- enum: SIGLOST
- enum: SIGXCPU
- enum: SIGXFSZ
- # Miscellaneous
- enum: SIGUSR1
- enum: SIGUSR2
- enum: SIGWINCH
- enum: SIGINFO
-
+ # Signals
+ enum:
+ # Program Error
+ SIGFPE
+ SIGILL
+ SIGSEGV
+ SIGBUS
+ SIGABRT
+ SIGIOT
+ SIGTRAP
+ SIGEMT
+ SIGSYS
+ SIGSTKFLT
+ # Termination
+ SIGTERM
+ SIGINT
+ SIGQUIT
+ SIGKILL
+ SIGHUP
+ # Alarm
+ SIGALRM
+ SIGVTALRM
+ SIGPROF
+ # Asynchronous I/O
+ SIGIO
+ SIGURG
+ SIGPOLL
+ # Job Control
+ SIGCHLD
+ SIGCLD
+ SIGCONT
+ SIGSTOP
+ SIGTSTP
+ SIGTTIN
+ SIGTTOU
+ # Operation Error
+ SIGPIPE
+ SIGLOST
+ SIGXCPU
+ SIGXFSZ
+ SIGPWR
+ # Miscellaneous
+ SIGUSR1
+ SIGUSR2
+ SIGWINCH
+ SIGINFO
+ # Real-time signals
+ SIGRTMIN
+ SIGRTMAX
diff --git a/Cython/Includes/libcpp/deque.pxd b/Cython/Includes/libcpp/deque.pxd
index c36bef3..78862e5 100644
--- a/Cython/Includes/libcpp/deque.pxd
+++ b/Cython/Includes/libcpp/deque.pxd
@@ -1,21 +1,44 @@
cdef extern from "<deque>" namespace "std" nogil:
cdef cppclass deque[T,ALLOCATOR=*]:
+ ctypedef T value_type
+ ctypedef ALLOCATOR allocator_type
+
+ # these should really be allocator_type.size_type and
+ # allocator_type.difference_type to be true to the C++ definition
+ # but cython doesn't support deferred access on template arguments
+ ctypedef size_t size_type
+ ctypedef ptrdiff_t difference_type
+
cppclass iterator:
T& operator*()
iterator operator++()
iterator operator--()
+ iterator operator+(size_type)
+ iterator operator-(size_type)
+ difference_type operator-(iterator)
bint operator==(iterator)
bint operator!=(iterator)
+ bint operator<(iterator)
+ bint operator>(iterator)
+ bint operator<=(iterator)
+ bint operator>=(iterator)
cppclass reverse_iterator:
T& operator*()
- iterator operator++()
- iterator operator--()
+ reverse_iterator operator++()
+ reverse_iterator operator--()
+ reverse_iterator operator+(size_type)
+ reverse_iterator operator-(size_type)
+ difference_type operator-(reverse_iterator)
bint operator==(reverse_iterator)
bint operator!=(reverse_iterator)
+ bint operator<(reverse_iterator)
+ bint operator>(reverse_iterator)
+ bint operator<=(reverse_iterator)
+ bint operator>=(reverse_iterator)
cppclass const_iterator(iterator):
pass
- #cppclass const_reverse_iterator(reverse_iterator):
- # pass
+ cppclass const_reverse_iterator(reverse_iterator):
+ pass
deque() except +
deque(deque&) except +
deque(size_t) except +
diff --git a/Cython/Includes/libcpp/string.pxd b/Cython/Includes/libcpp/string.pxd
index 07b206b..1077cd1 100644
--- a/Cython/Includes/libcpp/string.pxd
+++ b/Cython/Includes/libcpp/string.pxd
@@ -9,9 +9,9 @@ cdef extern from "<string>" namespace "std" nogil:
cdef cppclass string:
string() except +
- string(char *) except +
- string(char *, size_t) except +
- string(string&) except +
+ string(const char *) except +
+ string(const char *, size_t) except +
+ string(const string&) except +
# as a string formed by a repetition of character c, n times.
string(size_t, char) except +
@@ -63,65 +63,65 @@ cdef extern from "<string>" namespace "std" nogil:
char& at(size_t)
char& operator[](size_t)
- int compare(string&)
+ int compare(const string&)
- string& append(string&)
- string& append(string&, size_t, size_t)
- string& append(char *)
- string& append(char *, size_t)
+ string& append(const string&)
+ string& append(const string&, size_t, size_t)
+ string& append(const char *)
+ string& append(const char *, size_t)
string& append(size_t, char)
void push_back(char c)
- string& assign (string&)
- string& assign (string&, size_t, size_t)
- string& assign (char *, size_t)
- string& assign (char *)
+ string& assign (const string&)
+ string& assign (const string&, size_t, size_t)
+ string& assign (const char *, size_t)
+ string& assign (const char *)
string& assign (size_t n, char c)
- string& insert(size_t, string&)
- string& insert(size_t, string&, size_t, size_t)
- string& insert(size_t, char* s, size_t)
+ string& insert(size_t, const string&)
+ string& insert(size_t, const string&, size_t, size_t)
+ string& insert(size_t, const char* s, size_t)
- string& insert(size_t, char* s)
+ string& insert(size_t, const char* s)
string& insert(size_t, size_t, char c)
size_t copy(char *, size_t, size_t)
- size_t find(string&)
- size_t find(string&, size_t)
- size_t find(char*, size_t pos, size_t)
- size_t find(char*, size_t pos)
+ size_t find(const string&)
+ size_t find(const string&, size_t)
+ size_t find(const char*, size_t pos, size_t)
+ size_t find(const char*, size_t pos)
size_t find(char, size_t pos)
- size_t rfind(string&, size_t)
- size_t rfind(char* s, size_t, size_t)
- size_t rfind(char*, size_t pos)
+ size_t rfind(const string&, size_t)
+ size_t rfind(const char* s, size_t, size_t)
+ size_t rfind(const char*, size_t pos)
size_t rfind(char c, size_t)
size_t rfind(char c)
- size_t find_first_of(string&, size_t)
- size_t find_first_of(char* s, size_t, size_t)
- size_t find_first_of(char*, size_t pos)
+ size_t find_first_of(const string&, size_t)
+ size_t find_first_of(const char* s, size_t, size_t)
+ size_t find_first_of(const char*, size_t pos)
size_t find_first_of(char c, size_t)
size_t find_first_of(char c)
- size_t find_first_not_of(string&, size_t)
- size_t find_first_not_of(char* s, size_t, size_t)
- size_t find_first_not_of(char*, size_t pos)
+ size_t find_first_not_of(const string&, size_t)
+ size_t find_first_not_of(const char* s, size_t, size_t)
+ size_t find_first_not_of(const char*, size_t pos)
size_t find_first_not_of(char c, size_t)
size_t find_first_not_of(char c)
- size_t find_last_of(string&, size_t)
- size_t find_last_of(char* s, size_t, size_t)
- size_t find_last_of(char*, size_t pos)
+ size_t find_last_of(const string&, size_t)
+ size_t find_last_of(const char* s, size_t, size_t)
+ size_t find_last_of(const char*, size_t pos)
size_t find_last_of(char c, size_t)
size_t find_last_of(char c)
- size_t find_last_not_of(string&, size_t)
- size_t find_last_not_of(char* s, size_t, size_t)
- size_t find_last_not_of(char*, size_t pos)
+ size_t find_last_not_of(const string&, size_t)
+ size_t find_last_not_of(const char* s, size_t, size_t)
+ size_t find_last_not_of(const char*, size_t pos)
string substr(size_t, size_t)
string substr()
@@ -130,27 +130,27 @@ cdef extern from "<string>" namespace "std" nogil:
size_t find_last_not_of(char c, size_t)
size_t find_last_not_of(char c)
- #string& operator= (string&)
- #string& operator= (char*)
+ #string& operator= (const string&)
+ #string& operator= (const char*)
#string& operator= (char)
- string operator+ (string& rhs)
- string operator+ (char* rhs)
+ string operator+ (const string& rhs)
+ string operator+ (const char* rhs)
- bint operator==(string&)
- bint operator==(char*)
+ bint operator==(const string&)
+ bint operator==(const char*)
- bint operator!= (string& rhs )
- bint operator!= (char* )
+ bint operator!= (const string& rhs )
+ bint operator!= (const char* )
- bint operator< (string&)
- bint operator< (char*)
+ bint operator< (const string&)
+ bint operator< (const char*)
- bint operator> (string&)
- bint operator> (char*)
+ bint operator> (const string&)
+ bint operator> (const char*)
- bint operator<= (string&)
- bint operator<= (char*)
+ bint operator<= (const string&)
+ bint operator<= (const char*)
- bint operator>= (string&)
- bint operator>= (char*)
+ bint operator>= (const string&)
+ bint operator>= (const char*)
diff --git a/Cython/Includes/libcpp/vector.pxd b/Cython/Includes/libcpp/vector.pxd
index caa8020..4f21dd3 100644
--- a/Cython/Includes/libcpp/vector.pxd
+++ b/Cython/Includes/libcpp/vector.pxd
@@ -24,10 +24,11 @@ cdef extern from "<vector>" namespace "std" nogil:
bint operator>=(iterator)
cppclass reverse_iterator:
T& operator*()
- iterator operator++()
- iterator operator--()
- iterator operator+(size_type)
- iterator operator-(size_type)
+ reverse_iterator operator++()
+ reverse_iterator operator--()
+ reverse_iterator operator+(size_type)
+ reverse_iterator operator-(size_type)
+ difference_type operator-(reverse_iterator)
bint operator==(reverse_iterator)
bint operator!=(reverse_iterator)
bint operator<(reverse_iterator)
diff --git a/Cython/Includes/numpy/__init__.pxd b/Cython/Includes/numpy/__init__.pxd
index 4791684..3e613e2 100644
--- a/Cython/Includes/numpy/__init__.pxd
+++ b/Cython/Includes/numpy/__init__.pxd
@@ -90,6 +90,7 @@ cdef extern from "numpy/arrayobject.h":
NPY_ANYORDER
NPY_CORDER
NPY_FORTRANORDER
+ NPY_KEEPORDER
ctypedef enum NPY_CLIPMODE:
NPY_CLIP
@@ -213,23 +214,16 @@ cdef extern from "numpy/arrayobject.h":
# -- the details of this may change.
def __getbuffer__(ndarray self, Py_buffer* info, int flags):
# This implementation of getbuffer is geared towards Cython
- # requirements, and does not yet fullfill the PEP.
+ # requirements, and does not yet fulfill the PEP.
# In particular strided access is always provided regardless
# of flags
- if info == NULL: return
-
- cdef int copy_shape, i, ndim
+ cdef int i, ndim
cdef int endian_detector = 1
cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
ndim = PyArray_NDIM(self)
- if sizeof(npy_intp) != sizeof(Py_ssize_t):
- copy_shape = 1
- else:
- copy_shape = 0
-
if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
raise ValueError(u"ndarray is not C contiguous")
@@ -240,7 +234,7 @@ cdef extern from "numpy/arrayobject.h":
info.buf = PyArray_DATA(self)
info.ndim = ndim
- if copy_shape:
+ if sizeof(npy_intp) != sizeof(Py_ssize_t):
# Allocate new buffer for strides and shape info.
# This is allocated as one block, strides first.
info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim)
@@ -260,16 +254,9 @@ cdef extern from "numpy/arrayobject.h":
cdef dtype descr = self.descr
cdef int offset
- cdef bint hasfields = PyDataType_HASFIELDS(descr)
-
- if not hasfields and not copy_shape:
- # do not call releasebuffer
- info.obj = None
- else:
- # need to call releasebuffer
- info.obj = self
+ info.obj = self
- if not hasfields:
+ if not PyDataType_HASFIELDS(descr):
t = descr.type_num
if ((descr.byteorder == c'>' and little_endian) or
(descr.byteorder == c'<' and not little_endian)):
diff --git a/Cython/Includes/posix/signal.pxd b/Cython/Includes/posix/signal.pxd
index ed00730..e503ef4 100644
--- a/Cython/Includes/posix/signal.pxd
+++ b/Cython/Includes/posix/signal.pxd
@@ -31,6 +31,11 @@ cdef extern from "<signal.h>" nogil:
sigset_t sa_mask
int sa_flags
+ ctypedef struct stack_t:
+ void *ss_sp
+ int ss_flags
+ size_t ss_size
+
enum: SA_NOCLDSTOP
enum: SIG_BLOCK
enum: SIG_UNBLOCK
@@ -64,3 +69,5 @@ cdef extern from "<signal.h>" nogil:
int sigemptyset (sigset_t *)
int sigfillset (sigset_t *)
int sigismember (const sigset_t *)
+
+ int sigaltstack(const stack_t *, stack_t *)
diff --git a/Cython/Includes/posix/time.pxd b/Cython/Includes/posix/time.pxd
index 6897823..6bc81bf 100644
--- a/Cython/Includes/posix/time.pxd
+++ b/Cython/Includes/posix/time.pxd
@@ -4,9 +4,6 @@ from posix.types cimport suseconds_t, time_t, clockid_t, timer_t
from posix.signal cimport sigevent
cdef extern from "" nogil:
- enum: CLOCK_PROCESS_CPUTIME_ID
- enum: CLOCK_THREAD_CPUTIME_ID
-
enum: CLOCK_REALTIME
enum: TIMER_ABSTIME
enum: CLOCK_MONOTONIC
diff --git a/Cython/Parser/Grammar b/Cython/Parser/Grammar
index ebfa9c8..214e36d 100644
--- a/Cython/Parser/Grammar
+++ b/Cython/Parser/Grammar
@@ -127,7 +127,7 @@ arglist: argument (',' argument)* [',']
# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
# we explicitly match '*' here, too, to give it proper precedence.
# Illegal combinations and orderings are blocked in ast.c:
-# multiple (test comp_for) arguements are blocked; keyword unpackings
+# multiple (test comp_for) arguments are blocked; keyword unpackings
# that precede iterable unpackings are blocked; etc.
argument: ( test [comp_for] |
test '=' test |
diff --git a/Cython/Shadow.py b/Cython/Shadow.py
index c25fc49..35a13e6 100644
--- a/Cython/Shadow.py
+++ b/Cython/Shadow.py
@@ -1,7 +1,7 @@
# cython.* namespace for pure mode.
from __future__ import absolute_import
-__version__ = "0.27.3"
+__version__ = "0.28"
try:
from __builtin__ import basestring
@@ -109,7 +109,7 @@ cclass = ccall = cfunc = _EmptyDecoratorAndManager()
returns = wraparound = boundscheck = initializedcheck = nonecheck = \
overflowcheck = embedsignature = cdivision = cdivision_warnings = \
- always_allows_keywords = profile = linetrace = infer_type = \
+ always_allows_keywords = profile = linetrace = infer_types = \
unraisable_tracebacks = freelist = \
lambda _: _EmptyDecoratorAndManager()
@@ -385,7 +385,7 @@ py_complex = typedef(complex, "double complex")
int_types = ['char', 'short', 'Py_UNICODE', 'int', 'Py_UCS4', 'long', 'longlong', 'Py_ssize_t', 'size_t']
float_types = ['longdouble', 'double', 'float']
complex_types = ['longdoublecomplex', 'doublecomplex', 'floatcomplex', 'complex']
-other_types = ['bint', 'void']
+other_types = ['bint', 'void', 'Py_tss_t']
to_repr = {
'longlong': 'long long',
@@ -420,13 +420,13 @@ for name in complex_types:
gs[name] = typedef(py_complex, to_repr(name, name))
bint = typedef(bool, "bint")
-void = typedef(int, "void")
+void = typedef(None, "void")
+Py_tss_t = typedef(None, "Py_tss_t")
for t in int_types + float_types + complex_types + other_types:
for i in range(1, 4):
gs["%s_%s" % ('p'*i, t)] = gs[t]._pointer(i)
-void = typedef(None, "void")
NULL = gs['p_void'](0)
# looks like 'gs' has some users out there by now...
diff --git a/Cython/StringIOTree.pxd b/Cython/StringIOTree.pxd
new file mode 100644
index 0000000..20455c9
--- /dev/null
+++ b/Cython/StringIOTree.pxd
@@ -0,0 +1,17 @@
+cimport cython
+
+cdef class StringIOTree:
+ cdef public list prepended_children
+ cdef public object stream
+ cdef public object write
+ cdef public list markers
+
+ @cython.locals(x=StringIOTree)
+ cpdef getvalue(self)
+ @cython.locals(child=StringIOTree)
+ cpdef copyto(self, target)
+ cpdef commit(self)
+ #def insert(self, iotree)
+ #def insertion_point(self)
+ @cython.locals(c=StringIOTree)
+ cpdef allmarkers(self)
diff --git a/Cython/StringIOTree.py b/Cython/StringIOTree.py
index 2429702..b406319 100644
--- a/Cython/StringIOTree.py
+++ b/Cython/StringIOTree.py
@@ -1,7 +1,42 @@
+r"""
+Implements a buffer with insertion points. When you know you need to
+"get back" to a place and write more later, simply call insertion_point()
+at that spot and get a new StringIOTree object that is "left behind".
+
+EXAMPLE:
+
+>>> a = StringIOTree()
+>>> _= a.write('first\n')
+>>> b = a.insertion_point()
+>>> _= a.write('third\n')
+>>> _= b.write('second\n')
+>>> a.getvalue().split()
+['first', 'second', 'third']
+
+>>> c = b.insertion_point()
+>>> d = c.insertion_point()
+>>> _= d.write('alpha\n')
+>>> _= b.write('gamma\n')
+>>> _= c.write('beta\n')
+>>> b.getvalue().split()
+['second', 'alpha', 'beta', 'gamma']
+
+>>> i = StringIOTree()
+>>> d.insert(i)
+>>> _= i.write('inserted\n')
+>>> out = StringIO()
+>>> a.copyto(out)
+>>> out.getvalue().split()
+['first', 'second', 'alpha', 'inserted', 'beta', 'gamma', 'third']
+"""
+
+from __future__ import absolute_import #, unicode_literals
+
try:
+ # Prefer cStringIO since io.StringIO() does not support writing 'str' in Py2.
from cStringIO import StringIO
except ImportError:
- from io import StringIO # does not support writing 'str' in Py2
+ from io import StringIO
class StringIOTree(object):
@@ -69,35 +104,3 @@ class StringIOTree(object):
def allmarkers(self):
children = self.prepended_children
return [m for c in children for m in c.allmarkers()] + self.markers
-
-
-__doc__ = r"""
-Implements a buffer with insertion points. When you know you need to
-"get back" to a place and write more later, simply call insertion_point()
-at that spot and get a new StringIOTree object that is "left behind".
-
-EXAMPLE:
-
->>> a = StringIOTree()
->>> _= a.write('first\n')
->>> b = a.insertion_point()
->>> _= a.write('third\n')
->>> _= b.write('second\n')
->>> a.getvalue().split()
-['first', 'second', 'third']
-
->>> c = b.insertion_point()
->>> d = c.insertion_point()
->>> _= d.write('alpha\n')
->>> _= b.write('gamma\n')
->>> _= c.write('beta\n')
->>> b.getvalue().split()
-['second', 'alpha', 'beta', 'gamma']
->>> i = StringIOTree()
->>> d.insert(i)
->>> _= i.write('inserted\n')
->>> out = StringIO()
->>> a.copyto(out)
->>> out.getvalue().split()
-['first', 'second', 'alpha', 'inserted', 'beta', 'gamma', 'third']
-"""
diff --git a/Cython/Tests/TestCodeWriter.py b/Cython/Tests/TestCodeWriter.py
index 6f9b547..42e457d 100644
--- a/Cython/Tests/TestCodeWriter.py
+++ b/Cython/Tests/TestCodeWriter.py
@@ -4,7 +4,7 @@ class TestCodeWriter(CythonTest):
# CythonTest uses the CodeWriter heavily, so do some checking by
# roundtripping Cython code through the test framework.
- # Note that this test is dependant upon the normal Cython parser
+ # Note that this test is dependent upon the normal Cython parser
# to generate the input trees to the CodeWriter. This save *a lot*
# of time; better to spend that time writing other tests than perfecting
# this one...
diff --git a/Cython/Utility/AsyncGen.c b/Cython/Utility/AsyncGen.c
index ea82be7..0750fdf 100644
--- a/Cython/Utility/AsyncGen.c
+++ b/Cython/Utility/AsyncGen.c
@@ -34,7 +34,7 @@ static PyObject *__Pyx__PyAsyncGenValueWrapperNew(PyObject *val);
static __pyx_CoroutineObject *__Pyx_AsyncGen_New(
- __pyx_coroutine_body_t body, PyObject *closure,
+ __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name) {
__pyx_PyAsyncGenObject *gen = PyObject_GC_New(__pyx_PyAsyncGenObject, __pyx_AsyncGenType);
if (unlikely(!gen))
@@ -42,7 +42,7 @@ static __pyx_CoroutineObject *__Pyx_AsyncGen_New(
gen->ag_finalizer = NULL;
gen->ag_closed = 0;
gen->ag_hooks_inited = 0;
- return __Pyx__Coroutine_NewInit((__pyx_CoroutineObject*)gen, body, closure, name, qualname, module_name);
+ return __Pyx__Coroutine_NewInit((__pyx_CoroutineObject*)gen, body, code, closure, name, qualname, module_name);
}
static int __pyx_AsyncGen_init(void);
@@ -102,8 +102,9 @@ static int __Pyx_async_gen_init_hooks(__pyx_PyAsyncGenObject *o) {
//////////////////// AsyncGenerator ////////////////////
//@requires: AsyncGeneratorInitFinalizer
//@requires: Coroutine.c::Coroutine
+//@requires: Coroutine.c::ReturnWithStopIteration
//@requires: ObjectHandling.c::PyObjectCallMethod1
-
+//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict
PyDoc_STRVAR(__Pyx_async_gen_send_doc,
"send(arg) -> send 'arg' into generator,\n\
@@ -279,19 +280,19 @@ __Pyx_async_gen_athrow(__pyx_PyAsyncGenObject *o, PyObject *args)
static PyGetSetDef __Pyx_async_gen_getsetlist[] = {
- {"__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name,
- PyDoc_STR("name of the async generator"), 0},
- {"__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
- PyDoc_STR("qualified name of the async generator"), 0},
- //REMOVED: {"ag_await", (getter)coro_get_cr_await, NULL,
- //REMOVED: PyDoc_STR("object being awaited on, or None")},
+ {(char*) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name,
+ (char*) PyDoc_STR("name of the async generator"), 0},
+ {(char*) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
+ (char*) PyDoc_STR("qualified name of the async generator"), 0},
+ //REMOVED: {(char*) "ag_await", (getter)coro_get_cr_await, NULL,
+ //REMOVED: (char*) PyDoc_STR("object being awaited on, or None")},
{0, 0, 0, 0, 0} /* Sentinel */
};
static PyMemberDef __Pyx_async_gen_memberlist[] = {
- //REMOVED: {"ag_frame", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_frame), READONLY},
- {"ag_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
- //REMOVED: {"ag_code", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_code), READONLY},
+ //REMOVED: {(char*) "ag_frame", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_frame), READONLY},
+ {(char*) "ag_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
+ //REMOVED: {(char*) "ag_code", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_code), READONLY},
//ADDED: "ag_await"
{(char*) "ag_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
(char*) PyDoc_STR("object being awaited on, or None")},
@@ -557,11 +558,13 @@ static PyMethodDef __Pyx_async_gen_asend_methods[] = {
};
+#if CYTHON_USE_ASYNC_SLOTS
static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_asend_as_async = {
PyObject_SelfIter, /* am_await */
0, /* am_aiter */
0 /* am_anext */
};
+#endif
static PyTypeObject __pyx__PyAsyncGenASendType_type = {
@@ -944,11 +947,13 @@ static PyMethodDef __Pyx_async_gen_athrow_methods[] = {
};
+#if CYTHON_USE_ASYNC_SLOTS
static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_athrow_as_async = {
PyObject_SelfIter, /* am_await */
0, /* am_aiter */
0 /* am_anext */
};
+#endif
static PyTypeObject __pyx__PyAsyncGenAThrowType_type = {
@@ -1036,10 +1041,10 @@ __Pyx_async_gen_athrow_new(__pyx_PyAsyncGenObject *gen, PyObject *args)
static int __pyx_AsyncGen_init(void) {
// on Windows, C-API functions can't be used in slots statically
- __pyx_AsyncGenType_type.tp_getattro = PyObject_GenericGetAttr;
- __pyx__PyAsyncGenWrappedValueType_type.tp_getattro = PyObject_GenericGetAttr;
- __pyx__PyAsyncGenAThrowType_type.tp_getattro = PyObject_GenericGetAttr;
- __pyx__PyAsyncGenASendType_type.tp_getattro = PyObject_GenericGetAttr;
+ __pyx_AsyncGenType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ __pyx__PyAsyncGenWrappedValueType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ __pyx__PyAsyncGenAThrowType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ __pyx__PyAsyncGenASendType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx_AsyncGenType = __Pyx_FetchCommonType(&__pyx_AsyncGenType_type);
if (unlikely(!__pyx_AsyncGenType))
diff --git a/Cython/Utility/Buffer.c b/Cython/Utility/Buffer.c
index 88b52a8..1aa4b8d 100644
--- a/Cython/Utility/Buffer.c
+++ b/Cython/Utility/Buffer.c
@@ -751,7 +751,7 @@ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const cha
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
- /* fall through */
+ CYTHON_FALLTHROUGH;
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
@@ -765,7 +765,7 @@ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const cha
++ts;
break;
}
- /* fall through */
+ CYTHON_FALLTHROUGH;
case 's':
/* 's' or new type (cannot be added to current pool) */
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
diff --git a/Cython/Utility/Builtins.c b/Cython/Utility/Builtins.c
index af8355c..57f2a4f 100644
--- a/Cython/Utility/Builtins.c
+++ b/Cython/Utility/Builtins.c
@@ -109,7 +109,7 @@ static PyObject* __Pyx_PyExec3(PyObject* o, PyObject* globals, PyObject* locals)
locals = globals;
}
- if (PyDict_GetItem(globals, PYIDENT("__builtins__")) == NULL) {
+ if (__Pyx_PyDict_GetItemStr(globals, PYIDENT("__builtins__")) == NULL) {
if (PyDict_SetItem(globals, PYIDENT("__builtins__"), PyEval_GetBuiltins()) < 0)
goto bad;
}
diff --git a/Cython/Utility/Coroutine.c b/Cython/Utility/Coroutine.c
index b8cd077..f795a76 100644
--- a/Cython/Utility/Coroutine.c
+++ b/Cython/Utility/Coroutine.c
@@ -15,7 +15,7 @@ static void __PyxPyIter_CheckErrorAndDecref(PyObject *source) {
static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) {
PyObject *source_gen, *retval;
#ifdef __Pyx_Coroutine_USED
- if (__Pyx_Coroutine_CheckExact(source)) {
+ if (__Pyx_Coroutine_Check(source)) {
// TODO: this should only happen for types.coroutine()ed generators, but we can't determine that here
Py_INCREF(source);
source_gen = source;
@@ -71,7 +71,7 @@ static PyObject* __Pyx__Coroutine_Yield_From_Generic(__pyx_CoroutineObject *gen,
return NULL;
}
// source_gen is now the iterator, make the first next() call
- if (__Pyx_Coroutine_CheckExact(source_gen)) {
+ if (__Pyx_Coroutine_Check(source_gen)) {
retval = __Pyx_Generator_Next(source_gen);
} else {
#if CYTHON_USE_TYPE_SLOTS
@@ -90,7 +90,7 @@ static PyObject* __Pyx__Coroutine_Yield_From_Generic(__pyx_CoroutineObject *gen,
static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) {
PyObject *retval;
- if (__Pyx_Coroutine_CheckExact(source)) {
+ if (__Pyx_Coroutine_Check(source)) {
if (unlikely(((__pyx_CoroutineObject*)source)->yieldfrom)) {
PyErr_SetString(
PyExc_RuntimeError,
@@ -126,7 +126,7 @@ static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *o); /*proto*/
static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o) {
#ifdef __Pyx_Coroutine_USED
- if (__Pyx_Coroutine_CheckExact(o)) {
+ if (__Pyx_Coroutine_Check(o)) {
return __Pyx_NewRef(o);
}
#endif
@@ -219,7 +219,7 @@ static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *obj) {
} else {
int is_coroutine = 0;
#ifdef __Pyx_Coroutine_USED
- is_coroutine |= __Pyx_Coroutine_CheckExact(res);
+ is_coroutine |= __Pyx_Coroutine_Check(res);
#endif
#if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact)
is_coroutine |= PyCoro_CheckExact(res);
@@ -382,17 +382,18 @@ typedef struct {
PyObject *gi_name;
PyObject *gi_qualname;
PyObject *gi_modulename;
+ PyObject *gi_code;
int resume_label;
// using T_BOOL for property below requires char value
char is_running;
} __pyx_CoroutineObject;
static __pyx_CoroutineObject *__Pyx__Coroutine_New(
- PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *closure,
+ PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/
static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
- __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *closure,
+ __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/
static int __Pyx_Coroutine_clear(PyObject *self); /*proto*/
@@ -427,10 +428,12 @@ static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__pyx_CoroutineO
static PyTypeObject *__pyx_CoroutineType = 0;
static PyTypeObject *__pyx_CoroutineAwaitType = 0;
#define __Pyx_Coroutine_CheckExact(obj) (Py_TYPE(obj) == __pyx_CoroutineType)
+// __Pyx_Coroutine_Check(obj): see override for IterableCoroutine below
+#define __Pyx_Coroutine_Check(obj) __Pyx_Coroutine_CheckExact(obj)
#define __Pyx_CoroutineAwait_CheckExact(obj) (Py_TYPE(obj) == __pyx_CoroutineAwaitType)
-#define __Pyx_Coroutine_New(body, closure, name, qualname, module_name) \
- __Pyx__Coroutine_New(__pyx_CoroutineType, body, closure, name, qualname, module_name)
+#define __Pyx_Coroutine_New(body, code, closure, name, qualname, module_name) \
+ __Pyx__Coroutine_New(__pyx_CoroutineType, body, code, closure, name, qualname, module_name)
static int __pyx_Coroutine_init(void); /*proto*/
static PyObject *__Pyx__Coroutine_await(PyObject *coroutine); /*proto*/
@@ -450,8 +453,8 @@ static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, Py
static PyTypeObject *__pyx_GeneratorType = 0;
#define __Pyx_Generator_CheckExact(obj) (Py_TYPE(obj) == __pyx_GeneratorType)
-#define __Pyx_Generator_New(body, closure, name, qualname, module_name) \
- __Pyx__Coroutine_New(__pyx_GeneratorType, body, closure, name, qualname, module_name)
+#define __Pyx_Generator_New(body, code, closure, name, qualname, module_name) \
+ __Pyx__Coroutine_New(__pyx_GeneratorType, body, code, closure, name, qualname, module_name)
static PyObject *__Pyx_Generator_Next(PyObject *self);
static int __pyx_Generator_init(void); /*proto*/
@@ -594,7 +597,7 @@ static void __Pyx__Coroutine_AlreadyRunningError(CYTHON_UNUSED __pyx_CoroutineOb
const char *msg;
if (0) {
#ifdef __Pyx_Coroutine_USED
- } else if (__Pyx_Coroutine_CheckExact((PyObject*)gen)) {
+ } else if (__Pyx_Coroutine_Check((PyObject*)gen)) {
msg = "coroutine already executing";
#endif
#ifdef __Pyx_AsyncGen_USED
@@ -612,7 +615,7 @@ static void __Pyx__Coroutine_NotStartedError(CYTHON_UNUSED PyObject *gen) {
const char *msg;
if (0) {
#ifdef __Pyx_Coroutine_USED
- } else if (__Pyx_Coroutine_CheckExact(gen)) {
+ } else if (__Pyx_Coroutine_Check(gen)) {
msg = "can't send non-None value to a just-started coroutine";
#endif
#ifdef __Pyx_AsyncGen_USED
@@ -628,7 +631,7 @@ static void __Pyx__Coroutine_NotStartedError(CYTHON_UNUSED PyObject *gen) {
#define __Pyx_Coroutine_AlreadyTerminatedError(gen, value, closing) (__Pyx__Coroutine_AlreadyTerminatedError(gen, value, closing), (PyObject*)NULL)
static void __Pyx__Coroutine_AlreadyTerminatedError(CYTHON_UNUSED PyObject *gen, PyObject *value, CYTHON_UNUSED int closing) {
#ifdef __Pyx_Coroutine_USED
- if (!closing && __Pyx_Coroutine_CheckExact(gen)) {
+ if (!closing && __Pyx_Coroutine_Check(gen)) {
// `self` is an exhausted coroutine: raise an error,
// except when called from gen_close(), which should
// always be a silent method.
@@ -774,7 +777,7 @@ static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) {
} else
#endif
#ifdef __Pyx_Coroutine_USED
- if (__Pyx_Coroutine_CheckExact(yf)) {
+ if (__Pyx_Coroutine_Check(yf)) {
ret = __Pyx_Coroutine_Send(yf, value);
} else
#endif
@@ -827,7 +830,7 @@ static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) {
} else
#endif
#ifdef __Pyx_Coroutine_USED
- if (__Pyx_Coroutine_CheckExact(yf)) {
+ if (__Pyx_Coroutine_Check(yf)) {
retval = __Pyx_Coroutine_Close(yf);
if (!retval)
return -1;
@@ -891,6 +894,11 @@ static PyObject *__Pyx_Generator_Next(PyObject *self) {
ret = _PyGen_Send((PyGenObject*)yf, NULL);
} else
#endif
+ #ifdef __Pyx_Coroutine_USED
+ if (__Pyx_Coroutine_Check(yf)) {
+ ret = __Pyx_Coroutine_Send(yf, Py_None);
+ } else
+ #endif
ret = Py_TYPE(yf)->tp_iternext(yf);
gen->is_running = 0;
//Py_DECREF(yf);
@@ -925,7 +933,7 @@ static PyObject *__Pyx_Coroutine_Close(PyObject *self) {
Py_DECREF(retval);
if ((0)) {
#ifdef __Pyx_Coroutine_USED
- } else if (__Pyx_Coroutine_CheckExact(self)) {
+ } else if (__Pyx_Coroutine_Check(self)) {
msg = "coroutine ignored GeneratorExit";
#endif
#ifdef __Pyx_AsyncGen_USED
@@ -980,7 +988,7 @@ static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject
|| __Pyx_Generator_CheckExact(yf)
#endif
#ifdef __Pyx_Coroutine_USED
- || __Pyx_Coroutine_CheckExact(yf)
+ || __Pyx_Coroutine_Check(yf)
#endif
) {
ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit);
@@ -1056,6 +1064,7 @@ static int __Pyx_Coroutine_clear(PyObject *self) {
Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer);
}
#endif
+ Py_CLEAR(gen->gi_code);
Py_CLEAR(gen->gi_name);
Py_CLEAR(gen->gi_qualname);
Py_CLEAR(gen->gi_modulename);
@@ -1287,16 +1296,16 @@ __Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value)
}
static __pyx_CoroutineObject *__Pyx__Coroutine_New(
- PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *closure,
+ PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name) {
__pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type);
if (unlikely(!gen))
return NULL;
- return __Pyx__Coroutine_NewInit(gen, body, closure, name, qualname, module_name);
+ return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name);
}
static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
- __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *closure,
+ __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name) {
gen->body = body;
gen->closure = closure;
@@ -1315,6 +1324,8 @@ static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
gen->gi_name = name;
Py_XINCREF(module_name);
gen->gi_modulename = module_name;
+ Py_XINCREF(code);
+ gen->gi_code = code;
PyObject_GC_Track(gen);
return gen;
@@ -1324,6 +1335,7 @@ static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
//////////////////// Coroutine ////////////////////
//@requires: CoroutineBase
//@requires: PatchGeneratorABC
+//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict
static void __Pyx_CoroutineAwait_dealloc(PyObject *self) {
PyObject_GC_UnTrack(self);
@@ -1446,13 +1458,20 @@ static CYTHON_INLINE PyObject *__Pyx__Coroutine_await(PyObject *coroutine) {
}
static PyObject *__Pyx_Coroutine_await(PyObject *coroutine) {
- if (unlikely(!coroutine || !__Pyx_Coroutine_CheckExact(coroutine))) {
+ if (unlikely(!coroutine || !__Pyx_Coroutine_Check(coroutine))) {
PyErr_SetString(PyExc_TypeError, "invalid input, expected coroutine");
return NULL;
}
return __Pyx__Coroutine_await(coroutine);
}
+static PyObject *
+__Pyx_Coroutine_get_frame(CYTHON_UNUSED __pyx_CoroutineObject *self)
+{
+ // Fake implementation that always returns None, but at least does not raise an AttributeError.
+ Py_RETURN_NONE;
+}
+
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
static PyObject *__Pyx_Coroutine_compare(PyObject *obj, PyObject *other, int op) {
PyObject* result;
@@ -1485,6 +1504,7 @@ static PyMemberDef __pyx_Coroutine_memberlist[] = {
{(char *) "cr_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
{(char*) "cr_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
(char*) PyDoc_STR("object being awaited, or None")},
+ {(char*) "cr_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL},
{(char *) "__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), PY_WRITE_RESTRICTED, 0},
{0, 0, 0, 0, 0}
};
@@ -1494,6 +1514,8 @@ static PyGetSetDef __pyx_Coroutine_getsets[] = {
(char*) PyDoc_STR("name of the coroutine"), 0},
{(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
(char*) PyDoc_STR("qualified name of the coroutine"), 0},
+ {(char *) "cr_frame", (getter)__Pyx_Coroutine_get_frame, NULL,
+ (char*) PyDoc_STR("Frame of the coroutine"), 0},
{0, 0, 0, 0, 0}
};
@@ -1576,21 +1598,123 @@ static PyTypeObject __pyx_CoroutineType_type = {
static int __pyx_Coroutine_init(void) {
// on Windows, C-API functions can't be used in slots statically
- __pyx_CoroutineType_type.tp_getattro = PyObject_GenericGetAttr;
-
+ __pyx_CoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx_CoroutineType = __Pyx_FetchCommonType(&__pyx_CoroutineType_type);
if (unlikely(!__pyx_CoroutineType))
return -1;
+#ifdef __Pyx_IterableCoroutine_USED
+ if (unlikely(__pyx_IterableCoroutine_init() == -1))
+ return -1;
+#endif
+
__pyx_CoroutineAwaitType = __Pyx_FetchCommonType(&__pyx_CoroutineAwaitType_type);
if (unlikely(!__pyx_CoroutineAwaitType))
return -1;
return 0;
}
+
+//////////////////// IterableCoroutine.proto ////////////////////
+
+#define __Pyx_IterableCoroutine_USED
+
+static PyTypeObject *__pyx_IterableCoroutineType = 0;
+
+#undef __Pyx_Coroutine_Check
+#define __Pyx_Coroutine_Check(obj) (__Pyx_Coroutine_CheckExact(obj) || (Py_TYPE(obj) == __pyx_IterableCoroutineType))
+
+#define __Pyx_IterableCoroutine_New(body, code, closure, name, qualname, module_name) \
+ __Pyx__Coroutine_New(__pyx_IterableCoroutineType, body, code, closure, name, qualname, module_name)
+
+static int __pyx_IterableCoroutine_init(void);/*proto*/
+
+
+//////////////////// IterableCoroutine ////////////////////
+//@requires: Coroutine
+//@requires: CommonStructures.c::FetchCommonType
+
+static PyTypeObject __pyx_IterableCoroutineType_type = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "iterable_coroutine", /*tp_name*/
+ sizeof(__pyx_CoroutineObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+#if CYTHON_USE_ASYNC_SLOTS
+ &__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 only! */
+#else
+ 0, /*tp_reserved*/
+#endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/
+ 0, /*tp_doc*/
+ (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/
+ 0, /*tp_clear*/
+#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
+ // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare
+ __Pyx_Coroutine_compare, /*tp_richcompare*/
+#else
+ 0, /*tp_richcompare*/
+#endif
+ offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/
+ // enable iteration for legacy support of asyncio yield-from protocol
+ __Pyx_Coroutine_await, /*tp_iter*/
+ (iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/
+ __pyx_Coroutine_methods, /*tp_methods*/
+ __pyx_Coroutine_memberlist, /*tp_members*/
+ __pyx_Coroutine_getsets, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ 0, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+#if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_del*/
+#else
+ __Pyx_Coroutine_del, /*tp_del*/
+#endif
+ 0, /*tp_version_tag*/
+#if PY_VERSION_HEX >= 0x030400a1
+ __Pyx_Coroutine_del, /*tp_finalize*/
+#endif
+};
+
+
+static int __pyx_IterableCoroutine_init(void) {
+ __pyx_IterableCoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ __pyx_IterableCoroutineType = __Pyx_FetchCommonType(&__pyx_IterableCoroutineType_type);
+ if (unlikely(!__pyx_IterableCoroutineType))
+ return -1;
+ return 0;
+}
+
+
//////////////////// Generator ////////////////////
//@requires: CoroutineBase
//@requires: PatchGeneratorABC
+//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict
static PyMethodDef __pyx_Generator_methods[] = {
{"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O,
@@ -1606,6 +1730,7 @@ static PyMemberDef __pyx_Generator_memberlist[] = {
{(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
{(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
(char*) PyDoc_STR("object being iterated by 'yield from', or None")},
+ {(char*) "gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL},
{0, 0, 0, 0, 0}
};
@@ -1678,7 +1803,7 @@ static PyTypeObject __pyx_GeneratorType_type = {
static int __pyx_Generator_init(void) {
// on Windows, C-API functions can't be used in slots statically
- __pyx_GeneratorType_type.tp_getattro = PyObject_GenericGetAttr;
+ __pyx_GeneratorType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx_GeneratorType_type.tp_iter = PyObject_SelfIter;
__pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type);
@@ -1724,13 +1849,20 @@ static void __Pyx__ReturnWithStopIteration(PyObject* value) {
Py_INCREF(value);
exc = value;
}
+ #if CYTHON_FAST_THREAD_STATE
__Pyx_PyThreadState_assign
- if (!$local_tstate_cname->exc_type) {
+ #if PY_VERSION_HEX >= 0x030700A2
+ if (!$local_tstate_cname->exc_state.exc_type)
+ #else
+ if (!$local_tstate_cname->exc_type)
+ #endif
+ {
// no chaining needed => avoid the overhead in PyErr_SetObject()
Py_INCREF(PyExc_StopIteration);
__Pyx_ErrRestore(PyExc_StopIteration, exc, NULL);
return;
}
+ #endif
#else
args = PyTuple_Pack(1, value);
if (unlikely(!args)) return;
diff --git a/Cython/Utility/CythonFunction.c b/Cython/Utility/CythonFunction.c
index ba969c2..512449d 100644
--- a/Cython/Utility/CythonFunction.c
+++ b/Cython/Utility/CythonFunction.c
@@ -2,7 +2,6 @@
//////////////////// CythonFunction.proto ////////////////////
#define __Pyx_CyFunction_USED 1
-#include
#define __Pyx_CYFUNCTION_STATICMETHOD 0x01
#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02
@@ -75,6 +74,8 @@ static int __pyx_CyFunction_init(void);
//@requires: CommonStructures.c::FetchCommonType
////@requires: ObjectHandling.c::PyObjectGetAttrStr
+#include
+
static PyObject *
__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure)
{
diff --git a/Cython/Utility/Exceptions.c b/Cython/Utility/Exceptions.c
index fe37bf3..1774156 100644
--- a/Cython/Utility/Exceptions.c
+++ b/Cython/Utility/Exceptions.c
@@ -622,7 +622,7 @@ static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_li
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(${cython_runtime_cname});
if (likely(cython_runtime_dict)) {
- use_cline = PyDict_GetItem(*cython_runtime_dict, PYIDENT("cline_in_traceback"));
+ use_cline = __Pyx_PyDict_GetItemStr(*cython_runtime_dict, PYIDENT("cline_in_traceback"));
} else
#endif
{
diff --git a/Cython/Utility/ExtensionTypes.c b/Cython/Utility/ExtensionTypes.c
index 8173a10..fc11faf 100644
--- a/Cython/Utility/ExtensionTypes.c
+++ b/Cython/Utility/ExtensionTypes.c
@@ -1,3 +1,78 @@
+/////////////// PyType_Ready.proto ///////////////
+
+static int __Pyx_PyType_Ready(PyTypeObject *t);
+
+/////////////// PyType_Ready ///////////////
+
+// Wrapper around PyType_Ready() with some runtime checks and fixes
+// to deal with multiple inheritance.
+static int __Pyx_PyType_Ready(PyTypeObject *t) {
+ // Loop over all bases (except the first) and check that those
+ // really are heap types. Otherwise, it would not be safe to
+ // subclass them.
+ //
+ // We also check tp_dictoffset: it is unsafe to inherit
+ // tp_dictoffset from a base class because the object structures
+ // would not be compatible. So, if our extension type doesn't set
+ // tp_dictoffset (i.e. there is no __dict__ attribute in the object
+ // structure), we need to check that none of the base classes sets
+ // it either.
+ int r;
+ PyObject *bases = t->tp_bases;
+ if (bases)
+ {
+ Py_ssize_t i, n = PyTuple_GET_SIZE(bases);
+ for (i = 1; i < n; i++) /* Skip first base */
+ {
+ PyObject *b0 = PyTuple_GET_ITEM(bases, i);
+ PyTypeObject *b;
+#if PY_MAJOR_VERSION < 3
+ /* Disallow old-style classes */
+ if (PyClass_Check(b0))
+ {
+ PyErr_Format(PyExc_TypeError, "base class '%.200s' is an old-style class",
+ PyString_AS_STRING(((PyClassObject*)b0)->cl_name));
+ return -1;
+ }
+#endif
+ b = (PyTypeObject*)b0;
+ if (!PyType_HasFeature(b, Py_TPFLAGS_HEAPTYPE))
+ {
+ PyErr_Format(PyExc_TypeError, "base class '%.200s' is not a heap type",
+ b->tp_name);
+ return -1;
+ }
+ if (t->tp_dictoffset == 0 && b->tp_dictoffset)
+ {
+ PyErr_Format(PyExc_TypeError,
+ "extension type '%.200s' has no __dict__ slot, but base type '%.200s' has: "
+ "either add 'cdef dict __dict__' to the extension type "
+ "or add '__slots__ = [...]' to the base type",
+ t->tp_name, b->tp_name);
+ return -1;
+ }
+ }
+ }
+
+#if PY_VERSION_HEX >= 0x03050000
+ // As of https://bugs.python.org/issue22079
+ // PyType_Ready enforces that all bases of a non-heap type are
+ // non-heap. We know that this is the case for the solid base but
+ // other bases are heap allocated and are kept alive through the
+ // tp_bases reference.
+ // Other than this check, the Py_TPFLAGS_HEAPTYPE flag is unused
+ // in PyType_Ready().
+ t->tp_flags |= Py_TPFLAGS_HEAPTYPE;
+#endif
+
+ r = PyType_Ready(t);
+
+#if PY_VERSION_HEX >= 0x03050000
+ t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE;
+#endif
+
+ return r;
+}
/////////////// CallNextTpDealloc.proto ///////////////
diff --git a/Cython/Utility/ImportExport.c b/Cython/Utility/ImportExport.c
index a40f518..74553e0 100644
--- a/Cython/Utility/ImportExport.c
+++ b/Cython/Utility/ImportExport.c
@@ -656,6 +656,67 @@ bad:
}
+/////////////// MergeVTables.proto ///////////////
+//@requires: GetVTable
+
+static int __Pyx_MergeVtables(PyTypeObject *type); /*proto*/
+
+/////////////// MergeVTables ///////////////
+
+static int __Pyx_MergeVtables(PyTypeObject *type) {
+ int i;
+ void** base_vtables;
+ void* unknown = (void*)-1;
+ PyObject* bases = type->tp_bases;
+ int base_depth = 0;
+ {
+ PyTypeObject* base = type->tp_base;
+ while (base) {
+ base_depth += 1;
+ base = base->tp_base;
+ }
+ }
+ base_vtables = (void**) malloc(sizeof(void*) * (base_depth + 1));
+ base_vtables[0] = unknown;
+ // Could do MRO resolution of individual methods in the future, assuming
+ // compatible vtables, but for now simply require a common vtable base.
+ // Note that if the vtables of various bases are extended separately,
+ // resolution isn't possible and we must reject it just as when the
+ // instance struct is so extended. (It would be good to also do this
+ // check when a multiple-base class is created in pure Python as well.)
+ for (i = 1; i < PyTuple_GET_SIZE(bases); i++) {
+ void* base_vtable = __Pyx_GetVtable(((PyTypeObject*)PyTuple_GET_ITEM(bases, i))->tp_dict);
+ if (base_vtable != NULL) {
+ int j;
+ PyTypeObject* base = type->tp_base;
+ for (j = 0; j < base_depth; j++) {
+ if (base_vtables[j] == unknown) {
+ base_vtables[j] = __Pyx_GetVtable(base->tp_dict);
+ base_vtables[j + 1] = unknown;
+ }
+ if (base_vtables[j] == base_vtable) {
+ break;
+ } else if (base_vtables[j] == NULL) {
+ // No more potential matching bases (with vtables).
+ goto bad;
+ }
+ base = base->tp_base;
+ }
+ }
+ }
+ PyErr_Clear();
+ free(base_vtables);
+ return 0;
+bad:
+ PyErr_Format(
+ PyExc_TypeError,
+ "multiple bases have vtable conflict: '%s' and '%s'",
+ type->tp_base->tp_name, ((PyTypeObject*)PyTuple_GET_ITEM(bases, i))->tp_name);
+ free(base_vtables);
+ return -1;
+}
+
+
/////////////// ImportNumPyArray.proto ///////////////
static PyObject *__pyx_numpy_ndarray = NULL;
diff --git a/Cython/Utility/MemoryView.pyx b/Cython/Utility/MemoryView.pyx
index def3dd7..df83fe0 100644
--- a/Cython/Utility/MemoryView.pyx
+++ b/Cython/Utility/MemoryView.pyx
@@ -65,6 +65,7 @@ cdef extern from *:
PyBUF_STRIDES
PyBUF_INDIRECT
PyBUF_RECORDS
+ PyBUF_RECORDS_RO
ctypedef struct __Pyx_TypeInfo:
pass
@@ -408,6 +409,9 @@ cdef class memoryview(object):
return self.convert_item_to_object(itemp)
def __setitem__(memoryview self, object index, object value):
+ if self.view.readonly:
+ raise TypeError("Cannot assign to read-only memoryview")
+
have_slices, index = _unellipsify(index, self.view.ndim)
if have_slices:
@@ -507,6 +511,9 @@ cdef class memoryview(object):
@cname('getbuffer')
def __getbuffer__(self, Py_buffer *info, int flags):
+ if flags & PyBUF_WRITABLE and self.view.readonly:
+ raise ValueError("Cannot create writable memory view from read-only memoryview")
+
if flags & PyBUF_STRIDES:
info.shape = self.view.shape
else:
@@ -531,7 +538,7 @@ cdef class memoryview(object):
info.ndim = self.view.ndim
info.itemsize = self.view.itemsize
info.len = self.view.len
- info.readonly = 0
+ info.readonly = self.view.readonly
info.obj = self
__pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
@@ -1012,7 +1019,10 @@ cdef memoryview_fromslice({{memviewslice_name}} memviewslice,
(<__pyx_buffer *> &result.view).obj = Py_None
Py_INCREF(Py_None)
- result.flags = PyBUF_RECORDS
+ if (memviewslice.memview).flags & PyBUF_WRITABLE:
+ result.flags = PyBUF_RECORDS
+ else:
+ result.flags = PyBUF_RECORDS_RO
result.view.shape = result.from_slice.shape
result.view.strides = result.from_slice.strides
@@ -1340,7 +1350,7 @@ cdef void broadcast_leading({{memviewslice_name}} *mslice,
mslice.suboffsets[i] = -1
#
-### Take care of refcounting the objects in slices. Do this seperately from any copying,
+### Take care of refcounting the objects in slices. Do this separately from any copying,
### to minimize acquiring the GIL
#
diff --git a/Cython/Utility/MemoryView_C.c b/Cython/Utility/MemoryView_C.c
index 0ac4ce6..b1bd74a 100644
--- a/Cython/Utility/MemoryView_C.c
+++ b/Cython/Utility/MemoryView_C.c
@@ -82,7 +82,7 @@ typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
/////////////// ObjectToMemviewSlice.proto ///////////////
-static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *);
+static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *, int writable_flag);
////////// MemviewSliceInit.proto //////////
@@ -127,7 +127,7 @@ static CYTHON_INLINE char *__pyx_memviewslice_index_full(
/////////////// ObjectToMemviewSlice ///////////////
//@requires: MemviewSliceValidateAndInit
-static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *obj) {
+static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *obj, int writable_flag) {
{{memviewslice_name}} result = {{memslice_init}};
__Pyx_BufFmt_StackElem stack[{{struct_nesting_depth}}];
int axes_specs[] = { {{axes_specs}} };
@@ -140,7 +140,7 @@ static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *obj) {
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, {{c_or_f_flag}},
- {{buf_flag}}, {{ndim}},
+ {{buf_flag}} | writable_flag, {{ndim}},
&{{dtype_typeinfo}}, stack,
&result, obj);
diff --git a/Cython/Utility/ModuleSetupCode.c b/Cython/Utility/ModuleSetupCode.c
index 910d3e9..2aeabd2 100644
--- a/Cython/Utility/ModuleSetupCode.c
+++ b/Cython/Utility/ModuleSetupCode.c
@@ -195,6 +195,162 @@
#undef MASK
#endif
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+
+#ifndef __has_cpp_attribute
+ #define __has_cpp_attribute(x) 0
+#endif
+
+// restrict
+#ifndef CYTHON_RESTRICT
+ #if defined(__GNUC__)
+ #define CYTHON_RESTRICT __restrict__
+ #elif defined(_MSC_VER) && _MSC_VER >= 1400
+ #define CYTHON_RESTRICT __restrict
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_RESTRICT restrict
+ #else
+ #define CYTHON_RESTRICT
+ #endif
+#endif
+
+// unused attribute
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+# if defined(__cplusplus)
+ template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
+# else
+# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
+# endif
+#endif
+
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+# define CYTHON_NCP_UNUSED
+# else
+# define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+
+#ifdef _MSC_VER
+ #ifndef _MSC_STDINT_H_
+ #if _MSC_VER < 1300
+ typedef unsigned char uint8_t;
+ typedef unsigned int uint32_t;
+ #else
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int32 uint32_t;
+ #endif
+ #endif
+#else
+ #include
+#endif
+
+
+#ifndef CYTHON_FALLTHROUGH
+ #if defined(__cplusplus) && __cplusplus >= 201103L
+ #if __has_cpp_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH [[fallthrough]]
+ #elif __has_cpp_attribute(clang::fallthrough)
+ #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+ #elif __has_cpp_attribute(gnu::fallthrough)
+ #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+ #endif
+ #endif
+
+ #ifndef CYTHON_FALLTHROUGH
+ #if __has_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+ #else
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+
+ #if defined(__clang__ ) && defined(__apple_build_version__)
+ #if __apple_build_version__ < 7000000 /* Xcode < 7.0 */
+ #undef CYTHON_FALLTHROUGH
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+#endif
+
+/////////////// CInitCode ///////////////
+
+// inline attribute
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #elif defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+
+/////////////// CppInitCode ///////////////
+
+#ifndef __cplusplus
+ #error "Cython files generated with the C++ option must be compiled with a C++ compiler."
+#endif
+
+// inline attribute
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #else
+ #define CYTHON_INLINE inline
+ #endif
+#endif
+
+// Work around clang bug http://stackoverflow.com/questions/21847816/c-invoke-nested-template-class-destructor
+template
+void __Pyx_call_destructor(T& x) {
+ x.~T();
+}
+
+// Used for temporary variables of "reference" type.
+template
+class __Pyx_FakeReference {
+ public:
+ __Pyx_FakeReference() : ptr(NULL) { }
+ // __Pyx_FakeReference(T& ref) : ptr(&ref) { }
+ // Const version needed as Cython doesn't know about const overloads (e.g. for stl containers).
+ __Pyx_FakeReference(const T& ref) : ptr(const_cast(&ref)) { }
+ T *operator->() { return ptr; }
+ T *operator&() { return ptr; }
+ operator T&() { return *ptr; }
+ // TODO(robertwb): Delegate all operators (or auto-generate unwrapping code where needed).
+ template bool operator ==(U other) { return *ptr == other; }
+ template bool operator !=(U other) { return *ptr != other; }
+ private:
+ T *ptr;
+};
+
+
+/////////////// PythonCompatibility ///////////////
+
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
@@ -227,14 +383,18 @@
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
-#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL)
- // new in CPython 3.6, but changed in 3.7 - see https://bugs.python.org/issue29464
+#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
+ // new in CPython 3.6, but changed in 3.7 - see
+ // positional-only parameters:
+ // https://bugs.python.org/issue29464
+ // const args:
+ // https://bugs.python.org/issue32240
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
- typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs);
+ typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
// new in CPython 3.7, used to be old signature of _PyCFunctionFast() in 3.6
- typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args,
+ typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
@@ -247,6 +407,21 @@
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
+ #define PyObject_Malloc(s) PyMem_Malloc(s)
+ #define PyObject_Free(p) PyMem_Free(p)
+ #define PyObject_Realloc(p) PyMem_Realloc(p)
+#endif
+
+#if CYTHON_COMPILING_IN_PYSTON
+ // special C-API functions only in Pyston
+ #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
+#else
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
+#endif
+
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
@@ -259,6 +434,40 @@
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
+// TSS (Thread Specific Storage) API
+#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
+#include "pythread.h"
+#define Py_tss_NEEDS_INIT 0
+typedef int Py_tss_t;
+static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
+ *key = PyThread_create_key();
+ return 0; // PyThread_create_key reports success always
+}
+static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
+ Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
+ *key = Py_tss_NEEDS_INIT;
+ return key;
+}
+static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
+ PyObject_Free(key);
+}
+static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
+ return *key != Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
+ PyThread_delete_key(*key);
+ *key = Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
+ return PyThread_set_key_value(*key, value);
+}
+static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
+ return PyThread_get_key_value(*key);
+}
+// PyThread_delete_key_value(key) is equivalent to PyThread_set_key_value(key, NULL)
+// PyThread_ReInitTLS() is a no-op
+#endif // TSS (Thread Specific Storage) API
+
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
@@ -273,6 +482,12 @@
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
+#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
+#else
+#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
+#endif
+
/* new Py3.3 unicode type (PEP 393) */
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
@@ -324,21 +539,6 @@
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
-#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
- #define PyObject_Malloc(s) PyMem_Malloc(s)
- #define PyObject_Free(p) PyMem_Free(p)
- #define PyObject_Realloc(p) PyMem_Realloc(p)
-#endif
-
-#if CYTHON_COMPILING_IN_PYSTON
- // special C-API functions only in Pyston
- #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
- #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
-#else
- #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
- #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
-#endif
-
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
@@ -372,7 +572,12 @@
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
-#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
+#if CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
+#else
+ // NOTE: might fail with exception => check for -1
+ #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
+#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
@@ -412,19 +617,11 @@
#endif
#if PY_MAJOR_VERSION >= 3
- #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
+ #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
-#ifndef __has_attribute
- #define __has_attribute(x) 0
-#endif
-
-#ifndef __has_cpp_attribute
- #define __has_cpp_attribute(x) 0
-#endif
-
// backport of PyAsyncMethods from Py3.5 to older Py3.x versions
// (mis-)using the "tp_reserved" type slot which is re-activated as "tp_as_async" in Py3.5
#if CYTHON_USE_ASYNC_SLOTS
@@ -445,151 +642,39 @@
} __Pyx_PyAsyncMethodsStruct;
#endif
-// restrict
-#ifndef CYTHON_RESTRICT
- #if defined(__GNUC__)
- #define CYTHON_RESTRICT __restrict__
- #elif defined(_MSC_VER) && _MSC_VER >= 1400
- #define CYTHON_RESTRICT __restrict
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define CYTHON_RESTRICT restrict
- #else
- #define CYTHON_RESTRICT
- #endif
-#endif
-
-// unused attribute
-#ifndef CYTHON_UNUSED
-# if defined(__GNUC__)
-# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
-# else
-# define CYTHON_UNUSED
-# endif
-# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
-# else
-# define CYTHON_UNUSED
-# endif
-#endif
-
-#ifndef CYTHON_MAYBE_UNUSED_VAR
-# if defined(__cplusplus)
- template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
-# else
-# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
-# endif
-#endif
-#ifndef CYTHON_NCP_UNUSED
-# if CYTHON_COMPILING_IN_CPYTHON
-# define CYTHON_NCP_UNUSED
-# else
-# define CYTHON_NCP_UNUSED CYTHON_UNUSED
-# endif
-#endif
+/////////////// PyModInitFuncType.proto ///////////////
-#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#if PY_MAJOR_VERSION < 3
-#ifdef _MSC_VER
- #ifndef _MSC_STDINT_H_
- #if _MSC_VER < 1300
- typedef unsigned char uint8_t;
- typedef unsigned int uint32_t;
- #else
- typedef unsigned __int8 uint8_t;
- typedef unsigned __int32 uint32_t;
- #endif
- #endif
+#ifdef CYTHON_NO_PYINIT_EXPORT
+// define this to void manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
+#define __Pyx_PyMODINIT_FUNC void
#else
- #include
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
+#else
-#ifndef CYTHON_FALLTHROUGH
- #if defined(__cplusplus) && __cplusplus >= 201103L
- #if __has_cpp_attribute(fallthrough)
- #define CYTHON_FALLTHROUGH [[fallthrough]]
- #elif __has_cpp_attribute(clang::fallthrough)
- #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
- #elif __has_cpp_attribute(gnu::fallthrough)
- #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
- #endif
- #endif
-
- #ifndef CYTHON_FALLTHROUGH
- #if __has_attribute(fallthrough)
- #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
- #else
- #define CYTHON_FALLTHROUGH
- #endif
- #endif
-
- #if defined(__clang__ ) && defined(__apple_build_version__)
- #if __apple_build_version__ < 7000000 /* Xcode < 7.0 */
- #undef CYTHON_FALLTHROUGH
- #define CYTHON_FALLTHROUGH
- #endif
- #endif
+#ifdef CYTHON_NO_PYINIT_EXPORT
+// define this to PyObject * manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
+#define __Pyx_PyMODINIT_FUNC PyObject *
+#else
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
-/////////////// CInitCode ///////////////
-
-// inline attribute
-#ifndef CYTHON_INLINE
- #if defined(__clang__)
- #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
- #elif defined(__GNUC__)
- #define CYTHON_INLINE __inline__
- #elif defined(_MSC_VER)
- #define CYTHON_INLINE __inline
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define CYTHON_INLINE inline
- #else
- #define CYTHON_INLINE
- #endif
#endif
-
-/////////////// CppInitCode ///////////////
-
-#ifndef __cplusplus
- #error "Cython files generated with the C++ option must be compiled with a C++ compiler."
+#ifndef CYTHON_SMALL_CODE
+#if defined(__clang__)
+ #define CYTHON_SMALL_CODE
+#elif defined(__GNUC__)
+ #define CYTHON_SMALL_CODE __attribute__((optimize("Os")))
+#else
+ #define CYTHON_SMALL_CODE
#endif
-
-// inline attribute
-#ifndef CYTHON_INLINE
- #if defined(__clang__)
- #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
- #else
- #define CYTHON_INLINE inline
- #endif
#endif
-// Work around clang bug http://stackoverflow.com/questions/21847816/c-invoke-nested-template-class-destructor
-template
-void __Pyx_call_destructor(T& x) {
- x.~T();
-}
-
-// Used for temporary variables of "reference" type.
-template
-class __Pyx_FakeReference {
- public:
- __Pyx_FakeReference() : ptr(NULL) { }
- // __Pyx_FakeReference(T& ref) : ptr(&ref) { }
- // Const version needed as Cython doesn't know about const overloads (e.g. for stl containers).
- __Pyx_FakeReference(const T& ref) : ptr(const_cast(&ref)) { }
- T *operator->() { return ptr; }
- T *operator&() { return ptr; }
- operator T&() { return *ptr; }
- // TODO(robertwb): Delegate all operators (or auto-generate unwrapping code where needed).
- template bool operator ==(U other) { return *ptr == other; }
- template bool operator !=(U other) { return *ptr != other; }
- private:
- T *ptr;
-};
-
/////////////// FastTypeChecks.proto ///////////////
@@ -604,6 +689,8 @@ static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObj
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
+#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
+
/////////////// FastTypeChecks ///////////////
//@requires: Exceptions.c::PyThreadStateGet
//@requires: Exceptions.c::PyErrFetchRestore
@@ -1030,6 +1117,20 @@ end:
}
#endif /* CYTHON_REFNANNY */
+
+/////////////// ImportRefnannyAPI ///////////////
+
+#if CYTHON_REFNANNY
+__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+if (!__Pyx_RefNanny) {
+ PyErr_Clear();
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+ if (!__Pyx_RefNanny)
+ Py_FatalError("failed to import 'refnanny' module");
+}
+#endif
+
+
/////////////// RegisterModuleCleanup.proto ///////////////
//@substitute: naming
diff --git a/Cython/Utility/ObjectHandling.c b/Cython/Utility/ObjectHandling.c
index a48b4d7..4221057 100644
--- a/Cython/Utility/ObjectHandling.c
+++ b/Cython/Utility/ObjectHandling.c
@@ -170,12 +170,10 @@ static PyObject *__Pyx_PyIter_Next2Default(PyObject* defval) {
__Pyx_PyThreadState_assign
exc_type = __Pyx_PyErr_Occurred();
if (unlikely(exc_type)) {
- if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))
+ if (!defval || unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))
return NULL;
- if (defval) {
- __Pyx_PyErr_Clear();
- Py_INCREF(defval);
- }
+ __Pyx_PyErr_Clear();
+ Py_INCREF(defval);
return defval;
}
if (defval) {
@@ -194,7 +192,7 @@ static void __Pyx_PyIter_Next_ErrorNoIterator(PyObject *iterator) {
// originally copied from Py3's builtin_next()
static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject* iterator, PyObject* defval) {
PyObject* next;
- // we always do a quick slot check because always PyIter_Check() is so wasteful
+ // We always do a quick slot check because calling PyIter_Check() is so wasteful.
iternextfunc iternext = Py_TYPE(iterator)->tp_iternext;
if (likely(iternext)) {
#if CYTHON_USE_TYPE_SLOTS
@@ -206,15 +204,26 @@ static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject* iterator, PyObject*
return NULL;
#endif
#else
- // note: PyIter_Next() crashes if the slot is NULL in CPython
+ // Since the slot was set, assume that PyIter_Next() will likely succeed, and properly fail otherwise.
+ // Note: PyIter_Next() crashes in CPython if "tp_iternext" is NULL.
next = PyIter_Next(iterator);
if (likely(next))
return next;
#endif
- } else if (CYTHON_USE_TYPE_SLOTS || !PyIter_Check(iterator)) {
+ } else if (CYTHON_USE_TYPE_SLOTS || unlikely(!PyIter_Check(iterator))) {
+ // If CYTHON_USE_TYPE_SLOTS, then the slot was not set and we don't have an iterable.
+ // Otherwise, don't trust "tp_iternext" and rely on PyIter_Check().
__Pyx_PyIter_Next_ErrorNoIterator(iterator);
return NULL;
}
+#if !CYTHON_USE_TYPE_SLOTS
+ else {
+ // We have an iterator with an empty "tp_iternext", but didn't call next() on it yet.
+ next = PyIter_Next(iterator);
+ if (likely(next))
+ return next;
+ }
+#endif
return __Pyx_PyIter_Next2Default(defval);
}
@@ -262,9 +271,68 @@ static CYTHON_INLINE int __Pyx_IterFinish(void) {
#endif
}
+
+/////////////// ObjectGetItem.proto ///////////////
+
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);/*proto*/
+#else
+#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
+#endif
+
+/////////////// ObjectGetItem ///////////////
+// //@requires: GetItemInt - added in IndexNode as it uses templating.
+
+#if CYTHON_USE_TYPE_SLOTS
+static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
+ PyObject *runerr;
+ Py_ssize_t key_value;
+ PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
+ if (unlikely(!(m && m->sq_item))) {
+ PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
+ return NULL;
+ }
+
+ key_value = __Pyx_PyIndex_AsSsize_t(index);
+ if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
+ return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
+ }
+
+ // Error handling code -- only manage OverflowError differently.
+ if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
+ PyErr_Clear();
+ PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
+ }
+ return NULL;
+}
+
+static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
+ PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
+ if (likely(m && m->mp_subscript)) {
+ return m->mp_subscript(obj, key);
+ }
+ return __Pyx_PyObject_GetIndex(obj, key);
+}
+#endif
+
+
/////////////// DictGetItem.proto ///////////////
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
+static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);/*proto*/
+
+#define __Pyx_PyObject_Dict_GetItem(obj, name) \
+ (likely(PyDict_CheckExact(obj)) ? \
+ __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name))
+
+#else
+#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
+#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name)
+#endif
+
+/////////////// DictGetItem ///////////////
+
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
value = PyDict_GetItemWithError(d, key);
@@ -280,8 +348,6 @@ static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
Py_INCREF(value);
return value;
}
-#else
- #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
/////////////// GetItemInt.proto ///////////////
@@ -808,7 +874,7 @@ static PyObject *__Pyx_Py3MetaclassGet(PyObject *bases, PyObject *mkw); /*proto*
//@requires: CalculateMetaclass
static PyObject *__Pyx_Py3MetaclassGet(PyObject *bases, PyObject *mkw) {
- PyObject *metaclass = mkw ? PyDict_GetItem(mkw, PYIDENT("metaclass")) : NULL;
+ PyObject *metaclass = mkw ? __Pyx_PyDict_GetItemStr(mkw, PYIDENT("metaclass")) : NULL;
if (metaclass) {
Py_INCREF(metaclass);
if (PyDict_DelItem(mkw, PYIDENT("metaclass")) < 0) {
@@ -845,7 +911,7 @@ static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *na
return NULL;
/* Python2 __metaclass__ */
- metaclass = PyDict_GetItem(dict, PYIDENT("__metaclass__"));
+ metaclass = __Pyx_PyDict_GetItemStr(dict, PYIDENT("__metaclass__"));
if (metaclass) {
Py_INCREF(metaclass);
if (PyType_Check(metaclass)) {
@@ -978,6 +1044,36 @@ static CYTHON_INLINE int __Pyx_PyDict_ContainsTF(PyObject* item, PyObject* dict,
return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
}
+/////////////// PySetContains.proto ///////////////
+
+static CYTHON_INLINE int __Pyx_PySet_ContainsTF(PyObject* key, PyObject* set, int eq); /* proto */
+
+/////////////// PySetContains ///////////////
+
+static int __Pyx_PySet_ContainsUnhashable(PyObject *set, PyObject *key) {
+ int result = -1;
+ if (PySet_Check(key) && PyErr_ExceptionMatches(PyExc_TypeError)) {
+ /* Convert key to frozenset */
+ PyObject *tmpkey;
+ PyErr_Clear();
+ tmpkey = PyFrozenSet_New(key);
+ if (tmpkey != NULL) {
+ result = PySet_Contains(set, tmpkey);
+ Py_DECREF(tmpkey);
+ }
+ }
+ return result;
+}
+
+static CYTHON_INLINE int __Pyx_PySet_ContainsTF(PyObject* key, PyObject* set, int eq) {
+ int result = PySet_Contains(set, key);
+
+ if (unlikely(result < 0)) {
+ result = __Pyx_PySet_ContainsUnhashable(set, key);
+ }
+ return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
+}
+
/////////////// PySequenceContains.proto ///////////////
static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) {
@@ -1041,6 +1137,21 @@ static PyObject *__Pyx_GetNameInClass(PyObject *nmspace, PyObject *name) {
return result;
}
+
+/////////////// SetNameInClass.proto ///////////////
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
+// Identifier names are always interned and have a pre-calculated hash value.
+#define __Pyx_SetNameInClass(ns, name, value) \
+ (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value))
+#elif CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_SetNameInClass(ns, name, value) \
+ (likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value))
+#else
+#define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value)
+#endif
+
+
/////////////// GetModuleGlobalName.proto ///////////////
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/
@@ -1052,10 +1163,20 @@ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*prot
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
+ // Identifier names are always interned and have a pre-calculated hash value.
+ result = _PyDict_GetItem_KnownHash($moddict_cname, name, ((PyASCIIObject *) name)->hash);
+ if (likely(result)) {
+ Py_INCREF(result);
+ } else if (unlikely(PyErr_Occurred())) {
+ result = NULL;
+ } else {
+#else
result = PyDict_GetItem($moddict_cname, name);
if (likely(result)) {
Py_INCREF(result);
} else {
+#endif
#else
result = PyObject_GetItem($moddict_cname, name);
if (!result) {
@@ -1114,9 +1235,102 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_LookupSpecial(PyObject* obj, PyObj
#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n)
#endif
+
+/////////////// PyObject_GenericGetAttrNoDict.proto ///////////////
+
+// Setting "tp_getattro" to anything but "PyObject_GenericGetAttr" disables fast method calls in Py3.7.
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
+#else
+// No-args macro to allow function pointer assignment.
+#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
+#endif
+
+/////////////// PyObject_GenericGetAttrNoDict ///////////////
+
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+
+static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
+ PyErr_Format(PyExc_AttributeError,
+#if PY_MAJOR_VERSION >= 3
+ "'%.50s' object has no attribute '%U'",
+ tp->tp_name, attr_name);
+#else
+ "'%.50s' object has no attribute '%.400s'",
+ tp->tp_name, PyString_AS_STRING(attr_name));
+#endif
+ return NULL;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
+ // Copied and adapted from _PyObject_GenericGetAttrWithDict() in CPython 2.6/3.7.
+ // To be used in the "tp_getattro" slot of extension types that have no instance dict and cannot be subclassed.
+ PyObject *descr;
+ PyTypeObject *tp = Py_TYPE(obj);
+
+ if (unlikely(!PyString_Check(attr_name))) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+
+ assert(!tp->tp_dictoffset);
+ descr = _PyType_Lookup(tp, attr_name);
+ if (unlikely(!descr)) {
+ return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
+ }
+
+ Py_INCREF(descr);
+
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
+ #endif
+ {
+ descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
+ // Optimise for the non-descriptor case because it is faster.
+ if (unlikely(f)) {
+ PyObject *res = f(descr, obj, (PyObject *)tp);
+ Py_DECREF(descr);
+ return res;
+ }
+ }
+ return descr;
+}
+#endif
+
+
+/////////////// PyObject_GenericGetAttr.proto ///////////////
+
+// Setting "tp_getattro" to anything but "PyObject_GenericGetAttr" disables fast method calls in Py3.7.
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
+#else
+// No-args macro to allow function pointer assignment.
+#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
+#endif
+
+/////////////// PyObject_GenericGetAttr ///////////////
+//@requires: PyObject_GenericGetAttrNoDict
+
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
+ if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+ return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
+}
+#endif
+
+
/////////////// PyObjectGetAttrStr.proto ///////////////
#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);/*proto*/
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+/////////////// PyObjectGetAttrStr ///////////////
+
+#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
@@ -1127,14 +1341,22 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject
#endif
return PyObject_GetAttr(obj, attr_name);
}
-#else
-#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
+
/////////////// PyObjectSetAttrStr.proto ///////////////
#if CYTHON_USE_TYPE_SLOTS
-#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o,n,NULL)
+#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL)
+static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value);/*proto*/
+#else
+#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n)
+#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v)
+#endif
+
+/////////////// PyObjectSetAttrStr ///////////////
+
+#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_setattro))
@@ -1145,9 +1367,6 @@ static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr
#endif
return PyObject_SetAttr(obj, attr_name, value);
}
-#else
-#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n)
-#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v)
#endif
@@ -1192,18 +1411,19 @@ static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) {
static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); /*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
+// FASTCALL methods receive "&empty_tuple" as simple "PyObject[0]*"
#define __Pyx_CallUnboundCMethod0(cfunc, self) \
- ((likely((cfunc)->func)) ? \
+ (likely((cfunc)->func) ? \
(likely((cfunc)->flag == METH_NOARGS) ? (*((cfunc)->func))(self, NULL) : \
- (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(cfunc)->func)(self, $empty_tuple, NULL)) : \
- ((cfunc)->flag == METH_VARARGS ? (*((cfunc)->func))(self, $empty_tuple) : \
- (PY_VERSION_HEX >= 0x030600B1 && (cfunc)->flag == METH_FASTCALL ? \
- (PY_VERSION_HEX >= 0x030700A0 ? \
- (*(__Pyx_PyCFunctionFast)(cfunc)->func)(self, &PyTuple_GET_ITEM($empty_tuple, 0), 0) : \
- (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &PyTuple_GET_ITEM($empty_tuple, 0), 0, NULL)) : \
- (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ? \
- (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &PyTuple_GET_ITEM($empty_tuple, 0), 0, NULL) : \
- __Pyx__CallUnboundCMethod0(cfunc, self)))))) : \
+ (PY_VERSION_HEX >= 0x030600B1 && likely((cfunc)->flag == METH_FASTCALL) ? \
+ (PY_VERSION_HEX >= 0x030700A0 ? \
+ (*(__Pyx_PyCFunctionFast)(cfunc)->func)(self, &$empty_tuple, 0) : \
+ (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &$empty_tuple, 0, NULL)) : \
+ (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ? \
+ (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &$empty_tuple, 0, NULL) : \
+ (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(cfunc)->func)(self, $empty_tuple, NULL)) : \
+ ((cfunc)->flag == METH_VARARGS ? (*((cfunc)->func))(self, $empty_tuple) : \
+ __Pyx__CallUnboundCMethod0(cfunc, self)))))) : \
__Pyx__CallUnboundCMethod0(cfunc, self))
#else
#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self)
@@ -1234,18 +1454,10 @@ bad:
/////////////// CallUnboundCMethod1.proto ///////////////
-static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg); /*proto*/
+static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
-#define __Pyx_CallUnboundCMethod1(cfunc, self, arg) \
- ((likely((cfunc)->func && (cfunc)->flag == METH_O)) ? (*((cfunc)->func))(self, arg) : \
- ((PY_VERSION_HEX >= 0x030600B1 && (cfunc)->func && (cfunc)->flag == METH_FASTCALL) ? \
- (PY_VERSION_HEX >= 0x030700A0 ? \
- (*(__Pyx_PyCFunctionFast)(cfunc)->func)(self, &arg, 1) : \
- (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &arg, 1, NULL)) : \
- (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->func && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ? \
- (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &arg, 1, NULL) : \
- __Pyx__CallUnboundCMethod1(cfunc, self, arg))))
+static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/
#else
#define __Pyx_CallUnboundCMethod1(cfunc, self, arg) __Pyx__CallUnboundCMethod1(cfunc, self, arg)
#endif
@@ -1254,9 +1466,30 @@ static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObje
//@requires: UnpackUnboundCMethod
//@requires: PyObjectCall
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg) {
+ if (likely(cfunc->func)) {
+ int flag = cfunc->flag;
+ // Not using #ifdefs for PY_VERSION_HEX to avoid C compiler warnings about unused functions.
+ if (flag == METH_O) {
+ return (*(cfunc->func))(self, arg);
+ } else if (PY_VERSION_HEX >= 0x030600B1 && flag == METH_FASTCALL) {
+ if (PY_VERSION_HEX >= 0x030700A0) {
+ return (*(__Pyx_PyCFunctionFast)cfunc->func)(self, &arg, 1);
+ } else {
+ return (*(__Pyx_PyCFunctionFastWithKeywords)cfunc->func)(self, &arg, 1, NULL);
+ }
+ } else if (PY_VERSION_HEX >= 0x030700A0 && flag == (METH_FASTCALL | METH_KEYWORDS)) {
+ return (*(__Pyx_PyCFunctionFastWithKeywords)cfunc->func)(self, &arg, 1, NULL);
+ }
+ }
+ return __Pyx__CallUnboundCMethod1(cfunc, self, arg);
+}
+#endif
+
static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg){
PyObject *args, *result = NULL;
- if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
+ if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
#if CYTHON_COMPILING_IN_CPYTHON
if (cfunc->func && (cfunc->flag & METH_VARARGS)) {
args = PyTuple_New(1);
@@ -1287,6 +1520,77 @@ bad:
}
+/////////////// CallUnboundCMethod2.proto ///////////////
+
+static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2); /*proto*/
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1
+static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2); /*proto*/
+#else
+#define __Pyx_CallUnboundCMethod2(cfunc, self, arg1, arg2) __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2)
+#endif
+
+/////////////// CallUnboundCMethod2 ///////////////
+//@requires: UnpackUnboundCMethod
+//@requires: PyObjectCall
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1
+static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2) {
+ if (likely(cfunc->func)) {
+ PyObject *args[2] = {arg1, arg2};
+ if (cfunc->flag == METH_FASTCALL) {
+ #if PY_VERSION_HEX >= 0x030700A0
+ return (*(__Pyx_PyCFunctionFast)cfunc->func)(self, args, 2);
+ #else
+ return (*(__Pyx_PyCFunctionFastWithKeywords)cfunc->func)(self, args, 2, NULL);
+ #endif
+ }
+ #if PY_VERSION_HEX >= 0x030700A0
+ if (cfunc->flag == (METH_FASTCALL | METH_KEYWORDS))
+ return (*(__Pyx_PyCFunctionFastWithKeywords)cfunc->func)(self, args, 2, NULL);
+ #endif
+ }
+ return __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2);
+}
+#endif
+
+static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2){
+ PyObject *args, *result = NULL;
+ if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (cfunc->func && (cfunc->flag & METH_VARARGS)) {
+ args = PyTuple_New(2);
+ if (unlikely(!args)) goto bad;
+ Py_INCREF(arg1);
+ PyTuple_SET_ITEM(args, 0, arg1);
+ Py_INCREF(arg2);
+ PyTuple_SET_ITEM(args, 1, arg2);
+ if (cfunc->flag & METH_KEYWORDS)
+ result = (*(PyCFunctionWithKeywords)cfunc->func)(self, args, NULL);
+ else
+ result = (*cfunc->func)(self, args);
+ } else {
+ args = PyTuple_New(3);
+ if (unlikely(!args)) goto bad;
+ Py_INCREF(self);
+ PyTuple_SET_ITEM(args, 0, self);
+ Py_INCREF(arg1);
+ PyTuple_SET_ITEM(args, 1, arg1);
+ Py_INCREF(arg2);
+ PyTuple_SET_ITEM(args, 2, arg2);
+ result = __Pyx_PyObject_Call(cfunc->method, args, NULL);
+ }
+#else
+ args = PyTuple_Pack(3, self, arg1, arg2);
+ if (unlikely(!args)) goto bad;
+ result = __Pyx_PyObject_Call(cfunc->method, args, NULL);
+#endif
+bad:
+ Py_XDECREF(args);
+ return result;
+}
+
+
/////////////// PyObjectCallMethod0.proto ///////////////
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); /*proto*/
@@ -1373,12 +1677,11 @@ done:
}
static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) {
- PyObject *method, *result = NULL;
+ PyObject *method, *result;
method = __Pyx_PyObject_GetAttrStr(obj, method_name);
- if (unlikely(!method)) goto done;
+ if (unlikely(!method)) return NULL;
result = __Pyx__PyObject_CallMethod1(method, arg);
-done:
- Py_XDECREF(method);
+ Py_DECREF(method);
return result;
}
diff --git a/Cython/Utility/Optimize.c b/Cython/Utility/Optimize.c
index e09edf3..a17d7cf 100644
--- a/Cython/Utility/Optimize.c
+++ b/Cython/Utility/Optimize.c
@@ -198,6 +198,8 @@ static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObjec
value = default_value;
}
Py_INCREF(value);
+ // avoid C compiler warning about unused utility functions
+ if ((1));
#else
if (PyString_CheckExact(key) || PyUnicode_CheckExact(key) || PyInt_CheckExact(key)) {
/* these presumably have safe hash functions */
@@ -206,13 +208,14 @@ static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObjec
value = default_value;
}
Py_INCREF(value);
- } else {
- if (default_value == Py_None)
- default_value = NULL;
- value = PyObject_CallMethodObjArgs(
- d, PYIDENT("get"), key, default_value, NULL);
}
#endif
+ else {
+ if (default_value == Py_None)
+ value = CALL_UNBOUND_METHOD(PyDict_Type, "get", d, key);
+ else
+ value = CALL_UNBOUND_METHOD(PyDict_Type, "get", d, key, default_value);
+ }
return value;
}
@@ -222,7 +225,6 @@ static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObjec
static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value, int is_safe_type); /*proto*/
/////////////// dict_setdefault ///////////////
-//@requires: ObjectHandling.c::PyObjectCallMethod2
static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value,
CYTHON_UNUSED int is_safe_type) {
@@ -259,7 +261,7 @@ static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *ke
#endif
#endif
} else {
- value = __Pyx_PyObject_CallMethod2(d, PYIDENT("setdefault"), key, default_value);
+ value = CALL_UNBOUND_METHOD(PyDict_Type, "setdefault", d, key, default_value);
}
return value;
}
@@ -269,6 +271,28 @@ static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *ke
#define __Pyx_PyDict_Clear(d) (PyDict_Clear(d), 0)
+
+/////////////// py_dict_pop.proto ///////////////
+
+static CYTHON_INLINE PyObject *__Pyx_PyDict_Pop(PyObject *d, PyObject *key, PyObject *default_value); /*proto*/
+
+/////////////// py_dict_pop ///////////////
+
+static CYTHON_INLINE PyObject *__Pyx_PyDict_Pop(PyObject *d, PyObject *key, PyObject *default_value) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B3
+ if ((1)) {
+ return _PyDict_Pop(d, key, default_value);
+ } else
+ // avoid "function unused" warnings
+#endif
+ if (default_value) {
+ return CALL_UNBOUND_METHOD(PyDict_Type, "pop", d, key, default_value);
+ } else {
+ return CALL_UNBOUND_METHOD(PyDict_Type, "pop", d, key);
+ }
+}
+
+
/////////////// dict_iter.proto ///////////////
static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name,
@@ -397,6 +421,142 @@ static CYTHON_INLINE int __Pyx_dict_iter_next(
}
+/////////////// set_iter.proto ///////////////
+
+static CYTHON_INLINE PyObject* __Pyx_set_iterator(PyObject* iterable, int is_set,
+ Py_ssize_t* p_orig_length, int* p_source_is_set); /*proto*/
+static CYTHON_INLINE int __Pyx_set_iter_next(
+ PyObject* iter_obj, Py_ssize_t orig_length,
+ Py_ssize_t* ppos, PyObject **value,
+ int source_is_set); /*proto*/
+
+/////////////// set_iter ///////////////
+//@requires: ObjectHandling.c::IterFinish
+
+static CYTHON_INLINE PyObject* __Pyx_set_iterator(PyObject* iterable, int is_set,
+ Py_ssize_t* p_orig_length, int* p_source_is_set) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ is_set = is_set || likely(PySet_CheckExact(iterable) || PyFrozenSet_CheckExact(iterable));
+ *p_source_is_set = is_set;
+ if (unlikely(!is_set))
+ return PyObject_GetIter(iterable);
+ *p_orig_length = PySet_Size(iterable);
+ Py_INCREF(iterable);
+ return iterable;
+#else
+ (void)is_set;
+ *p_source_is_set = 0;
+ *p_orig_length = 0;
+ return PyObject_GetIter(iterable);
+#endif
+}
+
+static CYTHON_INLINE int __Pyx_set_iter_next(
+ PyObject* iter_obj, Py_ssize_t orig_length,
+ Py_ssize_t* ppos, PyObject **value,
+ int source_is_set) {
+ if (!CYTHON_COMPILING_IN_CPYTHON || unlikely(!source_is_set)) {
+ *value = PyIter_Next(iter_obj);
+ if (unlikely(!*value)) {
+ return __Pyx_IterFinish();
+ }
+ (void)orig_length;
+ (void)ppos;
+ return 0;
+ }
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(PySet_GET_SIZE(iter_obj) != orig_length)) {
+ PyErr_SetString(
+ PyExc_RuntimeError,
+ "set changed size during iteration");
+ return -1;
+ }
+ {
+ Py_hash_t hash;
+ int ret = _PySet_NextEntry(iter_obj, ppos, value, &hash);
+ // CPython does not raise errors here, only if !isinstance(iter_obj, set/frozenset)
+ assert (ret != -1);
+ if (likely(ret)) {
+ Py_INCREF(*value);
+ return 1;
+ }
+ return 0;
+ }
+#endif
+}
+
+/////////////// py_set_discard_unhashable ///////////////
+
+static int __Pyx_PySet_DiscardUnhashable(PyObject *set, PyObject *key) {
+ PyObject *tmpkey;
+ int rv;
+
+ if (likely(!PySet_Check(key) || !PyErr_ExceptionMatches(PyExc_TypeError)))
+ return -1;
+ PyErr_Clear();
+ tmpkey = PyFrozenSet_New(key);
+ if (tmpkey == NULL)
+ return -1;
+ rv = PySet_Discard(set, tmpkey);
+ Py_DECREF(tmpkey);
+ return rv;
+}
+
+
+/////////////// py_set_discard.proto ///////////////
+
+static CYTHON_INLINE int __Pyx_PySet_Discard(PyObject *set, PyObject *key); /*proto*/
+
+/////////////// py_set_discard ///////////////
+//@requires: py_set_discard_unhashable
+
+static CYTHON_INLINE int __Pyx_PySet_Discard(PyObject *set, PyObject *key) {
+ int found = PySet_Discard(set, key);
+ // Convert *key* to frozenset if necessary
+ if (unlikely(found < 0)) {
+ found = __Pyx_PySet_DiscardUnhashable(set, key);
+ }
+ // note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works
+ return found;
+}
+
+
+/////////////// py_set_remove.proto ///////////////
+
+static CYTHON_INLINE int __Pyx_PySet_Remove(PyObject *set, PyObject *key); /*proto*/
+
+/////////////// py_set_remove ///////////////
+//@requires: py_set_discard_unhashable
+
+static int __Pyx_PySet_RemoveNotFound(PyObject *set, PyObject *key, int found) {
+ // Convert *key* to frozenset if necessary
+ if (unlikely(found < 0)) {
+ found = __Pyx_PySet_DiscardUnhashable(set, key);
+ }
+ if (likely(found == 0)) {
+ // Not found
+ PyObject *tup;
+ tup = PyTuple_Pack(1, key);
+ if (!tup)
+ return -1;
+ PyErr_SetObject(PyExc_KeyError, tup);
+ Py_DECREF(tup);
+ return -1;
+ }
+ // note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works
+ return found;
+}
+
+static CYTHON_INLINE int __Pyx_PySet_Remove(PyObject *set, PyObject *key) {
+ int found = PySet_Discard(set, key);
+ if (unlikely(found != 1)) {
+ // note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works
+ return __Pyx_PySet_RemoveNotFound(set, key, found);
+ }
+ return 0;
+}
+
+
/////////////// unicode_iter.proto ///////////////
static CYTHON_INLINE int __Pyx_init_unicode_iteration(
@@ -658,6 +818,7 @@ static PyObject* __Pyx_PyInt_{{op}}{{order}}(PyObject *op1, PyObject *op2, CYTHO
{{endif}}
}
// if size doesn't fit into a long or PY_LONG_LONG anymore, fall through to default
+ CYTHON_FALLTHROUGH;
{{endfor}}
{{endfor}}
@@ -842,6 +1003,7 @@ static PyObject* __Pyx_PyFloat_{{op}}{{order}}(PyObject *op1, PyObject *op2, dou
// check above. However, the number of digits that CPython uses for a given PyLong
// value is minimal, and together with the "(size-1) * SHIFT < 53" check above,
// this should make it safe.
+ CYTHON_FALLTHROUGH;
{{endfor}}
default:
#else
diff --git a/Cython/Utility/Overflow.c b/Cython/Utility/Overflow.c
index 92cca76..6ddcf3b 100644
--- a/Cython/Utility/Overflow.c
+++ b/Cython/Utility/Overflow.c
@@ -1,7 +1,7 @@
/*
These functions provide integer arithmetic with integer checking. They do not
actually raise an exception when an overflow is detected, but rather set a bit
-in the overflow parameter. (This parameter may be re-used accross several
+in the overflow parameter. (This parameter may be re-used across several
arithmetic operations, so should be or-ed rather than assigned to.)
The implementation is divided into two parts, the signed and unsigned basecases,
diff --git a/Cython/Utility/Profile.c b/Cython/Utility/Profile.c
index 4b8a351..c4463e8 100644
--- a/Cython/Utility/Profile.c
+++ b/Cython/Utility/Profile.c
@@ -1,4 +1,5 @@
/////////////// Profile.proto ///////////////
+//@requires: Exceptions.c::PyErrFetchRestore
//@substitute: naming
// Note that cPython ignores PyTrace_EXCEPTION,
@@ -110,7 +111,7 @@
static void __Pyx_call_return_trace_func(PyThreadState *tstate, PyFrameObject *frame, PyObject *result) {
PyObject *type, *value, *traceback;
- PyErr_Fetch(&type, &value, &traceback);
+ __Pyx_ErrFetchInState(tstate, &type, &value, &traceback);
tstate->tracing++;
tstate->use_tracing = 0;
if (CYTHON_TRACE && tstate->c_tracefunc)
@@ -120,7 +121,7 @@
CYTHON_FRAME_DEL(frame);
tstate->use_tracing = 1;
tstate->tracing--;
- PyErr_Restore(type, value, traceback);
+ __Pyx_ErrRestoreInState(tstate, type, value, traceback);
}
#ifdef WITH_THREAD
@@ -172,7 +173,7 @@
static int __Pyx_call_line_trace_func(PyThreadState *tstate, PyFrameObject *frame, int lineno) {
int ret;
PyObject *type, *value, *traceback;
- PyErr_Fetch(&type, &value, &traceback);
+ __Pyx_ErrFetchInState(tstate, &type, &value, &traceback);
__Pyx_PyFrame_SetLineNumber(frame, lineno);
tstate->tracing++;
tstate->use_tracing = 0;
@@ -180,7 +181,7 @@
tstate->use_tracing = 1;
tstate->tracing--;
if (likely(!ret)) {
- PyErr_Restore(type, value, traceback);
+ __Pyx_ErrRestoreInState(tstate, type, value, traceback);
} else {
Py_XDECREF(type);
Py_XDECREF(value);
@@ -266,7 +267,7 @@ static int __Pyx_TraceSetupAndCall(PyCodeObject** code,
retval = 1;
tstate->tracing++;
tstate->use_tracing = 0;
- PyErr_Fetch(&type, &value, &traceback);
+ __Pyx_ErrFetchInState(tstate, &type, &value, &traceback);
#if CYTHON_TRACE
if (tstate->c_tracefunc)
retval = tstate->c_tracefunc(tstate->c_traceobj, *frame, PyTrace_CALL, NULL) == 0;
@@ -277,7 +278,7 @@ static int __Pyx_TraceSetupAndCall(PyCodeObject** code,
(CYTHON_TRACE && tstate->c_tracefunc));
tstate->tracing--;
if (retval) {
- PyErr_Restore(type, value, traceback);
+ __Pyx_ErrRestoreInState(tstate, type, value, traceback);
return tstate->use_tracing && retval;
} else {
Py_XDECREF(type);
diff --git a/Cython/Utility/StringTools.c b/Cython/Utility/StringTools.c
index 3caa8e8..85b987a 100644
--- a/Cython/Utility/StringTools.c
+++ b/Cython/Utility/StringTools.c
@@ -40,7 +40,7 @@ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
return -1;
// initialise cached hash value
if (PyObject_Hash(*t->p) == -1)
- PyErr_Clear();
+ return -1;
++t;
}
return 0;
@@ -1136,3 +1136,27 @@ static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObj
Py_DECREF(s);
return result;
}
+
+
+//////////////////// PyUnicode_Unicode.proto ////////////////////
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj);/*proto*/
+
+//////////////////// PyUnicode_Unicode ////////////////////
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj) {
+ if (unlikely(obj == Py_None))
+ obj = PYUNICODE("None");
+ return __Pyx_NewRef(obj);
+}
+
+
+//////////////////// PyObject_Unicode.proto ////////////////////
+
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyObject_Unicode(obj) \
+ (likely(PyUnicode_CheckExact(obj)) ? __Pyx_NewRef(obj) : PyObject_Str(obj))
+#else
+#define __Pyx_PyObject_Unicode(obj) \
+ (likely(PyUnicode_CheckExact(obj)) ? __Pyx_NewRef(obj) : PyObject_Unicode(obj))
+#endif
diff --git a/Cython/Utility/TypeConversion.c b/Cython/Utility/TypeConversion.c
index a48320c..0807ace 100644
--- a/Cython/Utility/TypeConversion.c
+++ b/Cython/Utility/TypeConversion.c
@@ -645,7 +645,8 @@ static const char DIGIT_PAIRS_8[2*8*8+1] = {
};
static const char DIGITS_HEX[2*16+1] = {
- "0123456789abcdef0123456789ABCDEF"
+ "0123456789abcdef"
+ "0123456789ABCDEF"
};
@@ -686,43 +687,42 @@ static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value, Py_ssize_t wid
if (format_char == 'X') {
hex_digits += 16;
format_char = 'x';
- };
+ }
// surprise: even trivial sprintf() calls don't get optimised in gcc (4.8)
remaining = value; /* not using abs(value) to avoid overflow problems */
last_one_off = 0;
dpos = end;
- while (remaining != 0) {
+ do {
int digit_pos;
switch (format_char) {
case 'o':
digit_pos = abs((int)(remaining % (8*8)));
- remaining = remaining / (8*8);
+ remaining = ({{TYPE}}) (remaining / (8*8));
dpos -= 2;
*(uint16_t*)dpos = ((uint16_t*)DIGIT_PAIRS_8)[digit_pos]; /* copy 2 digits at a time */
last_one_off = (digit_pos < 8);
break;
case 'd':
digit_pos = abs((int)(remaining % (10*10)));
- remaining = remaining / (10*10);
+ remaining = ({{TYPE}}) (remaining / (10*10));
dpos -= 2;
*(uint16_t*)dpos = ((uint16_t*)DIGIT_PAIRS_10)[digit_pos]; /* copy 2 digits at a time */
last_one_off = (digit_pos < 10);
break;
case 'x':
*(--dpos) = hex_digits[abs((int)(remaining % 16))];
- remaining = remaining / 16;
+ remaining = ({{TYPE}}) (remaining / 16);
break;
default:
assert(0);
break;
}
- }
+ } while (unlikely(remaining != 0));
+
if (last_one_off) {
assert(*dpos == '0');
dpos++;
- } else if (unlikely(dpos == end)) {
- *(--dpos) = '0';
}
length = end - dpos;
ulength = length;
diff --git a/Demos/freeze/README.txt b/Demos/freeze/README.txt
index 65e4831..18e0452 100644
--- a/Demos/freeze/README.txt
+++ b/Demos/freeze/README.txt
@@ -17,7 +17,7 @@ DESCRIPTION
with one or more Cython modules built in. This allows one to create a single
executable from Cython code, without having to have separate shared objects
for each Cython module. A major advantage of this approach is that it allows
-debuging with gprof(1), which does not work with shared objects.
+debugging with gprof(1), which does not work with shared objects.
Unless ``-p`` is given, the first module's ``__name__`` is set to
``"__main__"`` and is imported on startup; if ``-p`` is given, a normal Python
diff --git a/Tools/cython-mode.el b/Tools/cython-mode.el
index 61d2cf0..e4be99f 100644
--- a/Tools/cython-mode.el
+++ b/Tools/cython-mode.el
@@ -1,5 +1,7 @@
;;; cython-mode.el --- Major mode for editing Cython files
+;; License: Apache-2.0
+
;;; Commentary:
;; This should work with python-mode.el as well as either the new
diff --git a/Tools/rules.bzl b/Tools/rules.bzl
index 6bc4d56..cd3eed5 100644
--- a/Tools/rules.bzl
+++ b/Tools/rules.bzl
@@ -48,7 +48,7 @@ def pyx_library(
["-s '%s=%s'" % x for x in cython_options])
# TODO(robertwb): It might be better to only generate the C files,
# letting cc_library (or similar) handle the rest, but there isn't yet
- # suport compiling Python C extensions from bazel.
+ # support compiling Python C extensions from bazel.
native.genrule(
name = name + "_cythonize",
srcs = pyx_srcs,
diff --git a/Tools/site_scons/site_tools/pyext.py b/Tools/site_scons/site_tools/pyext.py
index 3396957..003d2de 100644
--- a/Tools/site_scons/site_tools/pyext.py
+++ b/Tools/site_scons/site_tools/pyext.py
@@ -116,7 +116,7 @@ def pyext_coms(platform):
return pyext_cccom, pyext_cxxcom, pyext_linkcom
def set_basic_vars(env):
- # Set construction variables which are independant on whether we are using
+ # Set construction variables which are independent on whether we are using
# distutils or not.
env['PYEXTCPPPATH'] = SCons.Util.CLVar('$PYEXTINCPATH')
diff --git a/appveyor.yml b/appveyor.yml
index eae2aeb..15ab737 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -4,7 +4,7 @@ environment:
global:
# SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the
- # /E:ON and /V:ON options are not enabled in the batch script intepreter
+ # /E:ON and /V:ON options are not enabled in the batch script interpreter
# See: http://stackoverflow.com/a/13751649/163740
WITH_ENV: "cmd /E:ON /V:ON /C .\\appveyor\\run_with_env.cmd"
@@ -64,6 +64,7 @@ build_script:
test: off
test_script:
+ - "%PYTHON%\\Scripts\\pip.exe install -r test-requirements.txt"
- "set CFLAGS=/Od"
- "%WITH_ENV% %PYTHON%\\python.exe runtests.py -vv --no-cpp -j7"
diff --git a/docs/src/quickstart/htmlreport.png b/docs/src/quickstart/htmlreport.png
index 6cc30f9476dba26b496137b084152e7e74c5004a..cc30cec9fd2c812082521886aee7190d64b99009 100644
GIT binary patch
literal 22739
zcmc$`cT|(zw=Nn#yNG;JL_om;qEw|S5Rfh)J%H3G(yO!p2}MyrdhZaVLqd^W0}9eh
zqy(h*76=F-v{23)zrF81XYb#>+E5C}xA
z^g>Px1Ujn*0-YJZ^cQf&W!IPn_&DRDr6>a`?z^=L{5WSRttt%yl}1n*r6eZ}@h~A^p!wz_-pigQM>C$kuij)P7X-3C^-NN_YLTcn
zc=f8*2m9;o7jD6rreBK$TL!8$D8Dv_q6?=_gzK4LkVQxxqBvq<=xQ$j?OLH
zul7d8$APRG_MGfjepRNOhx1R<0+Oc>Pu)*3%c=EylIb+i?D7#41fg<7a&KL|$or)5
zr27;v*p5B**ljwpJSE?kd643}9ds&&7@(E8bl2r7O7R6pKI`32k>(3skWIFeSt(k9
z(J$~%f6K&QheSjwYk+~rx-b35W9M-9vuPm_f6H_T_~EZ>T}CNhx@*QHO}bP7-1+o(
zSt#%_1h^w}NnZhYnTrVppFY7z#hUp8Wp>0}1YVEMKtdRGwwLwd(|xxWz3h{S{P}SGSssCDq`Hv%#?i^<$;N3EgK{IOX!^L%{e*|R
z@B&b45Vp}kI&LNx_xx#2Fviz3%Dk&urV*SNkd*HzZg>&}&Ez6sGPLT}ahv}!B2w(Tg!{%Cs
zTuXxSG|-z8dsVQs)w`6nWObkK^kd1W?{gwfzjP0?#3FU#PYod#jk=3z%EYJNuc=qY
zMBzCQLmVQD0gLY^YBYC#+WQ1=Rf!G}*$JpU!ZcZS9!8iIto`)2K*+zh`pXvFT?uHl
z4hfZf@6a?%`B{8b>Wk>1w;a+ovikNqN(=^US~twO){=7{LA6;I#KV||S8FVH2QdKeo>FXb+vwS8hGK377B5i2Yg%q{};7H2wM#^#`W_u}yHh{S=
zAb(u%tZ!IWvJgUZZe4KUW6jQ1jC(4Il2l6x_R6o*VXm~7(~uqf9Bwj{s8Dwpv%$Bi
z?iL}U%3^U=Yv*^xybeEN+QhQvgUbJGQLzwR%+ek{i0b$##@GG?tzvox1#T?FGqf><
zK=SY07724DaX#7!T)l!uR1`S8x^@Z0IJY^TeDR|l%^z0?YFM}AkmN;QW9IivsK!{M
zOlG-H=xUU08h@W((}_l@wMHy{u;u&=Dau{fJSkE38mekFhFE_;5*?GPTzA*2NR^1c
zxc#D9e^tF=IM=%c|6UF(G3$X1VI+^U{^!x3G9K6kJdKt^=-Lw|Jw6|X=}}0CAJ>*H
zdk||N%*OWk?WVocoTS(6Y4rK~hNxFMy5+XPcMPe$FGb+a78Hrn-R8D=fC!~Q)nzCP
z-9brN)5uoF1Rs0~ZFiZTAH%q)&3`9=kNpx3Vebpmj4gN_0;K=q~4
zq>eMh^7P}gK$GV=5yB7hQqQ3F_Iyvv-KUQ`Qyd#fZ7jaz-?M8pLfg-8}e$>UXhyIOzkUkO4JUU{Tsfm@(;b0Tzr@x(bg3|aAMGy4i9-)vDdQ)D+B0Z=#
z0eCs8)%U3VbcnX#4lcXQ?jFmbLPyY#7s!sX&$_EZ%SbK4#%O>&MH;rd&}=^Y
zZywlZ-N!z*)=2J!$;HzP=5Sgbe58?-BY{A<9~&NYbC)+r3YLoczmy?StVe18%Mm`N
ze}5iA1A3kO1s>CdLn9A({3SY!2+>g&a`deyHdpAk=h>QwwoSWD$_vs}HlCEsa(c3N
zx}bita5~iSvFWJ0se&5>dR_AG1q3X-9O~*2T$(;gIaQ2~tN!8D?%YsI5`QWn<#tnBVuHd&zH#zTl2Yl>Mt-1~gLQGOL7#JgZrJe5=P)`pIhg
zH7&69u#lusS%PIccsh%4e2^}+cYyVk6j!2s6sx24ou@73YS^BH=ZVt7_c{BLCj9Xh
z+iB_N$;oqMa$J@v39BXWHa3HS
z)1Q9$r0MEjdR~=v&+Vs%0nsQ!vKvQg7@6gq>
zhhgmf=z*3F0;{vCsD0msqo($nPQB+nN6#U7n8#zTR9ymrcE7LYBN@e^+vui4^!D@O
z_1vY;N89|TWfi|SPmfO-8I#-QUz~q*w4KnD8kaDdg$E)qCifhUj={DGfndVMyqKSt
zOfBUfryCR?dfxLI5tlVD!-^XjPS^0G8+#o0G(l2sff-dwh
z+pAe=C6LCfE2PBhcah_9u|(ureZjMPjgQ`jZ9}zJxcB#y3AJ-9
zHI5Bi)s!$=vXo5Nkq8pvtN&fk5{vnTObR$Y9BD$vk@sscl9uR~IN}k?eB4AGN0&rb
z_6Xd9*u+>4k1eh`TjN;l9P(K~q06y4o1~HH$cyQxN6V_c*SA#7{q=*g4a$2M&{Hje
zB_Z0p_V
zM+F3%%UrCEx+&iluHMsoF2$guaO&el8i8lTgX+^={vSm6y}0sTZ!<<8cm2>dQYe5%
zn=!eA8YpBeP$r#dbkX(}!;jrE9T#H8BnPIPO3`{4^ct{gc!he5actYxTrWTlEQu
zQDWQ+lyJAvR=a?jE@_wc8OKTCK5hAmdVs!=uxh92fHp}?Dt>Fbn}0mhAy)X=DJ@eO
zP0q{&U?z_zPHbIW{8)I@|j)gYjPN2=IjvHq1(Rfae_}TRnN57ll%VdF+dE(O
zL&D_MAsX2*aC00jFcT8n>Z{ejo)evuV9)R-63^kcXx0}e>i#@t_nwI|GwUE=#FH=Z
zCB$$W#*NFii_wB3+NM`PuMi%1QWnQ{y5FvMV$Qup<&1K~(x28eoNu!nlMsOp=@lDO
zYMCWZpEh1^oOE#a@i_s^ia5V+i|A{((n!pK?hhn12t2w=L@uNi+DFbrD9Sv|$?P1f&A&b+S^jOuT=0Z8GBK@Ucoz0;}nbGOaxQiAr
zJ%KsTzuSdqrTA2O=Kam{@j6UahhVt*zreG@%M-Tfm)uE*q5#urbEj+1Qyv8Scs)pJ869PEy!Ra
zy0?gr5cTA^ws~(-=xBeV1a_fa%15y;kWJ3>P6*|fwwiV1($ff0|7?%FVN8AL
z28icFD;jC^BWx#@W)4C{mb)rlJUNP!2!X83dq#6Ne&CL>Y(bYaKA;iMtT)U4u4m$@
z!@MVMV6!ky&8vP%-z^C`m*Tve5BDntf9Hcz1291;ajL6sjVou%4USZJYm#7gm(^t@bEWslJY2OWve^omZy0R{o|!C1eikS_!hX``ht
z5=HHOe}Uc5-L5WC#<@nXs6
z77VekezWPc@rZ7gO&G|CzDM)!W^t4vpN*%c0?g!x`g*6!48YJ}d0C
z_ojXUhu=sW$MAc1G?xFPbJ|q9f0MglAWK+>OKkVfw5H4S
zp3u`4jrQ4HvHY{!6~w?>qM4n#8_VqZnEd|6#*zy_uXCTH@daq8>*(OH=wkQGog}MV
zcfpHVVBx-uvkKCO8~L@H2yEb`@L8h+HA(LuyDMO*G-K-^BNyUzex1K(;vP>?WnJhvqKZ2{Hls<9{M4)Zg`JQbVYu&=!=rQavDt_Vldm^DtM>?A$xl?$=X
zs!D)k)|>`rtz7>>rQrSM}}dYy8`Mwt)f&MJpW;0Y9v25n^
zjF(MV(;e`oi0dihm-AjlxmB-w6Ffcz%!Qr=0zUottO2A1)=Sju4oAbP#2irV=c`TR
znO7e-=c-i#_F_`f5I1jaxLAzEq}WM*{v5BHQr$Nq$D$_^98!iW`{j9ocl1{kGVsPu{5-u=|K#g~bRT_|Sj6288r$d<6>mn!h!C;od_=
zdBei3wIh*ScA;7RPJQ&OR$7sU+{l($(cymfL6b51bA9ZU=kt6FTgrP+aN%p15_&>e
zU68?azpj$kEgUSxFie!B$aGDE(O;}F%+;&;_r9~XplyCizIY|C9by%=vqEhhR#Ux|
zyz@|K)Fg&^>yj?afbBKlr_cIx;P3S1?%IUAC|w)m0#JnKDrEDU(A3KY`ao+2;CK85
zl>40l!yqzkc}oRF303(|7-S8>aDrxnIM=5EX!<7cM)&_HU=8HMBUp<%?wYnU0&Sc5
z{RQ40ej5sWec$pCe_I^pMM2SPG$_XHS{%M@Z@U@O9=^HgF?$GigN^fjW3Tb9GJyJi
z=i|A7^aM2Bc@5Ow#W~gF^!lih=$g284eprTlf;+Oq@g%hT+vmdcebDHF6#;NQSkR=
zlm(7Fgcj35Tc55-fm~~Fp_h+tdCR9-yoHBfJixcK$SmO=5V;2
zms4w{K9fBGtj-O7r2H_BWpmAQjcj?yXVO
zfxJ5GwiCIipQ7`&+0>jYHghD)YK*WuiL;PDg??^IfdPO`A!=F>=vE>hZcu>(Zg#Rp
z#bGK{db88CH8TK>te{(q&5t!65{)Gu&S^#fIWx1Kl>>S#M$|nWj;$k4FW1a14k}R6sJ5
zO-sjvfx8_BI`gSKRo6^JacW^;j0&ogaSPl>EBzwXu{b}t
zuLO>H@o8Sj6#%ti>p();RjS6fCezaP?jF3TJlfsRs6^pTQ|TT!erS~HDh_t2Fi*wH
zr*veFG;Vw3NPX945gH12q;j4NE%C=ICz&kxc`!(4kQm}%6`I1dgdTOhW
zA`1EyC%u#_l_8z2EOC{5_DSi3o6I}gBcfRle!fs0{zlKW&W6V`6x|L?qXYp
z!FJd!C%oA+5cvVbeiQ$TE~f>qPS&n?QJ>@;9Q#I{6Ir
zy0q42+yu2qG9L=B2I{OsuSDl=XFYjgZKNWcR1s4hyOfMU~p-;n5mvBG?
zpl??;5Fb>+o^5g8;09f6J`5oW`x5=N$myw~g@sK-G0oO|cqk!4>iK?MtIz?|P5N{rNRtvu;o0Iqcl4PmKEn`RG*rTO&6
z;TTbm%9ktelIxw5pgB)}o?%@I6v*9Ou-U-I?LbZ|*PLh~#F#3h-aKgvaAsI8CFUG_
zTepwsr8e$g8(4|5U*Fuq_O1UK`J39SucH>(m-K95?F|~=In&NRu|#u+Ede7H#JH>X
zB%{T8ql*EPUFDQ$c|3$P>OHQFMs}$YpdKDWo%vx`1HG)pxq259zQKev*+$l?f+<3|=?eE1BPrP>emSd^I%tW_
zgj(`D>aJ7oFrPrSsuxvTzD34qsOxU7KVirZ^Mj+K*|Kr*Yd>V?KZ^{jm;Q9F>vkRY
zNVr`DqrKfdN_wo;d6#*PRd}!65rwX|Sg!($jsHxy@Jh+BnKvO0iyhuaV;vOT>cr
zuxvD?$-o`OSy?$GPWgRTp^}bl;$otx*|tabCWRzJ8^a7TtICOqp{i3hYh4Nhvm!f^
z5%aJ|7hk37K3kf~6PsEaCF~s1rYAIfw-O(fV@ooQNyBP_o!G;=d}O*;K{?}X_3KuZ
zVewI
zXmJg@YhB}+5NuUcxK^a1logPxmcu(n?UrF{wjvpF7&oIt6?KOUa
z6|5E*HCL!DuEoL3>VLZ9?8Y2U^Pv5DqprNYwFJlMYiovL-jk&VsB-O-K;D+OjJA9bA)>ws7+e_=$52`ge
zCqp(&Fz#?;fJ=*4(#PjmOk}yXsAa=j-Y_DpZ>2jMyP$bpL+wf#S6??w9C|t2Ue1qm
z{i2hyZL@S`fOq_b4a(o`Ce7&EiD3NmhKaK7aNhGevY4Z>UTBTCxW2t6i^9c6FO&>s
ztR~HC>8V|4%Gz$DGV(=7A895&*(L|`3rq@u)tS?!5HnXGC1~TxFU;f8ipJK$TwQ%e
zEeC|$<-{LXS<~bDbjNJ$C9`Uauevup7&Djd?}%jm!CrEQ!#l99j2s{T#~2TeEVl1R
zbQ#)=IlB0osSbP=60=D(4EMO&u8C3h+?2vB5r2*0&0){BCBml`--j<&fNyiH$!OMM
z-?105zy^&(5d$i%ZD;wJnFh|PFLyk^7H3L@r7R22pGDizGjbqaR@>co)Pc{47G71=
z8fxe+=Joeb9Bp3exK=D0{yi^qSWw+9G)0_Sz2pj}erO`DkgcDO;lN=~!GPv#T=wMALj$A_
z;n;ezqO!c`*jmP5=yqfpvQuiujN3Z*SB-U3?{VkI3v@47eU&-=i5EG+?(GNYVU@8c
zGC)v=C2lE-%j#*wfFG~Q%D@y>++!wO6esBsO1GSQn)`=?`1f4sw~JHI+sVY^Kad;$;XP
zR}b2(D{~13?)pr>H?(iwNARLY>Y@{<$5j;u+Y)RlKJM<}Nu!R$BnBH*S7BS0*i#Z*
z&5)}|L0+HJycC-e=e;7zetEQ58$}Ryuc|C^NEXE&UMFbjx*c&q%e2E+NOFThLutp>
zQ;?PE(FUoo#P0%R$SKAcF(*Q(u9Z4SZaiLnBAM{!upG|RUsEaaq{_P`pZL@#VZPc9
zAqE-t&vZ_heKE#NkR~pTs^w%tH@uJ$YmkVjN<_H$yw!x*=F1(Pe0zFEbM!p#_(Ov*
zk#*+eCF9+HOYicPxw(e!`o*!)>)#9u^fsSz1S`j#2-$lTD2Q8WgDaQkcIdI?!-ARw
z_*Ae#_!XIt+xe$a%f~NH*vsO)S=7cf45|0?Q1>_WWCr(zU?PO5$n;E+p)>nM<}F{%
zha;>KI;VPd^=Xm~?Kz~PyUHA25Soh#8xdS~>oP8u^@oF@_<}Di1IGn+56MMU!xQOV
zA;{xZL7>CaC_QjzR)k51ZCm%~Orp0!y`9OZyp|p{yoBfxWUMUMnLA${iWolY!0vo1oRZ5(CL7|g?7nPd9O!T6Di
zlKVg$?+f%B*~uiUmdv=C<)V7KX|0Zkw#Xd?$DvcNN`k;>S5=B@-p4{{_|ctUz89U5
zlV(Epw{9m&mTlKC=&QpAis63tleDl*@#uNoJQk+tw<^`)bq-ZR=QFgLq*Ym{$M>fa
zGk?~y%EsRc@}zoGLQsfMx3nw|AXA6kpQ^F@+SqLtXfePwZeN5`+kWzph8=ai#BNP}
zDnHUazo2~b*jmmWk7|!7ZxkC~)gjY~WrfJ3v0`jk(!0x{IF$e};jNhKF?rgFwH&uv
zYh^t8y@O($+?4Psf!kk(73SkM&gM7}cf6>dz6l
z=Qi@|V~J-~g6U`P$aoIM7s;jd=W%ZU`OQG@(5-6BP<;q%M)*d=)u_lfXbmTRRI4uO
zpBlGwYk0t{qqsLE6vtcswm{NX0%nfIn7g7VO4w6A;T#wF&zzwy=U$zo%*~#wQ#elQ
z)5NKuj}#%zHF9!0rSX}0wTS(rRCdY}|3}m_pql@;n%$Qg@7;Nvvbp!Mw;{mQK*T(K
zl1WXYiFxW|)!Kt#*8`BT#7p$x!mc=_H{YVI(sA|PRgEWp-i7hHTXQ?lNqm1~!LK|4
z`6ds*ram7u)VY!}HE_6f&fvx~cgLEA=9t~$wij>K_c`8l`?c3=x9t8FU9%Xkd)6Yw
zr+vwD_E3B&prGq;(rP?Cwbj4cUfA*ViJEQx{f6Hg2K*)?wmRnZv*LAzn^0&<`;v5>
z{Yzs~X3>S|*|2KwWf88+0CP0DkdNF(HoM%K6A$8FWH1yB@4)A%c6Mb`>!C
zW+UVY96WI{Df}abm7yBot*_1x>y>AmW09#b;C#>v_
zim%lac;zK02L~Co-PR~(a&M%@<$NvL)ud>pGl{MR$g-P?Sxyg4q0zlPeq&Qo`1`U+
zZjUlEjq0-d&1yBfV#myFGcEWF;4RnJxWWZocyesl);1rYAhVp7rTE(RjAJ>Hb1xh)
zfag2|F2wy!!A_GxjyEt0Px&yGBxODA6M8yb@$%?iY+r>aNeGjyBX53yzl%yJ`5YWx
z1m6PpK0JUVrRS);*f>(YkZ+O)9aiosn!V?(kezoOpx57KZ|;{~_4Q{wRTAbdu(9i;
zFF2jzv&7-ehI2j#JFhZl!aeIX%SY9SU13Gxx#J&VUl)jtPHHWB23^0Fg6qEacBjxp
zJ&_^?*Ek3G@U+zG*y!8H3)tsbY)z!hFO>cyW~L)_>e8*0HU(@&@adyPYtaR_aH_ET
zRE?Xt<*JHlRW=271OACdc=ezbYitO7B-^$hu}wdTT9(XtlmE;}o&>o#(7a9fxSPdp
zopEO|Xb2@4%Ag3?;HGpro(Vi~F09|*qW@<2;(C>SA(y6ngu&-sZnJTDZgagHvsMKial)K%LCB51+7!Od+
z%(wr4RCOWL)P10OP~v6XnJfQj_M>ml7lZz2`-x!SVjF78p@L;BR6X2ySU&XjCbIWYPwIL#UC`6hVhdetRyH=H2R+k0rm6v3U0)qY}F;Q2tX
zW%xR4*Kv{Crqei4-0O{F_>}^T%
znthGy?hLHcA!U9b6v2gaY`HcSkg;G3l1w1Q5ntT^hFQUzXTINIjW4EP*BZ~LDR?Gk
z#6G(TZyosCALTdt`ynb}GGDm8$YDl7SWrdVcHR)Jieju!_PHfW>-*P8wbV;ec9`MT
zYcYA31=&dyeAcYM=GbATElfN!rPk9|nKpqt{WB$JrP5M-ssL#u)N5QWHx=4@KAPL!
z7bl4So5NcwNr~&sn87+1hHB60%qR@aE{k_`E4L@UQl1>xKl@p)K<3mbxXAkpt73bu
zS6`gvW|9g7TX~0>3?~#4TU^aD_?k39sXJM^99CzU6#Zp
zYTGUIXR&Zb194AURP>{JEM>xk@SxRje;6uU&u3
zBu(@v`NNA!zXBarBURv-{2KfP&i+|je0PiDT@cFBBq;IP9|Hj2-v1}q`j>tDmlr7=
zr^T$NYJ|)vC{KZnlp2GsR(bh9ZR^(rT=ysDr
z)}xIvK{WP7W4(80ypdlBAq&@)d+RfvpKjby^p=~=Sd}a1@$2P=S*zUk)`Pk|mma6h
zrC!ADU0@)sfG+H4V0Jhi8JtJm6HGi;z_O$7o7mY7@r8CJfp}7(?raXD=v#>T{`XvI
z($#ufgeIoB+1KcrIv|VOsydrYi=SpV*4dl_FM8;bHYPFT=Lvvkgnqwb
z^!+AIg=!o6ynKPtXYm8Zmv^3mk--R4jJH*nJBzlnEh6e^D!1O2zyAF#)B~iq8n#Xe
zInIIF`xwUSYvD$?nEOJ#ADm}6BqPwFP&gH%^M%tGj7}AS6m<49Ll7ZG7%xR7aR8D@W
zC%RFuzLq~?C}+qdg}@+c1TUX|R_}qYpK61RgyJ9Z
zA>n-aCUzc~PJP#?{qY>-?@c3*9d3zY@tqp&fjl86W-H}0@jr^1^@D9&cUjZy2&S4j
z>vlH;;_2DNZJYJ9>Xi*6$Ro<#MClS|)N(pIHMY79wnO|q+-8RaPY3kkFYh@7&rf2K
zJdt0p^b{Ax0Aczw6Moo*G%c1WLRqeQoAYTT{OZ6B#p&>(Y*9_DRDH8Ml_F`XLB&7M
zLinA9d&{x*4W0!wTawOVBD917e_2z6ZDna>`1OCKoaj-PQ
zv>eZ2XhyxD{%QLlW}_tBuJT57ywJfE%gUBlWc14A;cYJ!mZ%k2rmyqE*hh!08K~u{
zJol1&2*9kel$dhR7>+p_u(j5~SX``O#;G`t!Sbg%M9-V};}`i7gd$5=
zN#}mX=x4$gpRr`Z!x%{AR`yG~cADM9qN#8#%uM*eyviTa-$*GH|CT8NW~fJN3-x-U
zo3OqhQ&2K{%r}e=FsL!9Q?YiIRULVvXKKufU$ahmQVz%G+9#@MJ2p{?zI*Ey(!=v2
zB{_-sSDwR6K+4xi(Rb!heI>SliEq>)c%i5FRVEAZuFlFahE@Hlzhp4ZIBjy5vY3YW
zWBayAp&f_9#e*3(PW9-jWy7M|Iq+GfoUV_LxTGsmYG4YP3h!BJK%k#&0Nt2}s6%6^
zJ6k)9(uEG(Jb&hNC#Hs^_Xz4i<4C(k#_;)Sbq3O4v&K|UtM)X>T~kzD-f=`_qf|lL
zwehHw(cQ-Ii3r)8+mMXm>QC9-A)nf1C~16KdCR8c88`Xh_*Gk5Ji?hOM6KJVT!(6R
z{nOw>xs?5F63i04CNex;G~Jv&Q{yIU^=MR@JnF8a!!csE!_FmF7PfuxH&Qbb3UBKx
zp=Cxo#=-1nml~mQy_XbHTqUQ+Ztm;>Vf~H?_i^+t@+*&0;=mbz#84!0Iv~`~psHV9
zQ1w$qcHUA=)Qf+-2yCRKYo=VM!17}RfHwco%kuXB{vvU9_Mnm&7_l*33mc{RCh3JA
zF3JloQAJ9rfcXH$z72qsS2!XxQTO0x=85uGfA3MneVd7bJ6RHQZwt;!cH@02xbw7F
zeX6C7y-UEp&bv88!+W0Csie@}i6&waC-Plw%u#MvMvZUp`i5FHq3fMeuZryxfCruh
z1_lf%Il8jgA<)GyL3ep#J=urXzL;m>VhP*_Jc&VcEfx+;)~^wYGLEKtOW+3~Y*b+P
zUon=OpVh!#_`ZXg^c&C4(O}u?-Q>Yf%2U7|t4G&jyg(pMKY;T*K_%Q^*Ij1+3IXeD
zr=OJ&+t|Chu^#)ap#9_`O=h~-rRn4wjQEGkE=?tWGe;$=;tAtWtD15bDO}`nmrUAk
zTe=k0#Jeszov}%LLQVb|1y?Jp-jRP*XDat}ZV~*I#O@~N*>Xj6E+6d*l_vD0lagfw
zT?|BguJu&L^tlxjl-VvXM~og$hDjmXD^sg1y4dJw=&nQ1&F9R9*d`l`fB)h&_E
zZ{W(yC7-liNQPkhsGse+`b%9hXr--oim_K}I0)0i^q@oWwbDv!qJKwNY1Zb#Gqmom
z*tp48#t|;-8;go2pbfQa6>MK?8ceQT3TYVQG>YWslka;
zuv~QVExw%9T+7~;IeoBsx`nLXAgOAPGo<^3CD=;^_kl$37RvGhDkN188kArRm{R0&z
zUJ8ZoyeLbb$TM^lwdQh7L2Ce`fE20Jwhl(cw3RN(E64*MW@x;kDjV
z=?PZ=f(I;GUIm;kOEP}Ts@m-aqwTaqx$?&1gDV}>M!?PYvJ_?wEYv%8sVwrwF1x#F
zQt0eIm$U!uw)^G^B-HnRzV@&JMg5}_t^>^Cf4azv+Ec$wIe*f@^N02W$J%5rC3*jW
z`#)Rx$|Hb2m7=Q2`3Hv$xTX06R7gaD<4h9SmB~#Z{+eNe5;(=D(1zPnb5O3^d7h^miw=|*BV02le-YTx{rqS5%
zxrgIx{jae$Z_CFUJZjnu1)Q6l{glsFA1uHGx3?$0MDW!|sHv0K2fr-uNinxyGDrI-n_r(Sp=WCuf1gqYN>T*
zfbMq^c%g|!HXR$SID8cQpJ_Tl&=zA;xgAatXE8LTGak>>S+mSMdm2X=i*2f$EV$4c
z4r9^DXC1#*V{Eja)@ICH3S~LYa&_u#&tel*8sa>v3z8CsF99U5XXTafmMTNCmS+0b
zX%lVRHANX@8WFhe*s!YZ
z^K)7p=nVJ=;8T(iY|}70na#GwmE^msmv9f!54Yljz`2iqLF4)Qq{`rP_s0Vr(AUf1
z^^8Q8nyzoDJ~_7Na@+b|!`YkWhL*;eu6-4;F&u~|x|TL366L};J|D~Fv7jp9kh8mM
z_hfI=`12gPWMKWop>tqq-OMC|sKlRvE9c!78ML_N0=hzlEJX9H^AFBxM7vQ_`j0qhfv~N)b2_x$yZP&f8D#-(qx~MMqsd
zvStlBD77b^d22(rBBS2Ox?08JKKmOPQ*~gI+jjb>Yrcu(+LF+gyGSX*9HErrrj#7a
zVAo}A*RcbI&47-BF94pLkbRKvFUwuv3AiyQ&PldfN2ABbK$cz3$yJa
z3EE)M=!0oDA9}#&iyu;E^1b+0$DcvPUshi#2z2&~7okSHyzBnLk>d!t&O{+SOb-D#
zG{imbCZNIyeHyqvviuH}fc$xl#B}(e*44+VVy|b(qkfDn`6C*+dY5vpj4llDG)=Z{
zkDaX|?#`)%+n5bYy~Zg~*Jzf?C2tQnw~OG2Z-s8{CnqKM&@U=QC)d5k2-OR1a59?
z)%Cw^nu4AuR&V7L4@cJCaScyPRht)B@2je|4O5c@_EFTQ8
zeA&&S9jlmFX7|(dS65>nv|Ea~!RMn>(_S~#urk3n?Rb-|+LrVryy(RBGgR2F$;ZAo
z=4wiWyq>MO2-1oUdUBoQ<7F?^m1G9UX%6=rI(br^6rXm_dSbxLsoJD`n>|G?cvr9`)GDON=G!sgi&
z_kom@ZmdywwY4(k{4WC#S!XDt2pkq913>+pa)kG984JpMfcFnY1`)sgHwoqs<-Joi
zIvSnd^L5JP7eo2{iGuIqD_1*!#3pWX4%QB0S|h3=yxqys#!o?v4(K8q@J$&
z$#U!{ZOXD(yMtv@PlP^@Qr>H~`~j5`0dKv-X;+9kcXCIn{=lA%=-y(~b}gxv&!+|_
z#gck5QwP}JqPz5eN4NF(0%W%A@`Q?
zeS<$6&DCfCn*nncsf=T!i`lq4k@Gj&nJT
zBc1I$>ty1DrEo0aQMz`-N2myGNfC;9y)YbNlV|uIYBIe7DxWx
z79R+0{i*G`ba8O!rAZ=s5Vf4bRZ)@&f71lcqZQrQ6IqCCz$eMLMKVSVA3E)-zdu|%NKYQ+8XKI$2C%pwMXJO
z;@Rv=^aX8oF&cb7aG#7wnU8?2w&{Zyj|wMNiw=uPx$WU7s_^?KwNv5!^+$yO+XZ0Z
zJ&;J8%Hdn&-_6G3*`}p9NQVw~00r+Aa2G};OjQsxFdrPR*y(}2*K{Ez(@*xEGf$ir
znjD=-IL!P7`lj?6V2(rW0-2BM<;Zh;_(en2^$QOB`Gp3MJ^PqH$*8V+$)dQVWWr&n
z5Lsukl*=E)UQ3*-hTxM3aOX=1T@m9h(v9Cx3(^Ys_}09y1F9i(+(Yj
zj{x>nS{EEq9NUNCD9uQzdy5Khh{E=FK#w8w)>NBIp$
z$2N6>rYiELPc5lb{r*y5Bfscct5G-3{~VuGy=IM#$=6=tl5?z$DNktvmk0e$x|${{
z!O?6Cm81CaKbir}7b?xu#!QOLaq%ArPvAV1kq3U(T=|n1fN!Axq+%9FB;@_Y@Op=q2!2zF<
zdqSj#=!VJ0@3loQ_z?}46vrNI#sep~6PPnU!1f(aj
z#**q!n!WWHa1{dx7Atr@ltT!GWQXew!vv$AH3-$#T3pGrbDV7VX~W_d1rpZu1fCfz
zjk`PkntUd<{3}{JN!X+Via1nTRnqE|rR3?zvlQW}Mjx|JZPHIY!LGN(S|e!&!AEyEOe1-uzgi{KlLEn48X-;hGs@OD@^D7spv`=sr*Wo2~n|J8m-k
z-Z+cEsED01)`t*&yJg=y6&TM;qwau@?r4C^_CD_oZNk~aRb$F3y#NRDb=s5u^FK!-
zmHxP>yKrsu=0Hd@d6{An|Nr8){~LVf1+M<*baXX9Wy~W^n=!q8@h{^h({T993F73<
zqN|$bUdgq4Sk~j!rm~3TZilxtVx9r}mi|yyAt4}?e)E*%F7jH6-8}@r4
zgEw_5@bOZylWnmRqqRZbNV^%vdhM#Fq|~}Ynp1o~+p%C;$uHXPH+(n>k9|+mjB%e<
zrw)%~LJB2d?WA}0B7v`mnO)O*YoQG3%u?6Z2;Ms1?H2=NXxlhnpIn~N#0I9h*V#QN
zu{L!S!vg!o-#-JU-7}G>1fy-3ayc2lq$N6Xi^}
zAJ@Th#KvlUo$~(4LDuP)?(xUEmJ;i;FvK+d2CPqY@wrH@#^;N(CvAqWA&Oy25Y}hK
zb}8c~k~)@1nhkwTNj4O4>H}CVqb$V$dFDX$)p;(x?a#Wnl$$yDMUF1#1_tw`kq~eJ
z5?3z;p+oSewDU?#I2<(?-$@P&xu$&WnZz}g4Uy88^XFeO_jg`kPZ{cF=VuFV0M507
zj3_JT?Vd8OBy1wWEt6sNp^{3
zY%ylW?ma)|r||`}w^8zyJ9hGrx1@x~|`K&h`C%&$V|ZI}VVQrvdm3%i4y
zozFqxulZWakhPt*!CVuncl5=QCkr!t*`WlPyonl{khWM<+KW
zkUzEv!?g6vi&;C6qg@c76eLz3W&ZKHu~cbvli5h4(Y@6hHYZV{%@@hkhW@Cx1!>!u
zu5T^EYgYUfx`J+h%7e1S-4&WDkCOZL4}Y_ss3zZpv5*TG)BLx|uI$a4Z7meahM8jOaxzZoaVTOaT
zULopU=;afRjeOx0=Fjg$+V$s93yEx8z;`R9U}gZdo*qmf&q`-5Yu12+0n_nkz%#7{
z*2(H(7e2xTjtDCmHm!ki)krWd()Al%6DZ8*;4!DIC{+2BmJH?tiFNt4vYlF6Kp8OE
zvH=^|{=Pd#2Y;&Wp@vSe9OWR&p#T3K-1s(5ASHzx9nRdo`v?pd9}sgrD2zvi1X9%^
z(Wc2UQsowhLr6DezYfIkL-)~oAwyju_w^xc9&da|6P97Whm;mixjA%ce%_!OJ-Cr!
z=lZz-XT?=B~B
zfj&q(gnZ4K;+x;BQ_+7YFGdCfBb~Uf`Jgvj?(>I|UhKxll!>*_8^p2v&Nx*Nhau2=
zfG48h(Xcz7@K00fkQsXC=kI8
zjV9EEpqe`nkDFG7vPi%b-~|2Kzvo3Sob8ZJ|D|){J6IT#AQVc91n4Np6as#{JJ6ks
ze&mmc0k5heUALv5
zJk#D9kYGYL0$)oT!L3B?b4-PNvdcn~6bQEPK`bcwG~`%J7I$pys7ftqFp#v&qnHfhQV~RtWj1A6Dyda_yk|gH!&{n5qc*^k)q{;1Q$UFBAreC*B^D{J
zvtPrjs;0}_;`!Q^i@A<+&4e5$_mhO3DkISdp>Fo=&|CV?O``g6x?^72f&E4KlDrF#
znd+xNMhK!DT#+R4dT)y;Nl5K_y3D@+{_mt$5v9ZB=K^BQRX$1L#ck6P^^OGw((g8W
zKaXB-=V|H45LRc-%iftVEnP|EV7ko#US(7Ne=s~U3iyNnHdA`tFo
z2xGFP%xD{*V}r3NwdnozA|UW4xbQgZV21{R<69C(D&eN>r=!1u+5{-TWplD`W~chy
zmJw$CQo6-4ZMwQ|oAkVbcX?;+y412kVtkm>Jl|DUGt22@Yi_!_!jt-C5w@(@51Nft
zy#}-n$zjl+hl%M5wERm1oK>S(i1_=KS;p*5^Rqr^oqXMD@`W8IL;bi#R@s;9u>O!(
z_}d3;YMjt9B4``r5L+O!J*++{zG$~|S*y({G*Iir7BZ-gR*2uru3l@)k*V{fuW5)n
zWUYg;0mYeR$HQ=08x4{$YfQBpntYx07h~ORwGgXGzD6b~q@;}`soE!~J
za4nBXH+4#eMS*9spz(R*Ze2Cwyo?5&z9ZcyrD5RnI4A)N{`P2^B9wLWPMED1j6&dPDPse*=I&T>
z;N-fpMybuL@wuz^f$u0UBERn7g(=tamznE)8u{{nNiD)Iica|{AER~=G9-*{y0Tr$
z_~dB6*N<{fVXLI-n@RV?uHH+dJlBbC`y_I!Wp{Ep-79NkzlBU$X0NmN%>xXQ+i1;T
z3w6rr~Itj!Qxd?n@t%bteGXTvJr7{~e#T8{@}F7pwWi2MPCj^$Sjt6$>MlrH*+7ppn?
zONi<~@dN?3sC+&%oej9Or0hz`iN!JCLo>%cjD({*wZz
z*dtEQ68DJb-;Mm}V5vb+b5Jl13`}zaKh{iV-TpFGPJeiZe+qNtoW?Mv298z5w+$Xl*d&Hg32p
zhi42wT!JNJBCaXDa3b0aC8^+)l9KrF>iKN(pTB{()KG#vkk%AJeS
z+^BY}@%FRZmepD{+HOT|4L6=AS1c}=(R@>HVVY~?{X&wW)8cx9rw)~__z<#hkF0*^2c&gK#Tn`OI>+0mBU3(W%wb)3`e%fk1!`?oM!bcY?dSJ0U=T;1*nh1b250?$E*AgUg)U|J_^j
z-c-%h9181d=yUq)?^|oHwRV(}f+RXB2`UT>47#+Gm@^C0uLdY$V!UA
zy#D*hZ7)uQfr;6d786nPTt3!E(!v{jR4`c}^7-<>_MXKW2Uj%)!PMldL``TJd9S>V4P;CqlLea|T&O@3@qq5pLhSVey={4fQ=`OlGuK>eRX
z3@r7Z!#bOQ%9(tx>69a(t@vaSrd8(IJZb;~1qJ0@hhWN+AJYOg#Aq+Yy8xlStmTbd
z?pGfiS{-45=+Mvt1L`*)r%!&4F3l*^%mib;2w7X@-3!C>7jqI{!sN|X=q?nHvWUL0
zDF1=$e4yvO)Dcja;s2t_q}pb5B(znW*sZG
za=)h?=s7t&M&|m24Rv&Kk54Cn(%!
z75sH%deY|@cs)*6TilLH0~Dg-zLt~{?(M
z)xK!m$WJ9<)BEQ5&yh>%i1YJVfFF}#OBXzMLGd^@6z-KIb5%Q{e(o16T!-y?o7dTT
zjy2`#sP-Z7hETCnYMPv8E*(oCgN~Pf#5()k{fX%He;Mx6%2CH7+g@nbZCu=UOk+cX
zKy(|DGGS4OP09Aw^n9-OfBdrH+d`?FFILPUfav57K^weVkc1w8QjQ3Eo>RFRzL&}E
zylryoY_wY;SRRZaMh||gqvb>|#!$0mDoaj-_E7U#|F
z2J_ztL-cTTEOp2giRJaavU!ojF%8-iW#aqu4=D-LzUIZSFDJ|OrbTP*=_KIqy9 O
zBG~VKhwz5hPS~y6Dw&Qc{UxQ%(zw4A`cp3LvCOux(w~$1mdq(cx!Q)s6gAklQl~l7
zTn;Bef~j#D=l+RdYYC)06DE$?$`MqG#qgNUs~hM>;bVj&drwk;5PRdkfqtLdPIfn^
zX)(!t^&zEyD!|Ae8mJvMY^>oE@WQinx%lmHqnrSuS!E!=aeLgu$kfg3dbYkcIha^M
zqeen{Y;8$JinhbS$`6kOGF3-+(al2h*gr>UwR!sy4u`djr+%b@VoIce4512=n3HjP0pSJb`dEj2pMr=+A-(Ov?A^#1qam
zed7KPR!rg2WR2{v7lS_>d-o)K)$EQ5a(M>7KDL_<#`-HW|#fs`mQGo}lj
zZBW7%4Zk*9+)f({RJE(}G*_Bc2^zPnNsYOtD-D5;GK
zCsgm&PGemN`QI*LbBqE7MfzP4LiKYV(#abxRR<##aTs$RWz#yC@|^1UbnPIND5yQG
z1}BFsxcZcm&77UgIbUK~OFPnBLa({&4Mql$|LYo7(*@Xt&!mrugRdF85+=I=q{*
zUu%h8fncqwb{95@37Y{GJYVc_mQ>JWZO+ZZ(x6_hk-$dv##D>p_UKl~t-fM<8sv(i
ziW>qfLp<7#yDd#5TWY{5&n-dX8)i8Y(1!&i^@?{^A%P)lWB=C9&)|dfL|kOVl0UTw
zy6T+IFDt1qLQIT;AH6b7Zx(Gr34UsI;2o7ko;Ti#h^q%&O8dBvs)hSN4E;E|pz#w>
z5jY;Uca?f&D{9PXtl-l%5+5{
zixfGVuaZT_k?k*{`TBahkllJ44;sW)Q9cU-)2*D!((ZONuixTI;D-YX+L~>!(@ncY
zL;LHN!C_-=nLDU0?6lM3`G?VjrFez;Z?E6fT1B#DP}DC$G*UG*qVvLvdK!z;sp;ri
zi-vM|GTYuSWp{hk7Zy2xO>g7P?DIM8mY*KVGoiH~Gl@%BO&5A9eJ}d=fPD$72#!ss
zJY8wTq~D%V^a)CnEfS-m#Me#MOy~L
zC+t+Va8|)wsp-_uPj|ltBp%c-<+b+_5CQB_(#s?_df76mWW(#8Tl%K$+1uT8KOKD7
zxm58$TW(`z{?n(B4P>YbcgS!r!RU)+{A92gYm_{<6YL)c>fT8dI0i=J-G3}tH1`7XD;
zpqsv|FDDh*p5~n|F(e{){dW$IZ*gUX>9wt5zbYi;6O>1iSmJM%?y!eL@oH?HjGzlN
zkdiGojCzN^j}mTH-8>GuhK7{Q>iX8L?S8(#{;!WSO{qhOLnxb@oAcH@Pc{3LkPI7*
z><_1ZFa_SSlEK-N=0s&Q&baOC<%3h;R1&vDFn;R$`RlriaVEnkRQpTa9xYVqR;1^|
z$5TDna5~f^CMK$lLCGPWqbL;Xo#;N8<*#%M&lmY)nS^|E?!0%fAR#;>
zME3V6@OcPg;BGR4%uOvr12-c*MfK
zAtLGGSyIy~wca1$^J_oNB($CevN)F8Hm~1rF8RbmwlFA?6|z6=rTO0!E0~D7>c|Ag
zWEveD@9*z#eR;uoM&SFFOw5dK_)Fc9>qLZx_V?5W3o*@t?}p6nC>+v8Z$3=%DET~9sd
z$&raNd>yD`H(n-`auDwR{@$%t*KxIfzyAy2r7qF{JCS*y_$z@x3P(#yTV4$r+?n>{
zsuNmU!1Qb7RCAI528wJ<)%Xl1-+p?*79QK@*eMSe`MRtvVes3YPXM2~b09)?4Axdw
zR^FW%{BwI3TpHX7plE+p-B_TN=g+^CtF37-Pz9hcP(^c9{jV+4@=c|+v|iTuyy7T!
z1Osj^`>#^Gyzcyd6yFvl(5bh>GnD9^F
z(B{#j%VMv7?MYOMb~;A7kGM|^HAl2!U~71X^){4F|M8p9xqQjE=xDGAE*KRR+Tc;w
zibHn&@b@Ua0D;ruZx;#%pXKiEu1?G0!ni%GE4j;*-^m~ESaoJM~q*uoDBs7lJ_=Mtj%xSd!ajDG>M7tJ39Ku%)<7Q-pQEv
zhr?Qmr0=JG#^jnKw$^x;p|eJ~?N^#tl-3)*D$Tr5Tu{J#T31(BQ&STz0dDItkT)QG
z9A>WK6f(4ayrOw>f9&s{y;qRZGw{04xQ!(!DJ$b#obVFZOwZ|5RCYS}p{o3)&M&FX
zkMQV$!ZoIgo{622Xe@WYvRir2PedeW;HI_Ooy$=ztUg(H4q;6A+GBOYub^gQ-onjz
zrLWMv@1Sd@SaCd)Jx9QYQ{PRgoehrhcJ5l=q9p6ZIJ&I1$$Ff8qF{1!k#~;tSX!)4#Fty(xCdJ#(CaLT?qrY7k_>b@-sCOJB_X5Z5
zI}<0dv9SaB0fBLan_`BBhU2Ger>o8RbLGX2jg5ty1*qoCx0pysNZI)hFKcxXnD!QC
z{6k(z!>b&gH;iXHqitUZWwj9<_kU+RJ^6m>fIO+a^tK)c1M=J@!k7;-g}Nm4K1tM6EBd
z66bNV1qAHPlvQ+Eq!ui4RDbGst-VWeAm>e~c^IlMe1KKV*<0pYL|K!M|3;9?eca?s
zGxXblJWFxVU*KUwIVFV2u!G%$;6-4wP3YY_)2EI0`?KT_+~Wqlo-`8wZXwqbF46pN
zreQaoYDoH+ojP%KFz(M)=!1`=N2l|A{yh_iNg-J)+9|RWdmjy3piUR7K*y(hOYx#H
zr8L9h-Yz~%e--IQpuB96APnXgn7ArNxVgE}DBq2Uh@eb+YhhypR!~w>GP1E$>PFiR
z&l~7Mp%CzPJxqF++Fj{x7ZMTzvG}SeC8hP^rMq62yNMy{KHp2rv&4LZ_^@+ua6dm^
z)eWbR%)-lGP5wSO!!$41;
zm)+`qQdU~3o#iXDBE5Kh&z`8Z0+)(eTU*PrvECUV#KV)eM)r|`p_hh#W@ZK(ru$q|
zU>ekU)U3ZVVNG(6>5G3+K
zOngvevGsRyHL~NG|7lk>r8*)Pfxh7vDdcU;M4!2tg}0t_)knzh`Xw?lU-1op8wG`l
z4qS%clc^iev>Kny%ueccV+|oIJ85ZYI-H|WM4zq)4j!Ixi~Ny_!{8*$#fl24AFSl>
zABhu2p^FtR
z85c@uKid%2i;JG)=^Ub`w@CzO4jR=7C@olMPr@LGR;@R}UhVOD6}C?G-<-j$aPsmR
zvEVx4GDZ}65iP}FETa?nkCEP8btnncS}R{O2)KFp2=Ll;?6IK4S>Im@pc)$MVUcL^
zd!#xOQ4f2nYK)b;J3Bhsx@untj%)RBad3Y6II}$@hkwOsS@?K_fJrI1YQ^!|9fHIc
zJK1~YKH6+lbV%TGw$|EsFr@bMxRQR#e?r+eIx5@id{Lp>I2$`I!n*>YS6T7*wKsdX
z`bNU8v0q<$w-#VBw>21NVNdDI!6>%8_l&hZF`*_iEHMK4Y-jg*U0k2WwsBeEKnNOkb)HTxLbHpcp%kCXnLSgcPMTi#9+2LF{LBKN!
z0f9OHldgz)V1p44taTiUDa*5FsXLh7s=HlsiFh(wFubtSg-?k6`UEz(t82QgExJ93
zkO@^G)3enjxA+#Zj*6BR6GKd-FM1EV?{_eOqBj7Q68}O_?F*n)Ofj^aO>CnE6p@{6
zaC^G;t3bEKwG?V)-JNzjDfG(kbMrGk13ggMu|FC=N6lnsO
zXlO)}d>*HA8Js6uaYFWUWpmrZiAw@n-HcHLV!;H8ijxi<_lQB*VQJxFUp}PzAI(=R
znY{f5v=hvy-`+_+JU+77b8l~Nf4(9tds{jZd=7y4^W&|g(z`Q!)y1!x5{|Ycj8m9b%;2ULwpEcCKL2@xOuL*>#UFXi@hx1!q-~4n3?0v
z;C|+}wUNnJ6&8juV_;xp#s$sNjK!tl6B0tEZiTQt7v9m23b1bFj5cCqD)9Oq6xDC&
z59
zIoR_*(Go4?eG=;H>nq7%ig#~>@iobwSa5Q3Dvt?}w9XhS3Z`K`aNuFB5Z#*UJa7H0
z6?RiWa0x}Kb@xXy@5=G5mRMVTLLNn7WMpJMHr#1Xiq;K83PcwMIQJg{-&$i)(c{2Q
zj39XA;rOKpKzF27Yg$SOZOx9`%AN3!(Gk8wP1kk
zY)l1TUu#BSv>PkpueNC?rDWv1(o<4H_opteuX~P@aEDS92`^eG&D5T23oq{<|P>2MtgC+0+@;GLCOHzm4>eu
zgOOS6RyZx_JpemQyLfsys$%go5)a$yFOttlN=g#5@SO;boB@3aMnSj&5uIG_jP_T&
z*Unk0wRop(DOt(R!QnJ~m#^2{+eLALlVy}?&;;*O1vyaJ-MQT4D%a20db19zl(~N2((#@bB0VcCR)Z8kev2_Ipw3r3{g1
ziuQn4NqQYB_2kf^Twnp@d-q#4dtT(#*Z(b-32gWB^i(@=u&_umMER|%tOGqdvazy4
z`D*D+2=ohFjSqzgAB(e?Wvd=(?}m1Cv9`WgEn6hMUbO8v?sVvSwuI~Bz108W`}htb
zB@=^zik4b+51v0Am6sS89Ax4o^4=QwY9{e6F(E;NI96ItPOr@HPp9x>Ye`AT(j$_D
zgv7-1BfjmugN?UTy>70zr@xq3-%mgM4|J!L@bC}^Vnv?Ctm;~m2}(~renNka)K|FZF?-l~>A@m23x5%*Zn#uG(2afY)MUsaGeJOX
zymzB;AVHhwx1$q%6B8ACGj;zP+oj%!uoB{-FN6rh#9@Mhg;YpOOZOi>+~Y^!mgga)
zdJ%KgEVp^NWY@vy#XE(N+#JkE)(s*dAfQtU>dlV7|9eoJ^Y)|9UWdoodJJjlFOO56
zAYT)4I^Hc80@pCamL{27HZ5!vo%c)
zrVBHTx%r}+K+!-Y%@fMyD6&a|Kcg_vOi4}<;cM9&4;W#vsChL|aqF6_J2
zF#L8reztyv)!@Z?o0RbI@RAI9R8@$@+jBy{&kT%=w%7YpT9T~c_q~`x+iAv5I=}Uc
z3cH0>=U?8Y}DYtkt
z_wq
z&tfXE)rc5J7MysUb-J*td!&i*>frJ0cHj9!x
zi+O;x7(PmoK)VXnUh?W-VBybX!1gZlV1o`{r?E?9*f=Zo8LW;
zMaiGkCK8K_%q6tG
z)Gj`|qo<(}UtSAFk1(gI7n*xiFS^nePv?8=-FtOH7-!nr+*Fgp|Mcn8{K1gsl?&YM
zX#v3WdaCM*aed)FF<6So&2JDWmyf@@&6H8%y3ME&ea*}`nyaSTErEr9qN4eL0ZPto
zI|_aB2Jeaw@#C|!BH-Wn_h6=c)So_{|J%tHzSe@8CCvWtN0yO0R{lx(p%%d*j*Nt~
zb7EpdUV+|78fII4Js^%kcx-H>w)V2UExJ&^c_^U8Vj2j2*1XsBcY99mz
z;icdZ1S|bHK8_)VSH;=2dAX6uB)DmYVJEinr2ZYOQK5s94e2r)&xSyMzRfoKW0mns
zY}eN-HW%P9j7_?M2ZaMxK}Hnt#&&k?M%|z(=FBWHJ=lILLTkV7f2|Lil%k@AVkmNO
zbJiPtNJ$|rHYbJu`Gu71+SUSmtp`K?c?Plj+5nl&^Ve~S6Bmv3o?4(x6jg!m~
z-V`7`(EYnJKv#6Hw*${lI)gAVF?-L1EYgAi1%Z>3)7&`(4i2tx8G_|4jEzICo|^gO
z?gJAFl~#B&iURdIRMVD_czN{^{fKc
z`2FLPl~rlHu2O$8M0XJm74SPXl$DJMKj$@m1-PH@%ahkwcFB6P%id)p*Jj;V1{daY
zXF&nH-E6&$=7b4q(G{=#q06L?q}_ksq)vnvi`ZN2D_D10tn-Uj%@X^PRy=mH48cvBdY?#C>4Ag>li
zMsCfBRL~ZIKc~|w03XEw)LI%
zB$dDyaDK|u)wAAA4q@66x!c#T7#0(>m)^s5At6hd(%9J8k^zqd&+6qCm~y31`2Bp~
zS48qDN2vg2ILBbsVg+K~8BD=s7g|6gsiIgE*97kw^fI^$q%=^V+g)ClTVq%u;YOH^EyViW%h!%rVa4d%)a%OXTo81xGwL6{>
za5c`gKbhH&Cx_(cbG-P=j-JJSmD?R>Fd8>eVNj+7!1a!tz|Q|E9n~Lzu5pFe)wtpR
zCG0<4(R+h7PnMBb2|{n29}jiHbeO44LeJN#yU$o!Mvgac|BtdEpwi7{JXRjrmZ>h+
zl_NRkF56fj4(s1>y_@>6hB;AG%!V5M+Im6ha_d(9N=$ZF{N4i$gT<$*sTmJ9%a+P@
zEZ`G}SP|37P7jMTnn!(?Zzq|_f3^(rg9?~7ifvteXg%H{BsN(4Ox)hfX}*im$ih(3
zk!^Nek#{i(9+#}B%c?%voXuL_n3(qv*doBkPEIGAOeBEMb`djV&(Ix48$j!D{L@p}
zVAqW~7C^qew>S7i;=h8qrXS#F*aX>of&DKVB6a_$ru`|^7!eV1XIL6xGP~XTF
z>*<~32<|3$W*XJma93b>n1jV@d~av4=Eun1KkeMk#N_Sado^u+dC7V;uaXnrc@c{0
z&~}eA3W7dnA3#1bGc&6)?3@@G>7hAZ0vdoNTKc2sok6C{=#zg-)~!(5w*2N>qYx%%V?`eX4vlD-#PcHrXtrr
z(t&9^SLS!=b}k>fv2YU|7iU<)0&xGmlB?Z1Wm)2FGT=
zSsrw@rA~LOTjw+qf4~@FAC57z%ZRzf-$7TriZa;8+xXY=-&I{~K(I^@p|B-(S5x0J
zg`v0SOokec!90m~E+CW~8I#nYZ^~X;mLdl0qpK-QIRc}8=Twsmjr*57Zz~mP)_!&?
z(x^*=t2gCjWN_SY%YIfCcB`x#!gOUb-X9!mNnASZg0&CS^!E^8K_IV@Y&
zOLjEa+zqqqCk>4Qes7bnJCjcORoOo781mLGSnC2-k&joOSeR_RdY6>7^HYB!!TuL>
zm-jT()LIppv=3l~^V>%KDt+|W=(gzd>8W78(%Z9jU#(%Z8deJa5t)D|I<`Hh`NT}y
zH$k*7-bq>6-|u09uo1AapC>Bt9?Cuh{XSP_s>`(pI#!Yc(z!x}_`cB8DAE_rm6MaB
zBbN_dr9qCtjf%EK)T@zk(QOnYKs$Hu#uwn9HSp~l_;08B?-QmfHpiCJbFY>eO)Hxt
zQobzSD4Z^%tAKCcsAz>BoLI0i%pU*^dsN6_^uEo5;WPVyo!(^a7%K9uAO6v&+qFM~`$b^~thWu3;6l4iI1|(?lm6H{C_r&9oNU_23NMMEscC_
z;a?NO^Qx@F2}&Z7F@xqJPCES|@bo>RA-N2f-deex9-_W#K=2}o`h!8VQtehU;ByJr
zL3wlyH$ODL2i{Fm_@
z-&TA{3Ay`UZgt|Iu8wd9Ew%jNfgR|_2mWvc(YGtq(ts6>;^t8XpHM_~Z4H}1l8azw
zR!B|#RTuED*|S1S)i9~&fX12SQ^>Ql6iMShGyC6p)CGqAd-Z>I`M<;%kY)Slch4e2
zxE*toR28zDOr01<4?SDrh0@3pZod&&+H~$Fx!A`kW_vn~T%^Sd$rJzkBfykpW*y-W
zx*Y1~SsG>I?_#=>F`|NhMMOd%MpOaf#W|nveOxxTwZG%mrPo3{uhCOe^~s9l&{~sK(AV$%fV!nyZ$0+#(i8l>b-w8hI
zw0xdQKvi%YS?d11*mYVku2+aa%4wGb+@6E)0}>m&*4da8Ms``FsR6nfDN$T1X^&t>
zD!H|pV12$h!m%8NZTrJPmBmr(HD+@lTdG_t@5ocfttEz;;
z+Y040DoOhdx&sTv_f$H_0PF<1>jEy-$o!`7*%ysWa-1@Vn24w?c;&>lty~u86T-3W
znU`01XK|g;@#2QN&9MBLH7%j0=KEhKKe)9-4EL*~BmySg!|h{B??q&nolJzQ6h_03uE&9xkm|ZIFnR}O
zMAnHcGaPQ>+a|bY$_Gy~W;N*80j)B2WB*jJpU>^}>9f_>j6`&SZ9Bd?wMT6_5X`-PpGPxv*8+KpAg
z7`W+_bX)1@sD_TuRd2#Y`<2CBjkpW)5FBk+TAD2;DTp|U#2e-bVraqL=bKN&s=t)1
z{0C#nfkndMzj`KUKuG*598e`x{4~|#Rz8u%TcO(;&BoU5{nyp0#)3(=&hqr=q}xhX
zFN=J8TLOshA)5N%og1f;5CsGnP7Wv1vgpiLYO9-7rWP>w1GSz;%m`FAu%uI2(9tln
zxUIfO&umx!sL1-d`GlbE405WY+I+~8rO=L<>vVik){&{nHn5UWi7xDC7*LfgMysPb
zkf0~aGl$cFFj7M$d}oWk9(9slLUPtXda=*XT#M$&$IoToN5wyx?DyadiiQ0X=|@u_
zB6l{a8ZC9RDVpRPzPN)$O`;`IjMhys<+lT*o|HervCtys@=+1Z-2(3z&=<3>x
zGIiWNnIEFRM+24HtBO=$kltJw<9@nJO(*I?m!NTZd||N?9l{S#N~@sHSgyC>G8>^h
zrXe6wIbrGAt^(}Fpi7e}e?f~NmG7B>matLrMzaC0&mQ|YcVVqM7GU4DN>o96$0-5S
zvD{7KP&k@!#OC#ss(bGUeH1jYvxPYbuupUuWb++u?zy1g4g&E(nlseME5l#DDD$xP
z7FovL-Ees3PyKfxf8$7aKsPJTJ;Y;-nQ1}eK!u%gzQzfm^*^~Cm!$2t2UPoyk(^uD
zv&*G(XJ6!SXq678<$Yq
zR(E$>`Kis8UDvg7%eo~&3ok@Ko>okD%p5MhW~%~-lahJ^CqSlJGOjU-ibGF^=xT^I
z`5><@u$F4G?i?=^^ZM3&_225;UIfzIP9cu2C(Gh=ZxNXYHWI)f^QvYG!X|{AmN0QH
zR{lQkyqFkkOCW`c;QwcL^=5CWLmAuHmidRQ)2}ik#B$9^!#btvoJsBKKulVI;Jl@W
z?f_fDfP)*LU&3crx0aXd8ks!xOuwBOWqZo917F^>M0W~f`1*-5wMfeP;KE`k#$BDo
zzId4|4oQHS+gMCl?U`YIE)#n++UqaXhAPC;akDzPZcyB={Wp
z_+)}W_)LLBQzIqkZKsiiUWJ^B_mkj&Ft&x4QDA>fkJ_sQ=Q1_hn5
z!&~?}1ir-$oI(Ujzv~~r*}uwqj+npn-+xi~cvcrceFu}#l!08VPsWoI=iyr2zj|>s
zDa0x$IgS}{yVeLe^8mVZyp_N;%n(X=RZ_m9XhZJ}b
zA3~NXdw$kQ3X)(rBv^$Q;dYmiq~N%Nb9-j{{+S^fIwpE!Zd1O|qt)CHeuO_f9$@B&
z9#If?K2=Vn+r;?y*B|udTaw*VGz*Ym^ryq}?+nR*fgEUiqxaMr`ka}6A71yO)?WoM
zJx_Pp($!wn%@y#qO0PB9{%!m$Cgk@V1P0Dgi1M8$We|@+m%$u%Bm$%A
zr6n!Yhea*jYk+>3TZsTZ`b3r+cGfe32RKtP3(sR)soVr9Va5OM-m}4SDpTIBYTh0o
zEJkQRzfn`_cCsAi(kQG~xlX}DCX|fUHkN4=p0^2m^1R|vc0e(GZ?ii_Tx)(vx_>&m
zSeftRIB>l5C52IhxzsW!NJLsxPe!QYqa=vS
zH0dI(ZM*p8VCMjXG+=eQ4G`GW=OM^QFLOLiXKOe9c#)vqM#ebu`-h|MD3mGwa`8G5
zn`i>&Okv{*vW6VR91dCI8^(ql20Hpmz1s9xvV_?cAMiwWM>i`R76>cs)xgLxx9@4l
zl=6&uf);!JrvZy2RFgn*9Q52yH5i-j#{nI6v(c31yaL2Ime%Kq!2p1wzmr7saH@%5
z;WoDThqp1WPhKtnka{=ev;&D~7v#h%*nY!+=QDkYPZ9o|4rlhaV2kQaSH3NNpX8q3
zXPu@mJ$SkK?U;oyC`5)l_3eDR%%mUmTJ{mKfi?b76Dt&njS3*tKMh(5_=Qa;C**ZG
zEl(CQc})j;Xhx_|FHun+Z*1&TSMusT;@Wy*+(_p~&(}c#8?HcafE7qroe7gYKi>>+
z|9k%2A%im8r9~jU?UWK+ykkKxN65cE8G1iB20xH`feqZTKy=kifm(eUsF6Ggav#o)
zI24wg*SE$df_KYn_Q`Rh!!Mn~1~I?Y;#T3MP8irw>Hq}4zCM`IX|(^^6O794u%3C*
zndWV}GCw|s@>GvhmxIo0|K78%8VCcefS!^YzTUz<
z;PM(1Jx2}@zhy1nKW^5}jVzEquSEHATNtMfiNG?W;<pp*dt1C#$UQ3tDo^rGZ*|(*`5&$iRO8NQ$b|fV{`%$*S5;{2+zIpn
zBXw4?$xLyvZ3OsUZc{2dA7-|f`~{yDHh_M4mh|7w(uoC!h=qr>eMV}Mo8vJ7ndlW@
z<)(wl_Vz|NJLAGOYO^`q8H~kf061Y18Dh)+&LAr|S<;kNUZ=(6ph@Wccsko)_xep}
z*7u6~u&;B!tY(T|IsMx%adud?Xl+$h5oo>#5d^}9B5o)u$Vp4fG-KzOo2cO7-9dF0
z*8pyiix7@T;|2(EHMf_pgl?U2HLk-ZL=85^n*V71bm5nyFoIRNcBk{Mb*9dzU_Xgp
z-6PrUI$JMyaL>L^+d{icI{SY2o4`O9Aya0%e~ioUqWVU5{9WpAOEt)F+^k
zcejES(`@|R|Do9#lPb^)t@~x$ZdXbK_f>70tY2HJwk1bMz`5@nXd4nQD(4hw>@=31
zW~7=6M^kunc)faVns$B##8R*bxC3N;+V?d0Ug(L{WF~nW5CcBvBVhSGCyZvNeb?AN
zF-6{6rC^gpwY~vI%Uh4kFV?pMdy(Br!?Vq@8j1%zwPp%kL7A>6FbmK+B1uXzWOLwt
zvabG%tHD1jC$4y~-^D_aubG=K)7^=GZ?~+?^&qZS^7Qv&T5!_cd%YtO7~gq$1n?jDas0H>3S!e>fBFOQu)D>`lE`Y{ov#c>+$1~&gehRBywkjF
zKmssa=LaCPd{q>)f<=+u!UdYie*=X7R2M*#Ddo`#v=|KNA@j($r;Wwh4f_B(hpB`ImvAU}>$
z$UlHaOY-jw8YZTuii#-ZdZPm+j@TzPA^8c^Z)M5cq^E#m1|$)90apb`6?tmR{`eH}
z`S--qX`YqL95J`7+PDc05V{uwGuyMVu@QSHD8vE*IPyJ3&P1v=8XR_LF)|Voh+7>l
zdl>IItZJ1>>x@Q|A%H?3-a9pgeWTd$P|9j5o6HD%6ufnFvI2xk>lgNIxlC)Qmjo}w
zfL@hC(08bbQotMDMeCWJhe51NXVbO`NQ|+@mXy#Dsg*7(hjZe{gj&4f6-U;J%`v_Q
z#!7_igjH2lXJ-}`QnVF~>Ya8*@CRn*=F~+*V9QzV%1cU^RhccMeuM9fz6BjnfRULo
z2l`BQ&>6lAgJ$4KWAt0aBqe2kXJ=)NGfKuA1Ewg7D(NNST+BKytwI;=hYwcUL*KHq
zvmI$^X+Pxy(Tqvd4~&f4F&{r(1?J_w_2sl*^?ZHt8$jx_0h&`PvY|zwwWOIX=C-u5
zVu@W{)f437o!e%(xVQi!Za5&dS}`Dgg*}GjGHhSB+UgOvCJH@9%Ov8ko&|zdOG(5L
zmadlC4Yo^wtu^&~s0_$_j^V4AL85H9=M4~%kSKYaL(%z-x&p0ciclmB+r8_Xn|a=@
z10Ar;N!}FCioR&N@5qGq+tO%hDl8Iqh*2?`pPye-+8Y6<&A#U|D(`czAfg
z$b}R#3K}WrF%afFHnXz&FcuCZ^xNCpq5L$~<=iuB>gr)(=Bt1Jkzn-4|LH=Kvmh&L
z4T#RB(g8t~i2Rb0$l>PDHITBR(-cqbFCfAbz;HHEZDaDeqp2y?iI0yDI6ZYeKaC~1
zYIonCK5mw-s|p(y)0nD)cW_TlPe(mzG#%pidthf`asZ;q!C-s9&z<67(AG=m
zAN}u3UoTz*nM-}i_$_W>hoai~`!oC}9Bk~M18xBUeO3rm4!1m@8@~-6JZP&>{U#0l
z5V#_TwHADgpBPIS?=!nDHD^`zT!-gV+>lk!Ib}2dYM-?t3=a?UxgXOpFeH-we4yEs
z2H!b?8_cyl