addons:
apt:
+ sources:
+ - ubuntu-toolchain-r-test
packages:
- gdb
- python-dbg
- python3-dbg
+ - gcc-6
+ - g++-6
+ # GCC-7 currently takes 5-7 *minutes* to download on travis
+ #- gcc-7
+ #- g++-7
cache:
pip: true
- USE_CCACHE=1
- CCACHE_SLOPPINESS=pch_defines,time_macros
- CCACHE_COMPRESS=1
- - CCACHE_MAXSIZE=100M
- - PATH="/usr/lib/ccache:$PATH"
+ - CCACHE_MAXSIZE=150M
+ - PATH="/usr/lib/ccache:$HOME/gcc-symlinks:$PATH"
matrix:
- BACKEND=c
- BACKEND=cpp
matrix:
include:
+ #- python: 3.7-dev
+ # env: BACKEND=c PY=3 CC=gcc-7
- os: osx
osx_image: xcode6.4
env: BACKEND=c PY=2
before_install:
- |
+ if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
+ mkdir "$HOME/gcc-symlinks"
+ ln -s /usr/bin/gcc-6 $HOME/gcc-symlinks/gcc
+ ln -s /usr/bin/g++-6 $HOME/gcc-symlinks/g++
+
+ if [ -n "$CC" ]; then "$CC" --version; else gcc --version; fi
+ fi
+
+ - |
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then # Install Miniconda
curl -s -o miniconda.sh https://repo.continuum.io/miniconda/Miniconda$PY-latest-MacOSX-x86_64.sh;
bash miniconda.sh -b -p $HOME/miniconda && rm miniconda.sh;
install:
- python -c 'import sys; print("Python %s" % (sys.version,))'
- - if [ -n "${TRAVIS_PYTHON_VERSION##*-dev}" -a -n "${TRAVIS_PYTHON_VERSION##2.6*}" ]; then pip install -r test-requirements.txt $( [ -z "${TRAVIS_PYTHON_VERSION##pypy*}" ] || echo " -r test-requirements-cpython.txt" ) ; fi
+ - if [ -n "${TRAVIS_PYTHON_VERSION##*-dev}" -a -n "${TRAVIS_PYTHON_VERSION##2.6*}" ]; then pip install -r test-requirements.txt $( [ -z "${TRAVIS_PYTHON_VERSION##pypy*}" ] || echo " -r test-requirements-cpython.txt" ) $( [ -n "${TRAVIS_PYTHON_VERSION##3.3*}" ] || echo " tornado<5.0" ) ; fi
- CFLAGS="-O2 -ggdb -Wall -Wextra $(python -c 'import sys; print("-fno-strict-aliasing" if sys.version_info[0] == 2 else "")')" python setup.py build
before_script: ccache -s || true
Cython Changelog
================
+0.28 (2018-03-13)
+=================
+
+Features added
+--------------
+
+* Cdef classes can now multiply inherit from ordinary Python classes.
+ (The primary base must still be a c class, possibly ``object``, and
+ the other bases must *not* be cdef classes.)
+
+* Type inference is now supported for Pythran compiled NumPy expressions.
+ Patch by Nils Braun. (Github issue #1954)
+
+* The ``const`` modifier can be applied to memoryview declarations to allow
+ read-only buffers as input. (Github issues #1605, #1869)
+
+* C code in the docstring of a ``cdef extern`` block is copied verbatim
+ into the generated file.
+ Patch by Jeroen Demeyer. (Github issue #1915)
+
+* When compiling with gcc, the module init function is now tuned for small
+ code size instead of whatever compile flags were provided externally.
+ Cython now also disables some code intensive optimisations in that function
+ to further reduce the code size. (Github issue #2102)
+
+* Decorating an async coroutine with ``@cython.iterable_coroutine`` changes its
+ type at compile time to make it iterable. While this is not strictly in line
+ with PEP-492, it improves the interoperability with old-style coroutines that
+ use ``yield from`` instead of ``await``.
+
+* The IPython magic has preliminary support for JupyterLab.
+ (Github issue #1775)
+
+* The new TSS C-API in CPython 3.7 is supported and has been backported.
+ Patch by Naotoshi Seo. (Github issue #1932)
+
+* Cython knows the new ``Py_tss_t`` type defined in PEP-539 and automatically
+ initialises variables declared with that type to ``Py_tss_NEEDS_INIT``,
+ a value which cannot be used outside of static assignments.
+
+* The set methods ``.remove()`` and ``.discard()`` are optimised.
+ Patch by Antoine Pitrou. (Github issue #2042)
+
+* ``dict.pop()`` is optimised.
+ Original patch by Antoine Pitrou. (Github issue #2047)
+
+* Iteration over sets and frozensets is optimised.
+ (Github issue #2048)
+
+* Safe integer loops (< range(2^30)) are automatically optimised into C loops.
+
+* ``alist.extend([a,b,c])`` is optimised into sequential ``list.append()`` calls
+ for short literal sequences.
+
+* Calls to builtin methods that are not specifically optimised into C-API calls
+ now use a cache that avoids repeated lookups of the underlying C function.
+ (Github issue #2054)
+
+* Single argument function calls can avoid the argument tuple creation in some cases.
+
+* Some redundant extension type checks are avoided.
+
+* Formatting C enum values in f-strings is faster, as well as some other special cases.
+
+* String formatting with the '%' operator is optimised into f-strings in simple cases.
+
+* Subscripting (item access) is faster in some cases.
+
+* Some ``bytearray`` operations have been optimised similar to ``bytes``.
+
+* Some PEP-484/526 container type declarations are now considered for
+ loop optimisations.
+
+* Indexing into memoryview slices with ``view[i][j]`` is now optimised into
+ ``view[i, j]``.
+
+* Python compatible ``cython.*`` types can now be mixed with type declarations
+ in Cython syntax.
+
+* Name lookups in the module and in classes are faster.
+
+* Python attribute lookups on extension types without instance dict are faster.
+
+* Some missing signals were added to ``libc/signal.pxd``.
+ Patch by Jeroen Demeyer. (Github issue #1914)
+
+* The warning about repeated extern declarations is now visible by default.
+ (Github issue #1874)
+
+* The exception handling of the function types used by CPython's type slot
+ functions was corrected to match the de-facto standard behaviour, so that
+ code that uses them directly benefits from automatic and correct exception
+ propagation. Patch by Jeroen Demeyer. (Github issue #1980)
+
+* Defining the macro ``CYTHON_NO_PYINIT_EXPORT`` will prevent the module init
+ function from being exported as symbol, e.g. when linking modules statically
+ in an embedding setup. Patch by AraHaan. (Github issue #1944)
+
+Bugs fixed
+----------
+
+* If a module name is explicitly provided for an ``Extension()`` that is compiled
+ via ``cythonize()``, it was previously ignored and replaced by the source file
+ name. It can now be used to override the target module name, e.g. for compiling
+ prefixed accelerator modules from Python files. (Github issue #2038)
+
+* The arguments of the ``num_threads`` parameter of parallel sections
+ were not sufficiently validated and could lead to invalid C code.
+ (Github issue #1957)
+
+* Catching exceptions with a non-trivial exception pattern could call into
+ CPython with a live exception set. This triggered incorrect behaviour
+ and crashes, especially in CPython 3.7.
+
+* The signature of the special ``__richcmp__()`` method was corrected to recognise
+ the type of the first argument as ``self``. It was previously treated as plain
+ object, but CPython actually guarantees that it always has the correct type.
+ Note: this can change the semantics of user code that previously relied on
+ ``self`` being untyped.
+
+* Some Python 3 exceptions were not recognised as builtins when running Cython
+ under Python 2.
+
+* Some async helper functions were not defined in the generated C code when
+ compiling simple async code. (Github issue #2075)
+
+* Line tracing did not include generators and coroutines.
+ (Github issue #1949)
+
+* C++ declarations for ``unordered_map`` were corrected.
+ Patch by Michael Schatzow. (Github issue #1484)
+
+* Iterator declarations in C++ ``deque`` and ``vector`` were corrected.
+ Patch by Alex Huszagh. (Github issue #1870)
+
+* The const modifiers in the C++ ``string`` declarations were corrected, together
+ with the coercion behaviour of string literals into C++ strings.
+ (Github issue #2132)
+
+* Some declaration types in ``libc.limits`` were corrected.
+ Patch by Jeroen Demeyer. (Github issue #2016)
+
+* ``@cython.final`` was not accepted on Python classes with an ``@cython.cclass``
+ decorator. (Github issue #2040)
+
+* Cython no longer creates useless and incorrect ``PyInstanceMethod`` wrappers for
+ methods in Python 3. Patch by Jeroen Demeyer. (Github issue #2105)
+
+* The builtin ``bytearray`` type could not be used as base type of cdef classes.
+ (Github issue #2106)
+
+Other changes
+-------------
+
+
0.27.3 (2017-11-03)
===================
* The new METH_FASTCALL calling convention for PyCFunctions is supported
in CPython 3.6. See https://bugs.python.org/issue27810
-* Initial support for using Cython modules in Pyston. Patch by Daetalus.
+* Initial support for using Cython modules in Pyston.
+ Patch by Boxiang Sun.
* Dynamic Python attributes are allowed on cdef classes if an attribute
``cdef dict __dict__`` is declared in the class. Patch by empyrical.
import cython
from .. import __version__
+import os
+import shutil
+import hashlib
+import subprocess
import collections
-import re, os, sys, time
+import re, sys, time
from glob import iglob
+from io import open as io_open
+from os.path import relpath as _relpath
+from distutils.extension import Extension
+from distutils.util import strtobool
try:
import gzip
except ImportError:
gzip_open = open
gzip_ext = ''
-import shutil
-import subprocess
-import os
-
-try:
- import hashlib
-except ImportError:
- import md5 as hashlib
-
-try:
- from io import open as io_open
-except ImportError:
- from codecs import open as io_open
-
-try:
- from os.path import relpath as _relpath
-except ImportError:
- # Py<2.6
- def _relpath(path, start=os.path.curdir):
- if not path:
- raise ValueError("no path specified")
- start_list = os.path.abspath(start).split(os.path.sep)
- path_list = os.path.abspath(path).split(os.path.sep)
- i = len(os.path.commonprefix([start_list, path_list]))
- rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
- if not rel_list:
- return os.path.curdir
- return os.path.join(*rel_list)
try:
import pythran
except:
PythranAvailable = False
-from distutils.extension import Extension
-from distutils.util import strtobool
-
from .. import Utils
from ..Utils import (cached_function, cached_method, path_exists,
safe_makedirs, copy_file_to_dir_if_newer, is_package_dir)
cython_sources = [s for s in pattern.sources
if os.path.splitext(s)[1] in ('.py', '.pyx')]
if cython_sources:
- filepattern = cython_sources[0]
- if len(cython_sources) > 1:
- print("Warning: Multiple cython sources found for extension '%s': %s\n"
- "See http://cython.readthedocs.io/en/latest/src/userguide/sharing_declarations.html "
- "for sharing declarations among Cython files." % (pattern.name, cython_sources))
+ filepattern = cython_sources[0]
+ if len(cython_sources) > 1:
+ print("Warning: Multiple cython sources found for extension '%s': %s\n"
+ "See http://cython.readthedocs.io/en/latest/src/userguide/sharing_declarations.html "
+ "for sharing declarations among Cython files." % (pattern.name, cython_sources))
else:
# ignore non-cython modules
module_list.append(pattern)
for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern):
if os.path.abspath(file) in to_exclude:
continue
- pkg = deps.package(file)
module_name = deps.fully_qualified_name(file)
if '*' in name:
if module_name in explicit_modules:
continue
- elif name != module_name:
- print("Warning: Extension name '%s' does not match fully qualified name '%s' of '%s'" % (
- name, module_name, file))
+ elif name:
module_name = name
+ if module_name == 'cython':
+ raise ValueError('cython is a special module, cannot be used as a module name')
+
if module_name not in seen:
try:
kwds = deps.distutils_info(file, aliases, base).values
deps = create_dependency_tree(ctx, quiet=quiet)
build_dir = getattr(options, 'build_dir', None)
- modules_by_cfile = {}
+ def copy_to_build_dir(filepath, root=os.getcwd()):
+ filepath_abs = os.path.abspath(filepath)
+ if os.path.isabs(filepath):
+ filepath = filepath_abs
+ if filepath_abs.startswith(root):
+ # distutil extension depends are relative to cwd
+ mod_dir = join_path(build_dir,
+ os.path.dirname(_relpath(filepath, root)))
+ copy_once_if_newer(filepath_abs, mod_dir)
+
+ modules_by_cfile = collections.defaultdict(list)
to_compile = []
for m in module_list:
if build_dir:
- root = os.getcwd() # distutil extension depends are relative to cwd
- def copy_to_build_dir(filepath, root=root):
- filepath_abs = os.path.abspath(filepath)
- if os.path.isabs(filepath):
- filepath = filepath_abs
- if filepath_abs.startswith(root):
- mod_dir = join_path(build_dir,
- os.path.dirname(_relpath(filepath, root)))
- copy_once_if_newer(filepath_abs, mod_dir)
for dep in m.depends:
copy_to_build_dir(dep)
+ cy_sources = [
+ source for source in m.sources
+ if os.path.splitext(source)[1] in ('.pyx', '.py')]
+ if len(cy_sources) == 1:
+ # normal "special" case: believe the Extension module name to allow user overrides
+ full_module_name = m.name
+ else:
+ # infer FQMN from source files
+ full_module_name = None
+
new_sources = []
for source in m.sources:
base, ext = os.path.splitext(source)
fingerprint = deps.transitive_fingerprint(source, extra)
else:
fingerprint = None
- to_compile.append((priority, source, c_file, fingerprint, quiet,
- options, not exclude_failures, module_metadata.get(m.name)))
+ to_compile.append((
+ priority, source, c_file, fingerprint, quiet,
+ options, not exclude_failures, module_metadata.get(m.name),
+ full_module_name))
new_sources.append(c_file)
- if c_file not in modules_by_cfile:
- modules_by_cfile[c_file] = [m]
- else:
- modules_by_cfile[c_file].append(m)
+ modules_by_cfile[c_file].append(m)
else:
new_sources.append(source)
if build_dir:
# TODO: Share context? Issue: pyx processing leaks into pxd module
@record_results
-def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None, raise_on_failure=True, embedded_metadata=None, progress=""):
- from ..Compiler.Main import compile, default_options
+def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None,
+ raise_on_failure=True, embedded_metadata=None, full_module_name=None,
+ progress=""):
+ from ..Compiler.Main import compile_single, default_options
from ..Compiler.Errors import CompileError, PyrexError
if fingerprint:
if not os.path.exists(options.cache):
- try:
- os.mkdir(options.cache)
- except:
- if not os.path.exists(options.cache):
- raise
+ safe_makedirs(options.cache)
# Cython-generated c files are highly compressible.
# (E.g. a compression ratio of about 10 for Sage).
fingerprint_file = join_path(
any_failures = 0
try:
- result = compile([pyx_file], options)
+ result = compile_single(pyx_file, options, full_module_name=full_module_name)
if result.num_errors > 0:
any_failures = 1
except (EnvironmentError, PyrexError) as e:
self.dedent()
def visit_IfStatNode(self, node):
- # The IfClauseNode is handled directly without a seperate match
+ # The IfClauseNode is handled directly without a separate match
# for clariy.
self.startline(u"if ")
self.visit(node.if_clauses[0].condition)
css.append(HtmlFormatter().get_style_defs('.cython'))
return '\n'.join(css)
- _js = """
- function toggleDiv(id) {
- theDiv = id.nextElementSibling
- if (theDiv.style.display != 'block') theDiv.style.display = 'block';
- else theDiv.style.display = 'none';
- }
- """.strip()
-
_css_template = textwrap.dedent("""
body.cython { font-family: courier; font-size: 12; }
.cython.code .c_call { color: #0000FF; }
""")
+ # on-click toggle function to show/hide C source code
+ _onclick_attr = ' onclick="{0}"'.format((
+ "(function(s){"
+ " s.display = s.display === 'block' ? 'none' : 'block'"
+ "})(this.nextElementSibling.style)"
+ ).replace(' ', '') # poor dev's JS minification
+ )
+
def save_annotation(self, source_filename, target_filename, coverage_xml=None):
with Utils.open_source_file(source_filename) as f:
code = f.read()
<style type="text/css">
{css}
</style>
- <script>
- {js}
- </script>
</head>
<body class="cython">
<p><span style="border-bottom: solid 1px grey;">Generated by Cython {watermark}</span>{more_info}</p>
<span style="background-color: #FFFF00">Yellow lines</span> hint at Python interaction.<br />
Click on a line that starts with a "<code>+</code>" to see the C code that Cython generated for it.
</p>
- ''').format(css=self._css(), js=self._js, watermark=Version.watermark,
+ ''').format(css=self._css(), watermark=Version.watermark,
filename=os.path.basename(source_filename) if source_filename else '',
more_info=coverage_info)
]
calls['py_macro_api'] + calls['pyx_macro_api'])
if c_code:
- onclick = " onclick='toggleDiv(this)'"
+ onclick = self._onclick_attr
expandsymbol = '+'
else:
onclick = ''
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % entry.type.dtype.struct_nesting_depth())
code.putln(code.error_goto_if("%s == -1" % getbuffer, pos))
code.putln("}")
- # An exception raised in arg parsing cannot be catched, so no
+ # An exception raised in arg parsing cannot be caught, so no
# need to care about the buffer then.
put_unpack_buffer_aux_into_scope(entry, code)
pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
flags = get_flags(buffer_aux, buffer_type)
- code.putln("{") # Set up necesarry stack for getbuffer
+ code.putln("{") # Set up necessary stack for getbuffer
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())
getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type) # fill in object below
def mangle_dtype_name(dtype):
- # Use prefixes to seperate user defined types from builtins
+ # Use prefixes to separate user defined types from builtins
# (consider "typedef float unsigned_int")
if dtype.is_pyobject:
return "object"
and return the name of the type info struct.
Structs with two floats of the same size are encoded as complex numbers.
- One can seperate between complex numbers declared as struct or with native
+ One can separate between complex numbers declared as struct or with native
encoding by inspecting to see if the fields field of the type is
filled in.
"""
("set", "PySet_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("clear", "T", "r", "PySet_Clear"),
# discard() and remove() have a special treatment for unhashable values
-# BuiltinMethod("discard", "TO", "r", "PySet_Discard"),
+ BuiltinMethod("discard", "TO", "r", "__Pyx_PySet_Discard",
+ utility_code=UtilityCode.load("py_set_discard", "Optimize.c")),
+ BuiltinMethod("remove", "TO", "r", "__Pyx_PySet_Remove",
+ utility_code=UtilityCode.load("py_set_remove", "Optimize.c")),
# update is actually variadic (see Github issue #1645)
# BuiltinMethod("update", "TO", "r", "__Pyx_PySet_Update",
# utility_code=UtilityCode.load_cached("PySet_Update", "Builtins.c")),
utility = builtin_utility_code.get(name)
if name == 'frozenset':
objstruct_cname = 'PySetObject'
+ elif name == 'bytearray':
+ objstruct_cname = 'PyByteArrayObject'
elif name == 'bool':
objstruct_cname = None
elif name == 'Exception':
options.capi_reexport_cincludes = True
elif option == "--fast-fail":
Options.fast_fail = True
+ elif option == "--cimport-from-pyx":
+ Options.cimport_from_pyx = True
elif option in ('-Werror', '--warning-errors'):
Options.warning_errors = True
elif option in ('-Wextra', '--warning-extra'):
}
uncachable_builtins = [
- # builtin names that cannot be cached because they may or may not
- # be available at import time
+ # Global/builtin names that cannot be cached because they may or may not
+ # be available at import time, for various reasons:
+ ## - Py3.7+
+ 'breakpoint', # might deserve an implementation in Cython
+ ## - Py3.4+
+ '__loader__',
+ '__spec__',
+ ## - Py3+
+ 'BlockingIOError',
+ 'BrokenPipeError',
+ 'ChildProcessError',
+ 'ConnectionAbortedError',
+ 'ConnectionError',
+ 'ConnectionRefusedError',
+ 'ConnectionResetError',
+ 'FileExistsError',
+ 'FileNotFoundError',
+ 'InterruptedError',
+ 'IsADirectoryError',
+ 'ModuleNotFoundError',
+ 'NotADirectoryError',
+ 'PermissionError',
+ 'ProcessLookupError',
+ 'RecursionError',
+ 'ResourceWarning',
+ #'StopAsyncIteration', # backported
+ 'TimeoutError',
+ '__build_class__',
+ 'ascii', # might deserve an implementation in Cython
+ #'exec', # implemented in Cython
+ ## - Py2.7+
+ 'memoryview',
+ ## - platform specific
'WindowsError',
- '_', # e.g. gettext
+ ## - others
+ '_', # e.g. used by gettext
]
special_py_methods = set([
is_self_assignment = re.compile(r" *(\w+) = (\1);\s*$").match
+class IncludeCode(object):
+ """
+ An include file and/or verbatim C code to be included in the
+ generated sources.
+ """
+ # attributes:
+ #
+ # pieces {order: unicode}: pieces of C code to be generated.
+ # For the included file, the key "order" is zero.
+ # For verbatim include code, the "order" is the "order"
+ # attribute of the original IncludeCode where this piece
+ # of C code was first added. This is needed to prevent
+ # duplication if the same include code is found through
+ # multiple cimports.
+ # location int: where to put this include in the C sources, one
+ # of the constants INITIAL, EARLY, LATE
+ # order int: sorting order (automatically set by increasing counter)
+
+ # Constants for location. If the same include occurs with different
+ # locations, the earliest one takes precedence.
+ INITIAL = 0
+ EARLY = 1
+ LATE = 2
+
+ counter = 1 # Counter for "order"
+
+ def __init__(self, include=None, verbatim=None, late=True, initial=False):
+ self.order = self.counter
+ type(self).counter += 1
+ self.pieces = {}
+
+ if include:
+ if include[0] == '<' and include[-1] == '>':
+ self.pieces[0] = u'#include {0}'.format(include)
+ late = False # system include is never late
+ else:
+ self.pieces[0] = u'#include "{0}"'.format(include)
+
+ if verbatim:
+ self.pieces[self.order] = verbatim
+
+ if initial:
+ self.location = self.INITIAL
+ elif late:
+ self.location = self.LATE
+ else:
+ self.location = self.EARLY
+
+ def dict_update(self, d, key):
+ """
+ Insert `self` in dict `d` with key `key`. If that key already
+ exists, update the attributes of the existing value with `self`.
+ """
+ if key in d:
+ other = d[key]
+ other.location = min(self.location, other.location)
+ other.pieces.update(self.pieces)
+ else:
+ d[key] = self
+
+ def sortkey(self):
+ return self.order
+
+ def mainpiece(self):
+ """
+ Return the main piece of C code, corresponding to the include
+ file. If there was no include file, return None.
+ """
+ return self.pieces.get(0)
+
+ def write(self, code):
+ # Write values of self.pieces dict, sorted by the keys
+ for k in sorted(self.pieces):
+ code.putln(self.pieces[k])
+
+
def get_utility_dir():
# make this a function and not global variables:
# http://trac.cython.org/cython_trac/ticket/475
hashes/equals by instance
proto C prototypes
- impl implemenation code
+ impl implementation code
init code to call on module initialization
requires utility code dependencies
proto_block the place in the resulting file where the prototype should
def inject_string_constants(self, impl, output):
"""Replace 'PYIDENT("xyz")' by a constant Python identifier cname.
"""
- if 'PYIDENT(' not in impl:
+ if 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl:
return False, impl
replacements = {}
def externalise(matchobj):
- name = matchobj.group(1)
+ key = matchobj.groups()
try:
- cname = replacements[name]
+ cname = replacements[key]
except KeyError:
- cname = replacements[name] = output.get_interned_identifier(
- StringEncoding.EncodedString(name)).cname
+ str_type, name = key
+ cname = replacements[key] = output.get_py_string_const(
+ StringEncoding.EncodedString(name), identifier=str_type == 'IDENT').cname
return cname
- impl = re.sub(r'PYIDENT\("([^"]+)"\)', externalise, impl)
- assert 'PYIDENT(' not in impl
+ impl = re.sub(r'PY(IDENT|UNICODE)\("([^"]+)"\)', externalise, impl)
+ assert 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl
return bool(replacements), impl
def inject_unbound_methods(self, impl, output):
utility_code = set()
def externalise(matchobj):
- type_cname, method_name, args = matchobj.groups()
- args = [arg.strip() for arg in args[1:].split(',')]
- if len(args) == 1:
- call = '__Pyx_CallUnboundCMethod0'
- utility_code.add("CallUnboundCMethod0")
- elif len(args) == 2:
- call = '__Pyx_CallUnboundCMethod1'
- utility_code.add("CallUnboundCMethod1")
- else:
- assert False, "CALL_UNBOUND_METHOD() requires 1 or 2 call arguments"
-
- cname = output.get_cached_unbound_method(type_cname, method_name, len(args))
- return '%s(&%s, %s)' % (call, cname, ', '.join(args))
-
- impl = re.sub(r'CALL_UNBOUND_METHOD\(([a-zA-Z_]+),\s*"([^"]+)"((?:,\s*[^),]+)+)\)', externalise, impl)
+ type_cname, method_name, obj_cname, args = matchobj.groups()
+ args = [arg.strip() for arg in args[1:].split(',')] if args else []
+ assert len(args) < 3, "CALL_UNBOUND_METHOD() does not support %d call arguments" % len(args)
+ return output.cached_unbound_method_call_code(obj_cname, type_cname, method_name, args)
+
+ impl = re.sub(
+ r'CALL_UNBOUND_METHOD\('
+ r'([a-zA-Z_]+),' # type cname
+ r'\s*"([^"]+)",' # method name
+ r'\s*([^),]+)' # object cname
+ r'((?:,\s*[^),]+)*)' # args*
+ r'\)', externalise, impl)
assert 'CALL_UNBOUND_METHOD(' not in impl
for helper in sorted(utility_code):
'global_var',
'string_decls',
'decls',
+ 'late_includes',
'all_the_rest',
'pystring_table',
'cached_builtins',
prefix = Naming.const_prefix
return "%s%s" % (prefix, name_suffix)
- def get_cached_unbound_method(self, type_cname, method_name, args_count):
- key = (type_cname, method_name, args_count)
+ def get_cached_unbound_method(self, type_cname, method_name):
+ key = (type_cname, method_name)
try:
cname = self.cached_cmethods[key]
except KeyError:
'umethod', '%s_%s' % (type_cname, method_name))
return cname
+ def cached_unbound_method_call_code(self, obj_cname, type_cname, method_name, arg_cnames):
+ # admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ...
+ utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames)
+ self.use_utility_code(UtilityCode.load_cached(utility_code_name, "ObjectHandling.c"))
+ cache_cname = self.get_cached_unbound_method(type_cname, method_name)
+ args = [obj_cname] + arg_cnames
+ return "__Pyx_%s(&%s, %s)" % (
+ utility_code_name,
+ cache_cname,
+ ', '.join(args),
+ )
+
def add_cached_builtin_decl(self, entry):
if entry.is_builtin and entry.is_const:
if self.should_declare(entry.cname, entry):
decl = self.parts['decls']
init = self.parts['init_globals']
cnames = []
- for (type_cname, method_name, _), cname in sorted(self.cached_cmethods.items()):
+ for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()):
cnames.append(cname)
method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname
decl.putln('static __Pyx_CachedCFunction %s = {0, &%s, 0, 0, 0};' % (
as well
- labels, temps, exc_vars: One must construct a scope in which these can
exist by calling enter_cfunc_scope/exit_cfunc_scope (these are for
- sanity checking and forward compatabilty). Created insertion points
+ sanity checking and forward compatibility). Created insertion points
looses this scope and cannot access it.
- marker: Not copied to insertion point
- filename_table, filename_list, input_file_contents: All codewriters
self.put_xdecref_memoryviewslice(cname, have_gil=have_gil)
return
- prefix = nanny and '__Pyx' or 'Py'
- X = null_check and 'X' or ''
+ prefix = '__Pyx' if nanny else 'Py'
+ X = 'X' if null_check else ''
if clear:
if clear_before_decref:
self.putln(" #define unlikely(x) __builtin_expect(!!(x), 0)")
self.putln("#endif")
+
class PyrexCodeWriter(object):
# f file output file
# level int indentation level
The result is a tuple (StatListNode, ModuleScope), i.e.
everything that is needed from the pxd after it is processed.
- A purer approach would be to seperately compile the pxd code,
+ A purer approach would be to separately compile the pxd code,
but the result would have to be slightly more sophisticated
than pure strings (functions + wanted interned strings +
wanted utility code + wanted cached objects) so for now this
cname='<error>')
entry.in_cinclude = True
+ def is_cpp(self):
+ # Allow C++ utility code in C++ contexts.
+ return self.context.cpp
+
def lookup_type(self, name):
# This function should go away when types are all first-level objects.
type = parse_basic_type(name)
elif not src_type.is_error:
error(self.pos,
"Cannot convert '%s' to memoryviewslice" % (src_type,))
- elif not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
- copying=self.is_memview_copy_assignment):
- if src.type.dtype.same_as(dst_type.dtype):
- msg = "Memoryview '%s' not conformable to memoryview '%s'."
- tup = src.type, dst_type
- else:
- msg = "Different base types for memoryviews (%s, %s)"
- tup = src.type.dtype, dst_type.dtype
+ else:
+ if src.type.writable_needed:
+ dst_type.writable_needed = True
+ if not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
+ copying=self.is_memview_copy_assignment):
+ if src.type.dtype.same_as(dst_type.dtype):
+ msg = "Memoryview '%s' not conformable to memoryview '%s'."
+ tup = src.type, dst_type
+ else:
+ msg = "Different base types for memoryviews (%s, %s)"
+ tup = src.type.dtype, dst_type.dtype
- error(self.pos, msg % tup)
+ error(self.pos, msg % tup)
elif dst_type.is_pyobject:
if not src.type.is_pyobject:
def may_be_none(self):
return True
+ def coerce_to(self, dst_type, env):
+ if not (dst_type.is_pyobject or dst_type.is_memoryviewslice or dst_type.is_error):
+ # Catch this error early and loudly.
+ error(self.pos, "Cannot assign None to %s" % dst_type)
+ return super(NoneNode, self).coerce_to(dst_type, env)
+
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
node.type = Builtin.bytes_type
else:
self.check_for_coercion_error(dst_type, env, fail=True)
- return node
+ return node
elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
node.type = dst_type
return node
else PyrexTypes.c_char_ptr_type)
return CastNode(node, dst_type)
elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
- node.type = dst_type
- return node
+ # Exclude the case of passing a C string literal into a non-const C++ string.
+ if not dst_type.is_cpp_class or dst_type.is_const:
+ node.type = dst_type
+ return node
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
if atype is None:
atype = unspecified_type if as_target and env.directives['infer_types'] != False else py_object_type
self.entry = env.declare_var(name, atype, self.pos, is_cdef=not as_target)
+ self.entry.annotation = annotation
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
def check_const(self):
entry = self.entry
- if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
+ if entry is not None and not (
+ entry.is_const or
+ entry.is_cfunction or
+ entry.is_builtin or
+ entry.type.is_const):
self.not_const()
return False
return True
setter = 'PyDict_SetItem'
namespace = Naming.moddict_cname
elif entry.is_pyclass_attr:
- setter = 'PyObject_SetItem'
+ code.globalstate.use_utility_code(UtilityCode.load_cached("SetNameInClass", "ObjectHandling.c"))
+ setter = '__Pyx_SetNameInClass'
else:
assert False, repr(entry)
code.put_error_if_neg(
code.putln('%s = %s;' % (self.result(), result))
else:
result = rhs.result_as(self.ctype())
- code.putln('%s = %s;' % (self.result(), result))
+
+ if is_pythran_expr(self.type):
+ code.putln('new (&%s) decltype(%s){%s};' % (self.result(), self.result(), result))
+ else:
+ code.putln('%s = %s;' % (self.result(), result))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating post-assignment code for %s" % rhs)
# in most cases, indexing will return a safe reference to an object in a container,
# so we consider the result safe if the base object is
return self.base.is_ephemeral() or self.base.type in (
- basestring_type, str_type, bytes_type, unicode_type)
+ basestring_type, str_type, bytes_type, bytearray_type, unicode_type)
def check_const_addr(self):
return self.base.check_const_addr() and self.index.check_const()
return False
if isinstance(self.index, SliceNode):
# slicing!
- if base_type in (bytes_type, str_type, unicode_type,
+ if base_type in (bytes_type, bytearray_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
if index_func is not None:
return index_func.type.return_type
+ if is_pythran_expr(base_type) and is_pythran_expr(index_type):
+ index_with_type = (self.index, index_type)
+ return PythranExpr(pythran_indexing_type(base_type, [index_with_type]))
+
# may be slicing or indexing, we don't know
if base_type in (unicode_type, str_type):
# these types always returns their own type on Python indexing/slicing
else:
# not using 'uchar' to enable fast and safe error reporting as '-1'
self.type = PyrexTypes.c_int_type
- elif is_slice and base_type in (bytes_type, str_type, unicode_type, list_type, tuple_type):
+ elif is_slice and base_type in (bytes_type, bytearray_type, str_type, unicode_type, list_type, tuple_type):
self.type = base_type
else:
item_type = None
else:
indices = [self.index]
- base_type = self.base.type
+ base = self.base
+ base_type = base.type
replacement_node = None
if base_type.is_memoryviewslice:
# memoryviewslice indexing or slicing
from . import MemoryView
+ if base.is_memview_slice:
+ # For memory views, "view[i][j]" is the same as "view[i, j]" => use the latter for speed.
+ merged_indices = base.merged_indices(indices)
+ if merged_indices is not None:
+ base = base.base
+ base_type = base.type
+ indices = merged_indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim)
if have_slices:
- replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=self.base)
+ replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=base)
else:
- replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=self.base)
+ replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=base)
elif base_type.is_buffer or base_type.is_pythran_expr:
if base_type.is_pythran_expr or len(indices) == base_type.ndim:
# Buffer indexing
is_buffer_access = True
indices = [index.analyse_types(env) for index in indices]
if base_type.is_pythran_expr:
- do_replacement = all(index.type.is_int or index.is_slice or index.type.is_pythran_expr for index in indices)
+ do_replacement = all(
+ index.type.is_int or index.is_slice or index.type.is_pythran_expr
+ for index in indices)
if do_replacement:
for i,index in enumerate(indices):
if index.is_slice:
else:
do_replacement = all(index.type.is_int for index in indices)
if do_replacement:
- replacement_node = BufferIndexNode(self.pos, indices=indices, base=self.base)
+ replacement_node = BufferIndexNode(self.pos, indices=indices, base=base)
# On cloning, indices is cloned. Otherwise, unpack index into indices.
assert not isinstance(self.index, CloneNode)
if not self.is_temp:
# all handled in self.calculate_result_code()
return
+
+ utility_code = None
if self.type.is_pyobject:
error_value = 'NULL'
if self.index.type.is_int:
function = "__Pyx_GetItemInt_Tuple"
else:
function = "__Pyx_GetItemInt"
- code.globalstate.use_utility_code(
- TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
+ utility_code = TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c")
else:
if self.base.type is dict_type:
function = "__Pyx_PyDict_GetItem"
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("DictGetItem", "ObjectHandling.c"))
+ utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
+ elif self.base.type is py_object_type and self.index.type in (str_type, unicode_type):
+ # obj[str] is probably doing a dict lookup
+ function = "__Pyx_PyObject_Dict_GetItem"
+ utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
else:
- function = "PyObject_GetItem"
+ function = "__Pyx_PyObject_GetItem"
+ code.globalstate.use_utility_code(
+ TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
+ utility_code = UtilityCode.load_cached("ObjectGetItem", "ObjectHandling.c")
elif self.type.is_unicode_char and self.base.type is unicode_type:
assert self.index.type.is_int
function = "__Pyx_GetItemInt_Unicode"
error_value = '(Py_UCS4)-1'
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c"))
+ utility_code = UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c")
elif self.base.type is bytearray_type:
assert self.index.type.is_int
assert self.type.is_int
function = "__Pyx_GetItemInt_ByteArray"
error_value = '-1'
- code.globalstate.use_utility_code(
- UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c"))
+ utility_code = UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c")
elif not (self.base.type.is_cpp_class and self.exception_check):
assert False, "unexpected type %s and base type %s for indexing" % (
self.type, self.base.type)
+ if utility_code is not None:
+ code.globalstate.use_utility_code(utility_code)
+
if self.index.type.is_int:
index_code = self.index.result()
else:
def analyse_buffer_index(self, env, getting):
if is_pythran_expr(self.base.type):
- self.type = PythranExpr(pythran_indexing_type(self.base.type, self.indices))
+ index_with_type_list = [(idx, idx.type) for idx in self.indices]
+ self.type = PythranExpr(pythran_indexing_type(self.base.type, index_with_type_list))
else:
self.base = self.base.coerce_to_simple(env)
self.type = self.base.type.dtype
def nogil_check(self, env):
if self.is_buffer_access or self.is_memview_index:
- if env.directives['boundscheck']:
- warning(self.pos, "Use boundscheck(False) for faster access",
- level=1)
-
if self.type.is_pyobject:
error(self.pos, "Cannot access buffer with object dtype without gil")
self.type = error_type
"""
ndarray[1, 2, 3] and memslice[1, 2, 3]
"""
+ if self.in_nogil_context:
+ if self.is_buffer_access or self.is_memview_index:
+ if code.globalstate.directives['boundscheck']:
+ warning(self.pos, "Use boundscheck(False) for faster access", level=1)
+
# Assign indices to temps of at least (s)size_t to allow further index calculations.
index_temps = [self.get_index_in_temp(code,ivar) for ivar in self.indices]
if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
# We have got to do this because we have to declare pythran objects
- # at the beggining of the functions.
+ # at the beginning of the functions.
# Indeed, Cython uses "goto" statement for error management, and
# RAII doesn't work with that kind of construction.
# Moreover, the way Pythran expressions are made is that they don't
indices = self.indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)
+ if not getting:
+ self.writable_needed = True
+ if self.base.is_name or self.base.is_attribute:
+ self.base.entry.type.writable_needed = True
+
self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim)
axes = []
else:
return MemoryCopySlice(self.pos, self)
def merged_indices(self, indices):
    """Return a new list of indices/slices with 'indices' merged into the current ones
    according to slicing rules.
    Used to implement "view[i][j]" => "view[i, j]".
    Return None if the indices cannot (easily) be merged at compile time.
    """
    if not indices:
        return None
    # NOTE: Need to evaluate "self.original_indices" here as they might differ from "self.indices".
    new_indices = self.original_indices[:]
    indices = indices[:]
    # Walk the outer slice's indices; each trivial full slice (":") is
    # consumed by one of the new inner indices.
    for i, s in enumerate(self.original_indices):
        if s.is_slice:
            if s.start.is_none and s.stop.is_none and s.step.is_none:
                # Full slice found, replace by index.
                new_indices[i] = indices[0]
                indices.pop(0)
                if not indices:
                    return new_indices
            else:
                # Found something non-trivial, e.g. a partial slice.
                return None
        elif not s.type.is_int:
            # Not a slice, not an integer index => could be anything...
            return None
    if indices:
        # Leftover inner indices apply to the remaining dimensions, but only
        # if the combined index count still fits the base type's ndim.
        if len(new_indices) + len(indices) > self.base.type.ndim:
            return None
        new_indices += indices
    return new_indices
+
def is_simple(self):
if self.is_ellipsis_noop:
# TODO: fix SimpleCallNode.is_simple()
return bytes_type
elif base_type.is_pyunicode_ptr:
return unicode_type
- elif base_type in (bytes_type, str_type, unicode_type,
+ elif base_type in (bytes_type, bytearray_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return base_type
elif base_type.is_ptr or base_type.is_array:
return False
return ExprNode.may_be_none(self)
def set_py_result_type(self, function, func_type=None):
    """Set self.type (and result_ctype / may_return_none) for a Python
    object call, based on what is known about the callee.
    """
    if func_type is None:
        func_type = function.type
    if func_type is Builtin.type_type and (
            function.is_name and
            function.entry and
            function.entry.is_builtin and
            function.entry.name in Builtin.types_that_construct_their_instance):
        # calling a builtin type that returns a specific object type
        if function.entry.name == 'float':
            # the following will come true later on in a transform
            self.type = PyrexTypes.c_double_type
            self.result_ctype = PyrexTypes.c_double_type
        else:
            self.type = Builtin.builtin_types[function.entry.name]
            self.result_ctype = py_object_type
        self.may_return_none = False
    elif function.is_name and function.type_entry:
        # We are calling an extension type constructor. As long as we do not
        # support __new__(), the result type is clear
        self.type = function.type_entry.type
        self.result_ctype = py_object_type
        self.may_return_none = False
    else:
        # Unknown callee => generic Python object result.
        self.type = py_object_type
+
def analyse_as_type_constructor(self, env):
type = self.function.analyse_as_type(env)
if type and type.is_struct_or_union:
error(self.args[0].pos, "Unknown type")
else:
return PyrexTypes.CPtrType(type)
+ elif attr == 'typeof':
+ if len(self.args) != 1:
+ error(self.args.pos, "only one type allowed.")
+ operand = self.args[0].analyse_types(env)
+ return operand.type
def explicit_args_kwds(self):
return self.args, None
has_pythran_args &= is_pythran_supported_node_or_none(arg)
self.is_numpy_call_with_exprs = bool(has_pythran_args)
if self.is_numpy_call_with_exprs:
- self.args = None
env.add_include_file("pythonic/numpy/%s.hpp" % self.function.attribute)
- self.type = PythranExpr(pythran_func_type(self.function.attribute, self.arg_tuple.args))
- self.may_return_none = True
- self.is_temp = 1
+ return NumPyMethodCallNode.from_node(
+ self,
+ function=self.function,
+ arg_tuple=self.arg_tuple,
+ type=PythranExpr(pythran_func_type(self.function.attribute, self.arg_tuple.args)),
+ )
elif func_type.is_pyobject:
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
self.args = None
- if func_type is Builtin.type_type and function.is_name and \
- function.entry and \
- function.entry.is_builtin and \
- function.entry.name in Builtin.types_that_construct_their_instance:
- # calling a builtin type that returns a specific object type
- if function.entry.name == 'float':
- # the following will come true later on in a transform
- self.type = PyrexTypes.c_double_type
- self.result_ctype = PyrexTypes.c_double_type
- else:
- self.type = Builtin.builtin_types[function.entry.name]
- self.result_ctype = py_object_type
- self.may_return_none = False
- elif function.is_name and function.type_entry:
- # We are calling an extension type constructor. As
- # long as we do not support __new__(), the result type
- # is clear
- self.type = function.type_entry.type
- self.result_ctype = py_object_type
- self.may_return_none = False
- else:
- self.type = py_object_type
+ self.set_py_result_type(function, func_type)
self.is_temp = 1
else:
self.args = [ arg.analyse_types(env) for arg in self.args ]
for i in range(min(max_nargs, actual_nargs)):
formal_arg = func_type.args[i]
formal_type = formal_arg.type
- if formal_type.is_const:
- formal_type = formal_type.const_base_type
arg = args[i].coerce_to(formal_type, env)
if formal_arg.not_none:
# C methods must do the None checks at *call* time
return False # skip allocation of unused result temp
return True
def generate_evaluation_code(self, code):
    """Generate evaluation code for this call, special-casing Python calls
    with zero or one positional argument to avoid building an args tuple.
    """
    function = self.function
    # Emit any utility code attached to the callee's entry first.
    if function.is_name or function.is_attribute:
        code.globalstate.use_entry_utility_code(function.entry)

    # Generic path for: non-Python (C) calls, calls with more than one
    # argument, and single-argument calls backed by a literal args tuple.
    if not function.type.is_pyobject or len(self.arg_tuple.args) > 1 or (
            self.arg_tuple.args and self.arg_tuple.is_literal):
        super(SimpleCallNode, self).generate_evaluation_code(code)
        return

    # Special case 0-args and try to avoid explicit tuple creation for Python calls with 1 arg.
    arg = self.arg_tuple.args[0] if self.arg_tuple.args else None
    subexprs = (self.self, self.coerced_self, function, arg)
    for subexpr in subexprs:
        if subexpr is not None:
            subexpr.generate_evaluation_code(code)

    code.mark_pos(self.pos)
    assert self.is_temp
    self.allocate_temp_result(code)

    if arg is None:
        # No arguments => call via __Pyx_PyObject_CallNoArg().
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCallNoArg", "ObjectHandling.c"))
        code.putln(
            "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
                self.result(),
                function.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
    else:
        # Exactly one argument => __Pyx_PyObject_CallOneArg(), no args tuple.
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCallOneArg", "ObjectHandling.c"))
        code.putln(
            "%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
                self.result(),
                function.py_result(),
                arg.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))

    code.put_gotref(self.py_result())

    # Dispose of the subexpressions in the same order they were evaluated.
    for subexpr in subexprs:
        if subexpr is not None:
            subexpr.generate_disposal_code(code)
            subexpr.free_temps(code)
+
def generate_result_code(self, code):
func_type = self.function_type()
- if self.function.is_name or self.function.is_attribute:
- code.globalstate.use_entry_utility_code(self.function.entry)
if func_type.is_pyobject:
- if func_type is not type_type and not self.arg_tuple.args and self.arg_tuple.is_literal:
- code.globalstate.use_utility_code(UtilityCode.load_cached(
- "PyObjectCallNoArg", "ObjectHandling.c"))
- code.putln(
- "%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
- self.result(),
- self.function.py_result(),
- code.error_goto_if_null(self.result(), self.pos)))
- else:
- arg_code = self.arg_tuple.py_result()
- code.globalstate.use_utility_code(UtilityCode.load_cached(
- "PyObjectCall", "ObjectHandling.c"))
- code.putln(
- "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
- self.result(),
- self.function.py_result(),
- arg_code,
- code.error_goto_if_null(self.result(), self.pos)))
+ arg_code = self.arg_tuple.py_result()
+ code.globalstate.use_utility_code(UtilityCode.load_cached(
+ "PyObjectCall", "ObjectHandling.c"))
+ code.putln(
+ "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
+ self.result(),
+ self.function.py_result(),
+ arg_code,
+ code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif func_type.is_cfunction:
if self.has_optional_args:
if self.has_optional_args:
code.funcstate.release_temp(self.opt_arg_struct)
- @classmethod
- def from_node(cls, node, **kwargs):
- ret = super(SimpleCallNode, cls).from_node(node, **kwargs)
- ret.is_numpy_call_with_exprs = node.is_numpy_call_with_exprs
- return ret
+
class NumPyMethodCallNode(SimpleCallNode):
    # Pythran call to a NumPy function or method.
    #
    # function    ExprNode    the function/method to call
    # arg_tuple   TupleNode   the arguments as an args tuple

    subexprs = ['function', 'arg_tuple']
    is_temp = True
    may_return_none = True

    def generate_evaluation_code(self, code):
        # The result temp holds a Pythran (C++) expression value, constructed
        # in place below rather than assigned.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)

        self.function.generate_evaluation_code(code)
        # Argument tuple multiplication ("args * n") is not supported here.
        assert self.arg_tuple.mult_factor is None
        args = self.arg_tuple.args
        for arg in args:
            arg.generate_evaluation_code(code)

        # Destroy the temp's previous value, then placement-new the result of
        # calling the pythonic numpy functor on the Pythran argument values.
        code.putln("// function evaluation code for numpy function")
        code.putln("__Pyx_call_destructor(%s);" % self.result())
        code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::%s{}(%s)};" % (
            self.result(),
            self.result(),
            self.function.attribute,
            ", ".join(a.pythran_result() for a in args)))
class PyMethodCallNode(SimpleCallNode):
for arg in args:
arg.generate_evaluation_code(code)
- if self.is_numpy_call_with_exprs:
- code.putln("// function evaluation code for numpy function")
- code.putln("__Pyx_call_destructor(%s);" % self.result())
- code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::%s{}(%s)};" % (
- self.result(),
- self.result(),
- self.function.attribute,
- ", ".join(a.pythran_result() for a in self.arg_tuple.args)))
- return
-
# make sure function is in temp so that we can replace the reference below if it's a method
reuse_function_temp = self.function.is_temp
if reuse_function_temp:
SimpleCallNode.__init__(self, pos, **kwargs)
class CachedBuiltinMethodCallNode(CallNode):
    # Python call to a method of a known Python builtin (only created in transforms)
    #
    # obj          ExprNode    the builtin object whose method is called
    # method_name  string      name of the method to call
    # args         [ExprNode]  positional call arguments

    subexprs = ['obj', 'args']
    is_temp = True

    def __init__(self, call_node, obj, method_name, args):
        # Take position, result type and None-ness over from the original
        # call node that this node replaces.
        super(CachedBuiltinMethodCallNode, self).__init__(
            call_node.pos,
            obj=obj, method_name=method_name, args=args,
            may_return_none=call_node.may_return_none,
            type=call_node.type)

    def may_be_none(self):
        # Prefer the explicitly propagated setting; otherwise fall back to
        # the generic ExprNode behaviour.
        if self.may_return_none is not None:
            return self.may_return_none
        return ExprNode.may_be_none(self)

    def generate_result_code(self, code):
        # Emit the call through GlobalState's cached unbound-method call
        # helper for the builtin's known type.
        type_cname = self.obj.type.cname
        obj_cname = self.obj.py_result()
        args = [arg.py_result() for arg in self.args]
        call_code = code.globalstate.cached_unbound_method_call_code(
            obj_cname, type_cname, self.method_name, args)
        code.putln("%s = %s; %s" % (
            self.result(), call_code,
            code.error_goto_if_null(self.result(), self.pos)
        ))
        code.put_gotref(self.result())
+
+
class GeneralCallNode(CallNode):
# General Python function call, including keyword,
# * and ** arguments.
self.positional_args = self.positional_args.analyse_types(env)
self.positional_args = \
self.positional_args.coerce_to_pyobject(env)
- function = self.function
- if function.is_name and function.type_entry:
- # We are calling an extension type constructor. As long
- # as we do not support __new__(), the result type is clear
- self.type = function.type_entry.type
- self.result_ctype = py_object_type
- self.may_return_none = False
- else:
- self.type = py_object_type
+ self.set_py_result_type(self.function)
self.is_temp = 1
return self
code.putln("PyObject* sequence = %s;" % rhs.py_result())
# list/tuple => check size
- code.putln("#if !CYTHON_COMPILING_IN_PYPY")
- code.putln("Py_ssize_t size = Py_SIZE(sequence);")
- code.putln("#else")
- code.putln("Py_ssize_t size = PySequence_Size(sequence);") # < 0 => exception
- code.putln("#endif")
+ code.putln("Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);")
code.putln("if (unlikely(size != %d)) {" % len(self.args))
code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
len(self.args), len(self.args)))
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
+ # < 0 => exception
code.putln(code.error_goto(self.pos))
code.putln("}")
if self.mult_factor or not self.args:
return tuple_type
arg_types = [arg.infer_type(env) for arg in self.args]
- if any(type.is_pyobject or type.is_unspecified or type.is_fused for type in arg_types):
+ if any(type.is_pyobject or type.is_memoryviewslice or type.is_unspecified or type.is_fused
+ for type in arg_types):
return tuple_type
- else:
- return env.declare_tuple_type(self.pos, arg_types).type
+ return env.declare_tuple_type(self.pos, arg_types).type
def analyse_types(self, env, skip_children=False):
if len(self.args) == 0:
arg.starred_expr_allowed_here = True
self.args[i] = arg.analyse_types(env)
if (not self.mult_factor and
- not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_fused) for arg in self.args)):
+ not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.type.is_fused)
+ for arg in self.args)):
self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type
self.is_temp = 1
return self
if len(self.args) == 0:
# result_code is Naming.empty_tuple
return
- if self.is_partly_literal:
- # underlying tuple is const, but factor is not
+
+ if self.is_literal or self.is_partly_literal:
tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
const_code = code.get_cached_constants_writer()
const_code.mark_pos(self.pos)
- self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
+ self.generate_sequence_packing_code(const_code, tuple_target, plain=not self.is_literal)
const_code.put_giveref(tuple_target)
- code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
- self.result(), tuple_target, self.mult_factor.py_result(),
- code.error_goto_if_null(self.result(), self.pos)
+ if self.is_literal:
+ self.result_code = tuple_target
+ else:
+ code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
+ self.result(), tuple_target, self.mult_factor.py_result(),
+ code.error_goto_if_null(self.result(), self.pos)
))
- code.put_gotref(self.py_result())
- elif self.is_literal:
- # non-empty cached tuple => result is global constant,
- # creation code goes into separate code writer
- self.result_code = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
- code = code.get_cached_constants_writer()
- code.mark_pos(self.pos)
- self.generate_sequence_packing_code(code)
- code.put_giveref(self.py_result())
+ code.put_gotref(self.py_result())
else:
self.type.entry.used = True
self.generate_sequence_packing_code(code)
code.put_incref(self.result(), py_object_type)
-class BoundMethodNode(ExprNode):
- # Helper class used in the implementation of Python
- # class definitions. Constructs an bound method
- # object from a class and a function.
- #
- # function ExprNode Function object
- # self_object ExprNode self object
-
- subexprs = ['function']
-
- def analyse_types(self, env):
- self.function = self.function.analyse_types(env)
- self.type = py_object_type
- self.is_temp = 1
- return self
-
- gil_message = "Constructing a bound method"
-
- def generate_result_code(self, code):
- code.putln(
- "%s = __Pyx_PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
- self.result(),
- self.function.py_result(),
- self.self_object.py_result(),
- self.self_object.py_result(),
- code.error_goto_if_null(self.result(), self.pos)))
- code.put_gotref(self.py_result())
-
-class UnboundMethodNode(ExprNode):
- # Helper class used in the implementation of Python
- # class definitions. Constructs an unbound method
- # object from a class and a function.
- #
- # function ExprNode Function object
-
- type = py_object_type
- is_temp = 1
-
- subexprs = ['function']
-
- def analyse_types(self, env):
- self.function = self.function.analyse_types(env)
- return self
-
- def may_be_none(self):
- return False
-
- gil_message = "Constructing an unbound method"
-
- def generate_result_code(self, code):
- class_cname = code.pyclass_stack[-1].classobj.result()
- code.putln(
- "%s = __Pyx_PyMethod_New(%s, 0, %s); %s" % (
- self.result(),
- self.function.py_result(),
- class_cname,
- code.error_goto_if_null(self.result(), self.pos)))
- code.put_gotref(self.py_result())
-
-
class PyCFunctionNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# functions. Constructs a PyCFunction object
for arg in nonliteral_other:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
- allow_pyobject=False)
+ allow_pyobject=False, allow_memoryview=True)
self.defaults.append((arg, entry))
entry = module_scope.declare_struct_or_union(
None, 'struct', scope, 1, None, cname=cname)
def allocate_temp_result(self, code):
if self.temp_code:
- raise RuntimeError("temp allocated mulitple times")
+ raise RuntimeError("temp allocated multiple times")
self.temp_code = code.funcstate.allocate_temp(self.type, True)
self.literal = literal.coerce_to_pyobject(env)
return self
def analyse_as_type(self, env):
    # Interpret this node in a type position: the type it denotes is the
    # analysed type of its operand expression.
    self.operand = self.operand.analyse_types(env)
    return self.operand.type
+
def may_be_none(self):
return False
def infer_builtin_types_operation(self, type1, type2):
# b'abc' + 'abc' raises an exception in Py3,
# so we can safely infer the Py2 type for bytes here
- string_types = (bytes_type, str_type, basestring_type, unicode_type)
+ string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2 in string_types:
return string_types[max(string_types.index(type1),
string_types.index(type2))]
def infer_builtin_types_operation(self, type1, type2):
# let's assume that whatever builtin type you multiply a string with
# will either return a string of the same type or fail with an exception
- string_types = (bytes_type, str_type, basestring_type, unicode_type)
+ string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2.is_builtin_type:
return type1
if type2 in string_types and type1.is_builtin_type:
operator=self.operator,
operand1=operand1, operand2=operand2)
- def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
+ def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
outer_labels = (and_label, or_label)
else:
my_label = or_label = code.new_label('next_or')
self.operand1.generate_bool_evaluation_code(
- code, final_result_temp, and_label, or_label, end_label, my_label)
+ code, final_result_temp, final_result_type, and_label, or_label, end_label, my_label)
and_label, or_label = outer_labels
code.put_label(my_label)
self.operand2.generate_bool_evaluation_code(
- code, final_result_temp, and_label, or_label, end_label, fall_through)
+ code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through)
def generate_evaluation_code(self, code):
self.allocate_temp_result(code)
+ result_type = PyrexTypes.py_object_type if self.type.is_pyobject else self.type
or_label = and_label = None
end_label = code.new_label('bool_binop_done')
- self.generate_bool_evaluation_code(code, self.result(), and_label, or_label, end_label, end_label)
+ self.generate_bool_evaluation_code(code, self.result(), result_type, and_label, or_label, end_label, end_label)
code.put_label(end_label)
gil_message = "Truth-testing Python object"
test_result = self.arg.result()
return (test_result, self.arg.type.is_pyobject)
- def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
+ def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
# x => x
code.putln("} else {")
self.value.generate_evaluation_code(code)
self.value.make_owned_reference(code)
- code.putln("%s = %s;" % (final_result_temp, self.value.result()))
+ code.putln("%s = %s;" % (final_result_temp, self.value.result_as(final_result_type)))
self.value.generate_post_assignment_code(code)
# disposal: {not (and_label and or_label) [else]}
self.arg.generate_disposal_code(code)
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF"
return True
+ elif self.operand2.type is Builtin.set_type:
+ self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
+ self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySetContains", "ObjectHandling.c")
+ self.special_bool_cmp_function = "__Pyx_PySet_ContainsTF"
+ return True
elif self.operand2.type is Builtin.unicode_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
is_memslice_nonecheck = False
def infer_type(self, env):
- # TODO: Actually implement this (after merging with -unstable).
+ type1 = self.operand1.infer_type(env)
+ type2 = self.operand2.infer_type(env)
+
+ if is_pythran_expr(type1) or is_pythran_expr(type2):
+ if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
+ return PythranExpr(pythran_binop_type(self.operator, type1, type2))
+
+ # TODO: implement this for other types.
return py_object_type
def type_dependencies(self, env):
def generate_result_code(self, code):
self.type.create_from_py_utility_code(self.env)
- code.putln("%s = %s(%s);" % (self.result(),
- self.type.from_py_function,
- self.arg.py_result()))
-
- error_cond = self.type.error_condition(self.result())
- code.putln(code.error_goto_if(error_cond, self.pos))
+ code.putln(self.type.from_py_call_code(
+ self.arg.py_result(),
+ self.result(),
+ self.pos,
+ code
+ ))
class CastNode(CoercionNode):
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def reanalyse(self):
    # Try to replace this node by a cheaper equivalent: keep it unless the
    # argument is a temp that already has exactly the target type and the
    # type object is available.
    if self.type != self.arg.type or not self.arg.is_temp:
        return self
    if not self.type.typeobj_is_available():
        return self
    if self.arg.may_be_none() and self.notnone:
        # Only a None check remains necessary; wrap the argument with it.
        return self.arg.as_none_safe_node("Cannot convert NoneType to %.200s" % self.type.name)
    return self.arg
+
def calculate_constant_result(self):
# FIXME
pass
Builtin.set_type: 'PySet_GET_SIZE',
Builtin.frozenset_type: 'PySet_GET_SIZE',
Builtin.bytes_type: 'PyBytes_GET_SIZE',
+ Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
Builtin.unicode_type: '__Pyx_PyUnicode_IS_TRUE',
}
return
test_func = self._special_builtins.get(self.arg.type)
if test_func is not None:
- code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % (
- self.result(),
- self.arg.py_result(),
- test_func,
- self.arg.py_result()))
+ checks = ["(%s != Py_None)" % self.arg.py_result()] if self.arg.may_be_none() else []
+ checks.append("(%s(%s) != 0)" % (test_func, self.arg.py_result()))
+ code.putln("%s = %s;" % (self.result(), '&&'.join(checks)))
else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
"""
Create a new local scope for the copied node and append it to
self.nodes. A new local scope is needed because the arguments with the
- fused types are aready in the local scope, and we need the specialized
+ fused types are already in the local scope, and we need the specialized
entries created after analyse_declarations on each specialized version
of the (CFunc)DefNode.
f2s is a dict mapping each fused type to its specialized version
def _fused_instance_checks(self, normal_types, pyx_code, env):
"""
- Genereate Cython code for instance checks, matching an object to
+ Generate Cython code for instance checks, matching an object to
specialized types.
"""
for specialized_type in normal_types:
coerce_from_py_func=memslice_type.from_py_function,
dtype=dtype)
decl_code.putln(
- "{{memviewslice_cname}} {{coerce_from_py_func}}(object)")
+ "{{memviewslice_cname}} {{coerce_from_py_func}}(object, int)")
pyx_code.context.update(
specialized_type_name=specialized_type.specialization_string,
u"""
# try {{dtype}}
if itemsize == -1 or itemsize == {{sizeof_dtype}}:
- memslice = {{coerce_from_py_func}}(arg)
+ memslice = {{coerce_from_py_func}}(arg, 0)
if memslice.memview:
__PYX_XDEC_MEMVIEW(&memslice, 1)
# print 'found a match for the buffer through format parsing'
# The first thing to find a match in this loop breaks out of the loop
pyx_code.put_chunk(
u"""
+ """ + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u"""
if ndarray is not None:
if isinstance(arg, ndarray):
dtype = arg.dtype
- arg_is_pythran_compatible = True
+ """ + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u"""
elif __pyx_memoryview_check(arg):
arg_base = arg.base
if isinstance(arg_base, ndarray):
if dtype is not None:
itemsize = dtype.itemsize
kind = ord(dtype.kind)
- # We only support the endianess of the current compiler
+ dtype_signed = kind == 'i'
+ """)
+ pyx_code.indent(2)
+ if pythran_types:
+ pyx_code.put_chunk(
+ u"""
+ # Pythran only supports the endianness of the current compiler
byteorder = dtype.byteorder
if byteorder == "<" and not __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
- if byteorder == ">" and __Pyx_Is_Little_Endian():
+ elif byteorder == ">" and __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
- dtype_signed = kind == 'i'
if arg_is_pythran_compatible:
cur_stride = itemsize
- for dim,stride in zip(reversed(arg.shape),reversed(arg.strides)):
- if stride != cur_stride:
+ shape = arg.shape
+ strides = arg.strides
+ for i in range(arg.ndim-1, -1, -1):
+ if (<Py_ssize_t>strides[i]) != cur_stride:
arg_is_pythran_compatible = False
break
- cur_stride *= dim
+ cur_stride *= <Py_ssize_t> shape[i]
else:
- arg_is_pythran_compatible = not (arg.flags.f_contiguous and arg.ndim > 1)
- """)
- pyx_code.indent(2)
+ arg_is_pythran_compatible = not (arg.flags.f_contiguous and (<Py_ssize_t>arg.ndim) > 1)
+ """)
pyx_code.named_insertion_point("numpy_dtype_checks")
self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
pyx_code.dedent(2)
self._buffer_parse_format_string_check(
pyx_code, decl_code, specialized_type, env)
- def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types):
+ def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types):
"""
If we have any buffer specializations, write out some variable
declarations and imports.
cdef Py_ssize_t itemsize
cdef bint dtype_signed
cdef char kind
- cdef bint arg_is_pythran_compatible
itemsize = -1
- arg_is_pythran_compatible = False
+ """)
+
+ if pythran_types:
+ pyx_code.local_variable_declarations.put_chunk(u"""
+ cdef bint arg_is_pythran_compatible
+ cdef Py_ssize_t cur_stride
""")
pyx_code.imports.put_chunk(
pyx_code.local_variable_declarations.put_chunk(
u"""
cdef bint {{dtype_name}}_is_signed
- {{dtype_name}}_is_signed = <{{dtype_type}}> -1 < 0
+ {{dtype_name}}_is_signed = not (<{{dtype_type}}> -1 > 0)
""")
def _split_fused_types(self, arg):
default_idx += 1
if all_buffer_types:
- self._buffer_declarations(pyx_code, decl_code, all_buffer_types)
+ self._buffer_declarations(pyx_code, decl_code, all_buffer_types, pythran_types)
env.use_utility_code(Code.UtilityCode.load_cached("Import", "ImportExport.c"))
env.use_utility_code(Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c"))
except ImportError:
basestring = str
-from . import Errors
# Do not import Parsing here, import it when needed, because Parsing imports
# Nodes, which globally needs debug command line options initialized to set a
# conditional metaclass. These options are processed by CmdLine called from
# main() in this file.
# import Parsing
+from . import Errors
from .StringEncoding import EncodedString
from .Scanning import PyrexScanner, FileSourceDescriptor
from .Errors import PyrexError, CompileError, error, warning
verbose = 0
+
class CompilationData(object):
# Bundles the information that is passed from transform to transform.
# (For now, this is only)
# result CompilationResult
pass
+
class Context(object):
# This class encapsulates the context needed for compiling
# one or more Cython implementation files along with their
pxd = self.search_include_directories(qualified_name, ".pxd", pos, sys_path=sys_path)
if pxd is None: # XXX Keep this until Includes/Deprecated is removed
if (qualified_name.startswith('python') or
- qualified_name in ('stdlib', 'stdio', 'stl')):
+ qualified_name in ('stdlib', 'stdio', 'stl')):
standard_include_path = os.path.abspath(os.path.normpath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
deprecated_include_path = os.path.join(standard_include_path, 'Deprecated')
from ..Parser import ConcreteSyntaxTree
except ImportError:
raise RuntimeError(
- "Formal grammer can only be used with compiled Cython with an available pgen.")
+ "Formal grammar can only be used with compiled Cython with an available pgen.")
ConcreteSyntaxTree.p_module(source_filename)
except UnicodeDecodeError as e:
#import traceback
pass
result.c_file = None
+
def get_output_filename(source_filename, cwd, options):
if options.cplus:
c_suffix = ".cpp"
else:
return suggested_file_name
+
def create_default_resultobj(compilation_source, options):
result = CompilationResult()
result.main_source_file = compilation_source.source_desc.filename
result.embedded_metadata = options.embedded_metadata
return result
+
def run_pipeline(source, options, full_module_name=None, context=None):
from . import Pipeline
return result
-#------------------------------------------------------------------------
+# ------------------------------------------------------------------------
#
# Main Python entry points
#
-#------------------------------------------------------------------------
+# ------------------------------------------------------------------------
class CompilationSource(object):
"""
- Contains the data necesarry to start up a compilation pipeline for
+ Contains the data necessary to start up a compilation pipeline for
a single compilation unit.
"""
def __init__(self, source_desc, full_module_name, cwd):
self.full_module_name = full_module_name
self.cwd = cwd
+
class CompilationOptions(object):
"""
Options to the Cython compiler:
processed.add(source)
return results
+
def compile(source, options = None, full_module_name = None, **kwds):
"""
compile(source [, options], [, <option> = <value>]...)
Compile one or more Pyrex implementation files, with optional timestamp
- checking and recursing on dependecies. The source argument may be a string
- or a sequence of strings If it is a string and no recursion or timestamp
+ checking and recursing on dependencies. The source argument may be a string
+ or a sequence of strings. If it is a string and no recursion or timestamp
checking is requested, a CompilationResult is returned, otherwise a
CompilationResultSet is returned.
"""
else:
return compile_multiple(source, options)
-#------------------------------------------------------------------------
+
+# ------------------------------------------------------------------------
#
# Main command-line entry point
#
-#------------------------------------------------------------------------
+# ------------------------------------------------------------------------
+
def setuptools_main():
return main(command_line = 1)
+
def main(command_line = 0):
args = sys.argv[1:]
any_failures = 0
sys.exit(1)
-
-#------------------------------------------------------------------------
+# ------------------------------------------------------------------------
#
# Set the default options depending on the platform
#
-#------------------------------------------------------------------------
+# ------------------------------------------------------------------------
default_options = dict(
show_version = 0,
format_flag = "PyBUF_FORMAT"
-memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
-memview_f_contiguous = "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
-memview_any_contiguous = "(PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE)"
-memview_full_access = "PyBUF_FULL"
-#memview_strided_access = "PyBUF_STRIDED"
-memview_strided_access = "PyBUF_RECORDS"
+memview_c_contiguous = "(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT)"
+memview_f_contiguous = "(PyBUF_F_CONTIGUOUS | PyBUF_FORMAT)"
+memview_any_contiguous = "(PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT)"
+memview_full_access = "PyBUF_FULL_RO"
+#memview_strided_access = "PyBUF_STRIDED_RO"
+memview_strided_access = "PyBUF_RECORDS_RO"
MEMVIEW_DIRECT = '__Pyx_MEMVIEW_DIRECT'
MEMVIEW_PTR = '__Pyx_MEMVIEW_PTR'
import cython
cython.declare(Naming=object, Options=object, PyrexTypes=object, TypeSlots=object,
error=object, warning=object, py_object_type=object, UtilityCode=object,
- EncodedString=object)
+ EncodedString=object, re=object)
import json
import os
+import re
import operator
from .PyrexTypes import CPtrType
from . import Future
from .Errors import error, warning
from .PyrexTypes import py_object_type
from ..Utils import open_new_file, replace_suffix, decode_filename
-from .Code import UtilityCode
+from .Code import UtilityCode, IncludeCode
from .StringEncoding import EncodedString
from .Pythran import has_np_pythran
self.scope.utility_code_list.extend(scope.utility_code_list)
+ for inc in scope.c_includes.values():
+ self.scope.process_include(inc)
+
def extend_if_not_in(L1, L2):
for x in L2:
if x not in L1:
L1.append(x)
- extend_if_not_in(self.scope.include_files, scope.include_files)
extend_if_not_in(self.scope.included_files, scope.included_files)
- extend_if_not_in(self.scope.python_include_files,
- scope.python_include_files)
if merge_scope:
# Ensure that we don't generate import code for these entries!
code.putln("")
code.putln("/* Implementation of '%s' */" % env.qualified_name)
+ code = globalstate['late_includes']
+ code.putln("/* Late includes */")
+ self.generate_includes(env, modules, code, early=False)
+
code = globalstate['all_the_rest']
self.generate_cached_builtins_decls(env, code)
code.putln("")
code.putln("#define PY_SSIZE_T_CLEAN")
- for filename in env.python_include_files:
- code.putln('#include "%s"' % filename)
+ for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
+ if inc.location == inc.INITIAL:
+ inc.write(code)
code.putln("#ifndef Py_PYTHON_H")
code.putln(" #error Python headers needed to compile C extensions, "
"please install development version of Python.")
self._put_setup_code(code, "CppInitCode")
else:
self._put_setup_code(code, "CInitCode")
+ self._put_setup_code(code, "PythonCompatibility")
self._put_setup_code(code, "MathInitCode")
if options.c_line_in_traceback:
code.putln("#define %s" % Naming.h_guard_prefix + self.api_name(env))
code.putln("#define %s" % Naming.api_guard_prefix + self.api_name(env))
- self.generate_includes(env, cimported_modules, code)
+ code.putln("/* Early includes */")
+ self.generate_includes(env, cimported_modules, code, late=False)
code.putln("")
code.putln("#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)")
code.putln("#define CYTHON_WITHOUT_ASSERTIONS")
code.putln(" #define DL_IMPORT(_T) _T")
code.putln("#endif")
- def generate_includes(self, env, cimported_modules, code):
+ def generate_includes(self, env, cimported_modules, code, early=True, late=True):
includes = []
- for filename in env.include_files:
- byte_decoded_filenname = str(filename)
- if byte_decoded_filenname[0] == '<' and byte_decoded_filenname[-1] == '>':
- code.putln('#include %s' % byte_decoded_filenname)
- else:
- code.putln('#include "%s"' % byte_decoded_filenname)
-
- code.putln_openmp("#include <omp.h>")
+ for inc in sorted(env.c_includes.values(), key=IncludeCode.sortkey):
+ if inc.location == inc.EARLY:
+ if early:
+ inc.write(code)
+ elif inc.location == inc.LATE:
+ if late:
+ inc.write(code)
+ if early:
+ code.putln_openmp("#include <omp.h>")
def generate_filename_table(self, code):
from os.path import isabs, basename
# If that raises an AttributeError, call the __getattr__ if defined.
#
# In both cases, defined can be in this class, or any base class.
- def lookup_here_or_base(n, type=None):
+ def lookup_here_or_base(n, tp=None, extern_return=None):
# Recursive lookup
- if type is None:
- type = scope.parent_type
- r = type.scope.lookup_here(n)
- if r is None and \
- type.base_type is not None:
- return lookup_here_or_base(n, type.base_type)
- else:
- return r
+ if tp is None:
+ tp = scope.parent_type
+ r = tp.scope.lookup_here(n)
+ if r is None:
+ if tp.is_external and extern_return is not None:
+ return extern_return
+ if tp.base_type is not None:
+ return lookup_here_or_base(n, tp.base_type)
+ return r
+
+ has_instance_dict = lookup_here_or_base("__dict__", extern_return="extern")
getattr_entry = lookup_here_or_base("__getattr__")
getattribute_entry = lookup_here_or_base("__getattribute__")
code.putln("")
"PyObject *v = %s(o, n);" % (
getattribute_entry.func_cname))
else:
+ if not has_instance_dict and scope.parent_type.is_final_type:
+ # Final with no dict => use faster type attribute lookup.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObject_GenericGetAttrNoDict", "ObjectHandling.c"))
+ generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict"
+ elif not has_instance_dict or has_instance_dict == "extern":
+ # No dict in the known ancestors, but don't know about extern ancestors or subtypes.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("PyObject_GenericGetAttr", "ObjectHandling.c"))
+ generic_getattr_cfunc = "__Pyx_PyObject_GenericGetAttr"
+ else:
+ generic_getattr_cfunc = "PyObject_GenericGetAttr"
code.putln(
- "PyObject *v = PyObject_GenericGetAttr(o, n);")
+ "PyObject *v = %s(o, n);" % generic_getattr_cfunc)
if getattr_entry is not None:
code.putln(
"if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {")
code.putln("return -1;")
code.putln("}")
code.putln("")
- code.putln(UtilityCode.load_cached("ImportStar", "ImportExport.c").impl)
+ code.putln(UtilityCode.load_as_string("ImportStar", "ImportExport.c")[1])
code.exit_cfunc_scope() # done with labels
def generate_module_init_func(self, imported_modules, env, code):
+ subfunction = self.mod_init_subfunction(self.scope, code)
+
code.enter_cfunc_scope(self.scope)
code.putln("")
- header2 = "PyMODINIT_FUNC init%s(void)" % env.module_name
- header3 = "PyMODINIT_FUNC %s(void)" % self.mod_init_func_cname('PyInit', env)
+ code.putln(UtilityCode.load_as_string("PyModInitFuncType", "ModuleSetupCode.c")[0])
+ header2 = "__Pyx_PyMODINIT_FUNC init%s(void)" % env.module_name
+ header3 = "__Pyx_PyMODINIT_FUNC %s(void)" % self.mod_init_func_cname('PyInit', env)
code.putln("#if PY_MAJOR_VERSION < 3")
- code.putln("%s; /*proto*/" % header2)
+ # Optimise for small code size as the module init function is only executed once.
+ code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header2)
code.putln(header2)
code.putln("#else")
- code.putln("%s; /*proto*/" % header3)
+ code.putln("%s CYTHON_SMALL_CODE; /*proto*/" % header3)
code.putln(header3)
# CPython 3.5+ supports multi-phase module initialisation (gives access to __spec__, __file__, etc.)
code.putln("")
# main module init code lives in Py_mod_exec function, not in PyInit function
- code.putln("static int %s(PyObject *%s)" % (
+ code.putln("static int %s(PyObject *%s) CYTHON_SMALL_CODE " % (
self.mod_init_func_cname(Naming.pymodule_exec_func_cname, env),
Naming.pymodinit_module_arg))
code.putln("#endif") # PEP489
Naming.module_cname,
Naming.pymodinit_module_arg,
))
+ code.putln("#elif PY_MAJOR_VERSION >= 3")
+ # Hack: enforce single initialisation also on reimports under different names on Python 3 (with PEP 3121/489).
+ code.putln("if (%s) return __Pyx_NewRef(%s);" % (
+ Naming.module_cname,
+ Naming.module_cname,
+ ))
code.putln("#endif")
if profile or linetrace:
tempdecl_code.put_trace_declarations()
code.put_trace_frame_init()
- code.putln("#if CYTHON_REFNANNY")
- code.putln("__Pyx_RefNanny = __Pyx_RefNannyImportAPI(\"refnanny\");")
- code.putln("if (!__Pyx_RefNanny) {")
- code.putln(" PyErr_Clear();")
- code.putln(" __Pyx_RefNanny = __Pyx_RefNannyImportAPI(\"Cython.Runtime.refnanny\");")
- code.putln(" if (!__Pyx_RefNanny)")
- code.putln(" Py_FatalError(\"failed to import 'refnanny' module\");")
- code.putln("}")
- code.putln("#endif")
+ refnanny_import_code = UtilityCode.load_as_string("ImportRefnannyAPI", "ModuleSetupCode.c")[1]
+ code.putln(refnanny_import_code.rstrip())
code.put_setup_refcount_context(header3)
env.use_utility_code(UtilityCode.load("CheckBinaryVersion", "ModuleSetupCode.c"))
code.putln("/*--- Constants init code ---*/")
code.put_error_if_neg(self.pos, "__Pyx_InitCachedConstants()")
- code.putln("/*--- Global init code ---*/")
- self.generate_global_init_code(env, code)
+ code.putln("/*--- Global type/function init code ---*/")
+
+ with subfunction("Global init code") as inner_code:
+ self.generate_global_init_code(env, inner_code)
- code.putln("/*--- Variable export code ---*/")
- self.generate_c_variable_export_code(env, code)
+ with subfunction("Variable export code") as inner_code:
+ self.generate_c_variable_export_code(env, inner_code)
- code.putln("/*--- Function export code ---*/")
- self.generate_c_function_export_code(env, code)
+ with subfunction("Function export code") as inner_code:
+ self.generate_c_function_export_code(env, inner_code)
- code.putln("/*--- Type init code ---*/")
- self.generate_type_init_code(env, code)
+ with subfunction("Type init code") as inner_code:
+ self.generate_type_init_code(env, inner_code)
- code.putln("/*--- Type import code ---*/")
- for module in imported_modules:
- self.generate_type_import_code_for_module(module, env, code)
+ with subfunction("Type import code") as inner_code:
+ for module in imported_modules:
+ self.generate_type_import_code_for_module(module, env, inner_code)
- code.putln("/*--- Variable import code ---*/")
- for module in imported_modules:
- self.generate_c_variable_import_code_for_module(module, env, code)
+ with subfunction("Variable import code") as inner_code:
+ for module in imported_modules:
+ self.generate_c_variable_import_code_for_module(module, env, inner_code)
- code.putln("/*--- Function import code ---*/")
- for module in imported_modules:
- self.specialize_fused_types(module)
- self.generate_c_function_import_code_for_module(module, env, code)
+ with subfunction("Function import code") as inner_code:
+ for module in imported_modules:
+ self.specialize_fused_types(module)
+ self.generate_c_function_import_code_for_module(module, env, inner_code)
code.putln("/*--- Execution code ---*/")
code.mark_pos(None)
code.exit_cfunc_scope()
+ def mod_init_subfunction(self, scope, orig_code):
+ """
+ Return a context manager that allows deviating the module init code generation
+ into a separate function and instead inserts a call to it.
+
+ Can be reused sequentially to create multiple functions.
+ The functions get inserted at the point where the context manager was created.
+ The call gets inserted where the context manager is used (on entry).
+ """
+ prototypes = orig_code.insertion_point()
+ prototypes.putln("")
+ function_code = orig_code.insertion_point()
+ function_code.putln("")
+
+ class ModInitSubfunction(object):
+ def __init__(self, code_type):
+ cname = '_'.join(code_type.lower().split())
+ assert re.match("^[a-z0-9_]+$", cname)
+ self.cfunc_name = "__Pyx_modinit_%s" % cname
+ self.description = code_type
+ self.tempdecl_code = None
+ self.call_code = None
+
+ def __enter__(self):
+ self.call_code = orig_code.insertion_point()
+ code = function_code
+ code.enter_cfunc_scope(scope)
+ prototypes.putln("static int %s(void); /*proto*/" % self.cfunc_name)
+ code.putln("static int %s(void) {" % self.cfunc_name)
+ code.put_declare_refcount_context()
+ self.tempdecl_code = code.insertion_point()
+ code.put_setup_refcount_context(self.cfunc_name)
+ # Leave a grepable marker that makes it easy to find the generator source.
+ code.putln("/*--- %s ---*/" % self.description)
+ return code
+
+ def __exit__(self, *args):
+ code = function_code
+ code.put_finish_refcount_context()
+ code.putln("return 0;")
+
+ self.tempdecl_code.put_temp_declarations(code.funcstate)
+ self.tempdecl_code = None
+
+ needs_error_handling = code.label_used(code.error_label)
+ if needs_error_handling:
+ code.put_label(code.error_label)
+ for cname, type in code.funcstate.all_managed_temps():
+ code.put_xdecref(cname, type)
+ code.put_finish_refcount_context()
+ code.putln("return -1;")
+ code.putln("}")
+ code.exit_cfunc_scope()
+ code.putln("")
+
+ if needs_error_handling:
+ self.call_code.use_label(orig_code.error_label)
+ self.call_code.putln("if (unlikely(%s() != 0)) goto %s;" % (
+ self.cfunc_name, orig_code.error_label))
+ else:
+ self.call_code.putln("(void)%s();" % self.cfunc_name)
+ self.call_code = None
+
+ return ModInitSubfunction
+
def generate_module_import_setup(self, env, code):
module_path = env.directives['set_initial_path']
if module_path == 'SOURCEFILE':
else:
self.generate_base_type_import_code(env, entry, code)
self.generate_exttype_vtable_init_code(entry, code)
- self.generate_type_ready_code(env, entry, code)
- self.generate_typeptr_assignment_code(entry, code)
+ if entry.type.early_init:
+ self.generate_type_ready_code(entry, code)
def generate_base_type_import_code(self, env, entry, code):
base_type = entry.type.base_type
not type.is_external or type.is_subclassed,
error_code))
- def generate_type_ready_code(self, env, entry, code):
- # Generate a call to PyType_Ready for an extension
- # type defined in this module.
- type = entry.type
- typeobj_cname = type.typeobj_cname
- scope = type.scope
- if scope: # could be None if there was an error
- if entry.visibility != 'extern':
- for slot in TypeSlots.slot_table:
- slot.generate_dynamic_init_code(scope, code)
- code.putln(
- "if (PyType_Ready(&%s) < 0) %s" % (
- typeobj_cname,
- code.error_goto(entry.pos)))
- # Don't inherit tp_print from builtin types, restoring the
- # behavior of using tp_repr or tp_str instead.
- code.putln("%s.tp_print = 0;" % typeobj_cname)
- # Fix special method docstrings. This is a bit of a hack, but
- # unless we let PyType_Ready create the slot wrappers we have
- # a significant performance hit. (See trac #561.)
- for func in entry.type.scope.pyfunc_entries:
- is_buffer = func.name in ('__getbuffer__', '__releasebuffer__')
- if (func.is_special and Options.docstrings and
- func.wrapperbase_cname and not is_buffer):
- slot = TypeSlots.method_name_to_slot[func.name]
- preprocessor_guard = slot.preprocessor_guard_code()
- if preprocessor_guard:
- code.putln(preprocessor_guard)
- code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
- code.putln("{")
- code.putln(
- 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)&%s, "%s"); %s' % (
- typeobj_cname,
- func.name,
- code.error_goto_if_null('wrapper', entry.pos)))
- code.putln(
- "if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
- code.putln(
- "%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
- func.wrapperbase_cname))
- code.putln(
- "%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
- code.putln(
- "((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
- func.wrapperbase_cname))
- code.putln("}")
- code.putln("}")
- code.putln('#endif')
- if preprocessor_guard:
- code.putln('#endif')
- if type.vtable_cname:
- code.putln(
- "if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
- typeobj_cname,
- type.vtabptr_cname,
- code.error_goto(entry.pos)))
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
- if not type.scope.is_internal and not type.scope.directives['internal']:
- # scope.is_internal is set for types defined by
- # Cython (such as closures), the 'internal'
- # directive is set by users
- code.putln(
- 'if (PyObject_SetAttrString(%s, "%s", (PyObject *)&%s) < 0) %s' % (
- Naming.module_cname,
- scope.class_name,
- typeobj_cname,
- code.error_goto(entry.pos)))
- weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
- if weakref_entry:
- if weakref_entry.type is py_object_type:
- tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
- if type.typedef_flag:
- objstruct = type.objstruct_cname
- else:
- objstruct = "struct %s" % type.objstruct_cname
- code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
- tp_weaklistoffset,
- tp_weaklistoffset,
- objstruct,
- weakref_entry.cname))
- else:
- error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
- if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None:
- # Unfortunately, we cannot reliably detect whether a
- # superclass defined __reduce__ at compile time, so we must
- # do so at runtime.
- code.globalstate.use_utility_code(
- UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
- code.putln('if (__Pyx_setup_reduce((PyObject*)&%s) < 0) %s' % (
- typeobj_cname,
- code.error_goto(entry.pos)))
+ def generate_type_ready_code(self, entry, code):
+ Nodes.CClassDefNode.generate_type_ready_code(entry, code)
def generate_exttype_vtable_init_code(self, entry, code):
# Generate code to initialise the C method table of an
cast,
meth_entry.func_cname))
- def generate_typeptr_assignment_code(self, entry, code):
- # Generate code to initialise the typeptr of an extension
- # type defined in this module to point to its type object.
- type = entry.type
- if type.typeobj_cname:
- code.putln(
- "%s = &%s;" % (
- type.typeptr_cname, type.typeobj_cname))
-
def generate_cfunction_declaration(entry, env, code, definition):
from_cy_utility = entry.used and entry.utility_code_definition
if entry.used and entry.inline_func_in_pxd or (not entry.in_cinclude and (
return doc
-def analyse_type_annotation(annotation, env):
+def analyse_type_annotation(annotation, env, assigned_value=None):
base_type = None
+ is_ambiguous = False
explicit_pytype = explicit_ctype = False
if annotation.is_dict_literal:
warning(annotation.pos,
warning(annotation.pos, "Duplicate type declarations found in signature annotation")
arg_type = annotation.analyse_as_type(env)
if annotation.is_name and not annotation.cython_attribute and annotation.name in ('int', 'long', 'float'):
+ # Map builtin numeric Python types to C types in safe cases.
+ if assigned_value is not None and arg_type is not None and not arg_type.is_pyobject:
+ assigned_type = assigned_value.infer_type(env)
+ if assigned_type and assigned_type.is_pyobject:
+ # C type seems unsafe, e.g. due to 'None' default value => ignore annotation type
+ is_ambiguous = True
+ arg_type = None
# ignore 'int' and require 'cython.int' to avoid unsafe integer declarations
if arg_type in (PyrexTypes.c_long_type, PyrexTypes.c_int_type, PyrexTypes.c_float_type):
arg_type = PyrexTypes.c_double_type if annotation.name == 'float' else py_object_type
"Python type declaration in signature annotation does not refer to a Python type")
base_type = CAnalysedBaseTypeNode(
annotation.pos, type=arg_type, is_arg=True)
+ elif is_ambiguous:
+ warning(annotation.pos, "Ambiguous types in annotation, ignoring")
else:
warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
return base_type, arg_type
class CDefExternNode(StatNode):
- # include_file string or None
- # body StatNode
+ # include_file string or None
+ # verbatim_include string or None
+ # body StatListNode
child_attrs = ["body"]
def analyse_declarations(self, env):
- if self.include_file:
- env.add_include_file(self.include_file)
old_cinclude_flag = env.in_cinclude
env.in_cinclude = 1
self.body.analyse_declarations(env)
env.in_cinclude = old_cinclude_flag
+ if self.include_file or self.verbatim_include:
+ # Determine whether include should be late
+ stats = self.body.stats
+ if not env.directives['preliminary_late_includes_cy28']:
+ late = False
+ elif not stats:
+ # Special case: empty 'cdef extern' blocks are early
+ late = False
+ else:
+ late = all(isinstance(node, CVarDefNode) for node in stats)
+ env.add_include_file(self.include_file, self.verbatim_include, late)
+
def analyse_expressions(self, env):
return self
scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)
for arg in func_type.args[len(func_type.args) - self.optional_arg_count:]:
- scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=1)
+ scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject=True, allow_memoryview=True)
struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)
annotation = self.annotation
if not annotation:
return None
- base_type, arg_type = analyse_type_annotation(annotation, env)
+ base_type, arg_type = analyse_type_annotation(annotation, env, assigned_value=self.default)
if base_type is not None:
self.base_type = base_type
return arg_type
type = template_node.analyse_as_type(env)
if type is None:
error(template_node.pos, "unknown type in template argument")
- return error_type
+ type = error_type
template_types.append(type)
self.type = base_type.specialize_here(self.pos, template_types)
code_object = self.code_object.calculate_result_code(code) if self.code_object else None
code.put_trace_frame_init(code_object)
+ # ----- Special check for getbuffer
+ if is_getbuffer_slot:
+ self.getbuffer_check(code)
+
# ----- set up refnanny
if use_refnanny:
tempvardecl_code.put_declare_refcount_context()
code.put_var_incref(entry)
# Note: defaults are always incref-ed. For def functions, we
- # we aquire arguments from object converstion, so we have
+ # acquire arguments from object conversion, so we have
# new references. If we are a cdef function, we need to
# incref our arguments
elif is_cdef and entry.type.is_memoryviewslice and len(entry.cf_assignments) > 1:
error(arg.pos, "Invalid use of 'void'")
elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
error(arg.pos, "Argument type '%s' is incomplete" % arg.type)
- return env.declare_arg(arg.name, arg.type, arg.pos)
+ entry = env.declare_arg(arg.name, arg.type, arg.pos)
+ if arg.annotation:
+ entry.annotation = arg.annotation
+ return entry
def generate_arg_type_test(self, arg, code):
# Generate type test for one argument.
#
# Special code for the __getbuffer__ function
#
- def getbuffer_init(self, code):
- info = self.local_scope.arg_entries[1].cname
- # Python 3.0 betas have a bug in memoryview which makes it call
- # getbuffer with a NULL parameter. For now we work around this;
- # the following block should be removed when this bug is fixed.
- code.putln("if (%s != NULL) {" % info)
- code.putln("%s->obj = Py_None; __Pyx_INCREF(Py_None);" % info)
- code.put_giveref("%s->obj" % info) # Do not refnanny object within structs
+ def _get_py_buffer_info(self):
+ py_buffer = self.local_scope.arg_entries[1]
+ try:
+ # Check builtin definition of struct Py_buffer
+ obj_type = py_buffer.type.base_type.scope.entries['obj'].type
+ except (AttributeError, KeyError):
+ # User code redeclared struct Py_buffer
+ obj_type = None
+ return py_buffer, obj_type
+
+ # Old Python 3 used to support write-locks on buffer-like objects by
+ # calling PyObject_GetBuffer() with a view==NULL parameter. This obscure
+ # feature is obsolete, it was almost never used (only one instance in
+ # `Modules/posixmodule.c` in Python 3.1) and it is now officially removed
+ # (see bpo-14203). We add an extra check here to prevent legacy code from
+ # trying to use the feature and prevent segmentation faults.
+ def getbuffer_check(self, code):
+ py_buffer, _ = self._get_py_buffer_info()
+ view = py_buffer.cname
+ code.putln("if (%s == NULL) {" % view)
+ code.putln("PyErr_SetString(PyExc_BufferError, "
+ "\"PyObject_GetBuffer: view==NULL argument is obsolete\");")
+ code.putln("return -1;")
code.putln("}")
+ def getbuffer_init(self, code):
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.put_init_to_py_none("%s->obj" % view, obj_type)
+ code.put_giveref("%s->obj" % view) # Do not refnanny object within structs
+ else:
+ code.putln("%s->obj = NULL;" % view)
+
def getbuffer_error_cleanup(self, code):
- info = self.local_scope.arg_entries[1].cname
- code.putln("if (%s != NULL && %s->obj != NULL) {"
- % (info, info))
- code.put_gotref("%s->obj" % info)
- code.putln("__Pyx_DECREF(%s->obj); %s->obj = NULL;"
- % (info, info))
- code.putln("}")
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.putln("if (%s->obj != NULL) {" % view)
+ code.put_gotref("%s->obj" % view)
+ code.put_decref_clear("%s->obj" % view, obj_type)
+ code.putln("}")
+ else:
+ code.putln("Py_CLEAR(%s->obj);" % view)
def getbuffer_normal_cleanup(self, code):
- info = self.local_scope.arg_entries[1].cname
- code.putln("if (%s != NULL && %s->obj == Py_None) {" % (info, info))
- code.put_gotref("Py_None")
- code.putln("__Pyx_DECREF(Py_None); %s->obj = NULL;" % info)
- code.putln("}")
+ py_buffer, obj_type = self._get_py_buffer_info()
+ view = py_buffer.cname
+ if obj_type and obj_type.is_pyobject:
+ code.putln("if (%s->obj == Py_None) {" % view)
+ code.put_gotref("%s->obj" % view)
+ code.put_decref_clear("%s->obj" % view, obj_type)
+ code.putln("}")
def get_preprocessor_guard(self):
if not self.entry.is_special:
name_declarator, type = formal_arg.analyse(scope, nonempty=1)
cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name,
cname=None,
+ annotation=formal_arg.annotation,
type=py_object_type,
pos=formal_arg.pos))
cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type,
arg_code_list.append(arg_decl_code(self.star_arg))
if self.starstar_arg:
arg_code_list.append(arg_decl_code(self.starstar_arg))
- arg_code = ', '.join(arg_code_list)
+ if arg_code_list:
+ arg_code = ', '.join(arg_code_list)
+ else:
+ arg_code = 'void' # No arguments
dc = self.return_type.declaration_code(self.entry.pyfunc_cname)
decls_code = code.globalstate['decls']
entry = arg.entry
code.putln("%s = %s;" % (entry.cname, item))
else:
- func = arg.type.from_py_function
- if func:
+ if arg.type.from_py_function:
if arg.default:
# C-typed default arguments must be handled here
code.putln('if (%s) {' % item)
- rhs = "%s(%s)" % (func, item)
- if arg.type.is_enum:
- rhs = arg.type.cast_code(rhs)
- code.putln("%s = %s; %s" % (
- arg.entry.cname,
- rhs,
- code.error_goto_if(arg.type.error_condition(arg.entry.cname), arg.pos)))
+ code.putln(arg.type.from_py_call_code(
+ item, arg.entry.cname, arg.pos, code))
if arg.default:
code.putln('} else {')
code.putln("%s = %s;" % (
code.putln('switch (pos_args) {')
for i, arg in enumerate(all_args[:last_required_arg+1]):
if max_positional_args > 0 and i <= max_positional_args:
+ if i != 0:
+ code.putln('CYTHON_FALLTHROUGH;')
if self.star_arg and i == max_positional_args:
code.putln('default:')
else:
- if i != 0:
- code.putln('CYTHON_FALLTHROUGH;')
code.putln('case %2d:' % i)
pystring_cname = code.intern_identifier(arg.name)
if arg.default:
continue
code.putln('if (kw_args > 0) {')
# don't overwrite default argument
- code.putln('PyObject* value = PyDict_GetItem(%s, %s);' % (
+ code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, %s);' % (
Naming.kwds_cname, pystring_cname))
code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
code.putln('}')
else:
- code.putln('if (likely((values[%d] = PyDict_GetItem(%s, %s)) != 0)) kw_args--;' % (
+ code.putln('if (likely((values[%d] = __Pyx_PyDict_GetItemStr(%s, %s)) != 0)) kw_args--;' % (
i, Naming.kwds_cname, pystring_cname))
if i < min_positional_args:
if i == 0:
else:
code.putln('if (kw_args == 1) {')
code.putln('const Py_ssize_t index = %d;' % first_optional_arg)
- code.putln('PyObject* value = PyDict_GetItem(%s, *%s[index]);' % (
+ code.putln('PyObject* value = __Pyx_PyDict_GetItemStr(%s, *%s[index]);' % (
Naming.kwds_cname, Naming.pykwdlist_cname))
code.putln('if (value) { values[index] = value; kw_args--; }')
if len(optional_args) > 1:
def generate_arg_conversion_from_pyobject(self, arg, code):
new_type = arg.type
- func = new_type.from_py_function
# copied from CoerceFromPyTypeNode
- if func:
- lhs = arg.entry.cname
- rhs = "%s(%s)" % (func, arg.hdr_cname)
- if new_type.is_enum:
- rhs = PyrexTypes.typecast(new_type, PyrexTypes.c_long_type, rhs)
- code.putln("%s = %s; %s" % (
- lhs,
- rhs,
- code.error_goto_if(new_type.error_condition(arg.entry.cname), arg.pos)))
+ if new_type.from_py_function:
+ code.putln(new_type.from_py_call_code(
+ arg.hdr_cname,
+ arg.entry.cname,
+ arg.pos,
+ code,
+ ))
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % new_type)
is_generator = True
is_coroutine = False
+ is_iterable_coroutine = False
is_asyncgen = False
gen_type_name = 'Generator'
needs_closure = True
code.putln('{')
code.putln('__pyx_CoroutineObject *gen = __Pyx_%s_New('
- '(__pyx_coroutine_body_t) %s, (PyObject *) %s, %s, %s, %s); %s' % (
+ '(__pyx_coroutine_body_t) %s, %s, (PyObject *) %s, %s, %s, %s); %s' % (
self.gen_type_name,
- body_cname, Naming.cur_scope_cname, name, qualname, module_name,
+ body_cname, self.code_object.calculate_result_code(code) if self.code_object else 'NULL',
+ Naming.cur_scope_cname, name, qualname, module_name,
code.error_goto_if_null('gen', self.pos)))
code.put_decref(Naming.cur_scope_cname, py_object_type)
if self.requires_classobj:
is_coroutine = True
+class IterableAsyncDefNode(AsyncDefNode):
+ gen_type_name = 'IterableCoroutine'
+ is_iterable_coroutine = True
+
+
class AsyncGenNode(AsyncDefNode):
gen_type_name = 'AsyncGen'
is_asyncgen = True
linetrace = code.globalstate.directives['linetrace']
if profile or linetrace:
tempvardecl_code.put_trace_declarations()
+ code.funcstate.can_trace = True
+ code_object = self.code_object.calculate_result_code(code) if self.code_object else None
+ code.put_trace_frame_init(code_object)
# ----- Resume switch point.
code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
# FIXME: this silences a potential "unused" warning => try to avoid unused closures in more cases
code.putln("CYTHON_MAYBE_UNUSED_VAR(%s);" % Naming.cur_scope_cname)
+ if profile or linetrace:
+ code.funcstate.can_trace = False
+
code.mark_pos(self.pos)
code.putln("")
code.putln("/* function exit code */")
# on normal generator termination, we do not take the exception propagation
# path: no traceback info is required and not creating it is much faster
if not self.is_inlined and not self.body.is_terminator:
+ if self.is_async_gen_body:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
code.putln('PyErr_SetNone(%s);' % (
'__Pyx_PyExc_StopAsyncIteration' if self.is_async_gen_body else 'PyExc_StopIteration'))
# ----- Error cleanup
class OverrideCheckNode(StatNode):
# A Node for dispatching to the def method if it
- # is overriden.
+ # is overridden.
#
# py_func
#
if self.is_py3_style_class:
error(self.classobj.pos, "Python3 style class could not be represented as C class")
return
- bases = self.classobj.bases.args
- if len(bases) == 0:
- base_class_name = None
- base_class_module = None
- elif len(bases) == 1:
- base = bases[0]
- path = []
- from .ExprNodes import AttributeNode, NameNode
- while isinstance(base, AttributeNode):
- path.insert(0, base.attribute)
- base = base.obj
- if isinstance(base, NameNode):
- path.insert(0, base.name)
- base_class_name = path[-1]
- if len(path) > 1:
- base_class_module = u'.'.join(path[:-1])
- else:
- base_class_module = None
- else:
- error(self.classobj.bases.args.pos, "Invalid base class")
- else:
- error(self.classobj.bases.args.pos, "C class may only have one base class")
- return None
+ from . import ExprNodes
return CClassDefNode(self.pos,
visibility='private',
module_name=None,
class_name=self.name,
- base_class_module=base_class_module,
- base_class_name=base_class_name,
+ bases=self.classobj.bases or ExprNodes.TupleNode(self.pos, args=[]),
decorators=self.decorators,
body=self.body,
in_pxd=False,
# module_name string or None For import of extern type objects
# class_name string Unqualified name of class
# as_name string or None Name to declare as in this scope
- # base_class_module string or None Module containing the base class
- # base_class_name string or None Name of the base class
+ # bases TupleNode Base class(es)
# objstruct_name string or None Specified C name of object struct
# typeobj_name string or None Specified C name of type object
# in_pxd boolean Is in a .pxd file
self.module.has_extern_class = 1
env.add_imported_module(self.module)
- if self.base_class_name:
- if self.base_class_module:
- base_class_scope = env.find_imported_module(self.base_class_module.split('.'), self.pos)
- if not base_class_scope:
- error(self.pos, "'%s' is not a cimported module" % self.base_class_module)
- return
+ if self.bases.args:
+ base = self.bases.args[0]
+ base_type = base.analyse_as_type(env)
+ if base_type in (PyrexTypes.c_int_type, PyrexTypes.c_long_type, PyrexTypes.c_float_type):
+ # Use the Python rather than C variant of these types.
+ base_type = env.lookup(base_type.sign_and_name()).type
+ if base_type is None:
+ error(base.pos, "First base of '%s' is not an extension type" % self.class_name)
+ elif base_type == PyrexTypes.py_object_type:
+ base_class_scope = None
+ elif not base_type.is_extension_type and \
+ not (base_type.is_builtin_type and base_type.objstruct_cname):
+ error(base.pos, "'%s' is not an extension type" % base_type)
+ elif not base_type.is_complete():
+ error(base.pos, "Base class '%s' of type '%s' is incomplete" % (
+ base_type.name, self.class_name))
+ elif base_type.scope and base_type.scope.directives and \
+ base_type.is_final_type:
+ error(base.pos, "Base class '%s' of type '%s' is final" % (
+ base_type, self.class_name))
+ elif base_type.is_builtin_type and \
+ base_type.name in ('tuple', 'str', 'bytes'):
+ error(base.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
+ % base_type.name)
else:
- base_class_scope = env
- if self.base_class_name == 'object':
- # extension classes are special and don't need to inherit from object
- if base_class_scope is None or base_class_scope.lookup('object') is None:
- self.base_class_name = None
- self.base_class_module = None
- base_class_scope = None
- if base_class_scope:
- base_class_entry = base_class_scope.find(self.base_class_name, self.pos)
- if base_class_entry:
- if not base_class_entry.is_type:
- error(self.pos, "'%s' is not a type name" % self.base_class_name)
- elif not base_class_entry.type.is_extension_type and \
- not (base_class_entry.type.is_builtin_type and
- base_class_entry.type.objstruct_cname):
- error(self.pos, "'%s' is not an extension type" % self.base_class_name)
- elif not base_class_entry.type.is_complete():
- error(self.pos, "Base class '%s' of type '%s' is incomplete" % (
- self.base_class_name, self.class_name))
- elif base_class_entry.type.scope and base_class_entry.type.scope.directives and \
- base_class_entry.type.is_final_type:
- error(self.pos, "Base class '%s' of type '%s' is final" % (
- self.base_class_name, self.class_name))
- elif base_class_entry.type.is_builtin_type and \
- base_class_entry.type.name in ('tuple', 'str', 'bytes'):
- error(self.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
- % base_class_entry.type.name)
- else:
- self.base_type = base_class_entry.type
- if env.directives.get('freelist', 0) > 0:
- warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
+ self.base_type = base_type
+ if env.directives.get('freelist', 0) > 0 and base_type != PyrexTypes.py_object_type:
+ warning(self.pos, "freelists cannot be used on subtypes, only the base class can manage them", 1)
has_body = self.body is not None
if has_body and self.base_type and not self.base_type.scope:
else:
scope.implemented = 1
+ if len(self.bases.args) > 1:
+ if not has_body or self.in_pxd:
+ error(self.bases.args[1].pos, "Only declare first base in declaration.")
+ # At runtime, we check that the other bases are heap types
+ # and that a __dict__ is added if required.
+ for other_base in self.bases.args[1:]:
+ if other_base.analyse_as_type(env):
+ error(other_base.pos, "Only one extension type base class allowed.")
+ self.entry.type.early_init = 0
+ from . import ExprNodes
+ self.type_init_args = ExprNodes.TupleNode(
+ self.pos,
+ args=[ExprNodes.IdentifierStringNode(self.pos, value=self.class_name),
+ self.bases,
+ ExprNodes.DictNode(self.pos, key_value_pairs=[])])
+ elif self.base_type:
+ self.entry.type.early_init = self.base_type.is_external or self.base_type.early_init
+ self.type_init_args = None
+ else:
+ self.entry.type.early_init = 1
+ self.type_init_args = None
+
env.allocate_vtable_names(self.entry)
for thunk in self.entry.type.defered_declarations:
if self.body:
scope = self.entry.type.scope
self.body = self.body.analyse_expressions(scope)
+ if self.type_init_args:
+ self.type_init_args.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
code.mark_pos(self.pos)
if self.body:
self.body.generate_execution_code(code)
+ if not self.entry.type.early_init:
+ if self.type_init_args:
+ self.type_init_args.generate_evaluation_code(code)
+ bases = "PyTuple_GET_ITEM(%s, 1)" % self.type_init_args.result()
+ first_base = "((PyTypeObject*)PyTuple_GET_ITEM(%s, 0))" % bases
+ # Let Python do the base types compatibility checking.
+ trial_type = code.funcstate.allocate_temp(PyrexTypes.py_object_type, True)
+ code.putln("%s = PyType_Type.tp_new(&PyType_Type, %s, NULL);" % (
+ trial_type, self.type_init_args.result()))
+ code.putln(code.error_goto_if_null(trial_type, self.pos))
+ code.put_gotref(trial_type)
+ code.putln("if (((PyTypeObject*) %s)->tp_base != %s) {" % (
+ trial_type, first_base))
+ code.putln("PyErr_Format(PyExc_TypeError, \"best base '%s' must be equal to first base '%s'\",")
+ code.putln(" ((PyTypeObject*) %s)->tp_base->tp_name, %s->tp_name);" % (
+ trial_type, first_base))
+ code.putln(code.error_goto(self.pos))
+ code.putln("}")
+ code.funcstate.release_temp(trial_type)
+ code.put_incref(bases, PyrexTypes.py_object_type)
+ code.put_giveref(bases)
+ code.putln("%s.tp_bases = %s;" % (self.entry.type.typeobj_cname, bases))
+ code.put_decref_clear(trial_type, PyrexTypes.py_object_type)
+ self.type_init_args.generate_disposal_code(code)
+ self.type_init_args.free_temps(code)
+
+ self.generate_type_ready_code(self.entry, code, True)
+
+ # Also called from ModuleNode for early init types.
+ @staticmethod
+ def generate_type_ready_code(entry, code, heap_type_bases=False):
+ # Generate a call to PyType_Ready for an extension
+ # type defined in this module.
+ type = entry.type
+ typeobj_cname = type.typeobj_cname
+ scope = type.scope
+ if not scope: # could be None if there was an error
+ return
+ if entry.visibility != 'extern':
+ for slot in TypeSlots.slot_table:
+ slot.generate_dynamic_init_code(scope, code)
+ if heap_type_bases:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('PyType_Ready', 'ExtensionTypes.c'))
+ readyfunc = "__Pyx_PyType_Ready"
+ else:
+ readyfunc = "PyType_Ready"
+ code.putln(
+ "if (%s(&%s) < 0) %s" % (
+ readyfunc,
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ # Don't inherit tp_print from builtin types, restoring the
+ # behavior of using tp_repr or tp_str instead.
+ code.putln("%s.tp_print = 0;" % typeobj_cname)
+
+ # Use specialised attribute lookup for types with generic lookup but no instance dict.
+ getattr_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_getattro')
+ dictoffset_slot_func = TypeSlots.get_slot_code_by_name(scope, 'tp_dictoffset')
+ if getattr_slot_func == '0' and dictoffset_slot_func == '0':
+ if type.is_final_type:
+ py_cfunc = "__Pyx_PyObject_GenericGetAttrNoDict" # grepable
+ utility_func = "PyObject_GenericGetAttrNoDict"
+ else:
+ py_cfunc = "__Pyx_PyObject_GenericGetAttr"
+ utility_func = "PyObject_GenericGetAttr"
+ code.globalstate.use_utility_code(UtilityCode.load_cached(utility_func, "ObjectHandling.c"))
+
+ code.putln("if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) &&"
+ " likely(!%s.tp_dictoffset && %s.tp_getattro == PyObject_GenericGetAttr)) {" % (
+ typeobj_cname, typeobj_cname))
+ code.putln("%s.tp_getattro = %s;" % (
+ typeobj_cname, py_cfunc))
+ code.putln("}")
+
+ # Fix special method docstrings. This is a bit of a hack, but
+ # unless we let PyType_Ready create the slot wrappers we have
+ # a significant performance hit. (See trac #561.)
+ for func in entry.type.scope.pyfunc_entries:
+ is_buffer = func.name in ('__getbuffer__', '__releasebuffer__')
+ if (func.is_special and Options.docstrings and
+ func.wrapperbase_cname and not is_buffer):
+ slot = TypeSlots.method_name_to_slot.get(func.name)
+ preprocessor_guard = slot.preprocessor_guard_code() if slot else None
+ if preprocessor_guard:
+ code.putln(preprocessor_guard)
+ code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
+ code.putln("{")
+ code.putln(
+ 'PyObject *wrapper = PyObject_GetAttrString((PyObject *)&%s, "%s"); %s' % (
+ typeobj_cname,
+ func.name,
+ code.error_goto_if_null('wrapper', entry.pos)))
+ code.putln(
+ "if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) {")
+ code.putln(
+ "%s = *((PyWrapperDescrObject *)wrapper)->d_base;" % (
+ func.wrapperbase_cname))
+ code.putln(
+ "%s.doc = %s;" % (func.wrapperbase_cname, func.doc_cname))
+ code.putln(
+ "((PyWrapperDescrObject *)wrapper)->d_base = &%s;" % (
+ func.wrapperbase_cname))
+ code.putln("}")
+ code.putln("}")
+ code.putln('#endif')
+ if preprocessor_guard:
+ code.putln('#endif')
+ if type.vtable_cname:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('SetVTable', 'ImportExport.c'))
+ code.putln(
+ "if (__Pyx_SetVtable(%s.tp_dict, %s) < 0) %s" % (
+ typeobj_cname,
+ type.vtabptr_cname,
+ code.error_goto(entry.pos)))
+ if heap_type_bases:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('MergeVTables', 'ImportExport.c'))
+ code.putln("if (__Pyx_MergeVtables(&%s) < 0) %s" % (
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ if not type.scope.is_internal and not type.scope.directives['internal']:
+ # scope.is_internal is set for types defined by
+ # Cython (such as closures), the 'internal'
+ # directive is set by users
+ code.putln(
+ 'if (PyObject_SetAttrString(%s, "%s", (PyObject *)&%s) < 0) %s' % (
+ Naming.module_cname,
+ scope.class_name,
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ weakref_entry = scope.lookup_here("__weakref__") if not scope.is_closure_class_scope else None
+ if weakref_entry:
+ if weakref_entry.type is py_object_type:
+ tp_weaklistoffset = "%s.tp_weaklistoffset" % typeobj_cname
+ if type.typedef_flag:
+ objstruct = type.objstruct_cname
+ else:
+ objstruct = "struct %s" % type.objstruct_cname
+ code.putln("if (%s == 0) %s = offsetof(%s, %s);" % (
+ tp_weaklistoffset,
+ tp_weaklistoffset,
+ objstruct,
+ weakref_entry.cname))
+ else:
+ error(weakref_entry.pos, "__weakref__ slot must be of type 'object'")
+ if scope.lookup_here("__reduce_cython__") if not scope.is_closure_class_scope else None:
+ # Unfortunately, we cannot reliably detect whether a
+ # superclass defined __reduce__ at compile time, so we must
+ # do so at runtime.
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached('SetupReduce', 'ExtensionTypes.c'))
+ code.putln('if (__Pyx_setup_reduce((PyObject*)&%s) < 0) %s' % (
+ typeobj_cname,
+ code.error_goto(entry.pos)))
+ # Generate code to initialise the typeptr of an extension
+ # type defined in this module to point to its type object.
+ if type.typeobj_cname:
+ code.putln(
+ "%s = &%s;" % (
+ type.typeptr_cname, type.typeobj_cname))
def annotate(self, code):
+ if self.type_init_args:
+ self.type_init_args.annotate(code)
if self.body:
self.body.annotate(code)
def analyse_expressions(self, env):
self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr = self.expr.analyse_expressions(env)
+ # Repeat in case of node replacement.
+ self.expr.result_is_used = False # hint that .result() may safely be left empty
return self
def nogil_check(self, env):
def generate_execution_code(self, code):
code.mark_pos(self.pos)
+ self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr.generate_evaluation_code(code)
if not self.expr.is_temp and self.expr.result():
- code.putln("%s;" % self.expr.result())
+ result = self.expr.result()
+ if not self.expr.type.is_void:
+ result = "(void)(%s)" % result
+ code.putln("%s;" % result)
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
if not self.return_type:
# error reported earlier
return
+
+ value = self.value
if self.return_type.is_pyobject:
- code.put_xdecref(Naming.retval_cname,
- self.return_type)
+ code.put_xdecref(Naming.retval_cname, self.return_type)
+ if value and value.is_none:
+ # Use specialised default handling for "return None".
+ value = None
- if self.value:
- self.value.generate_evaluation_code(code)
+ if value:
+ value.generate_evaluation_code(code)
if self.return_type.is_memoryviewslice:
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=Naming.retval_cname,
lhs_type=self.return_type,
- lhs_pos=self.value.pos,
- rhs=self.value,
+ lhs_pos=value.pos,
+ rhs=value,
code=code,
have_gil=self.in_nogil_context)
elif self.in_generator:
UtilityCode.load_cached("ReturnWithStopIteration", "Coroutine.c"))
code.putln("%s = NULL; __Pyx_ReturnWithStopIteration(%s);" % (
Naming.retval_cname,
- self.value.py_result()))
- self.value.generate_disposal_code(code)
+ value.py_result()))
+ value.generate_disposal_code(code)
else:
- self.value.make_owned_reference(code)
+ value.make_owned_reference(code)
code.putln("%s = %s;" % (
Naming.retval_cname,
- self.value.result_as(self.return_type)))
- self.value.generate_post_assignment_code(code)
- self.value.free_temps(code)
+ value.result_as(self.return_type)))
+ value.generate_post_assignment_code(code)
+ value.free_temps(code)
else:
if self.return_type.is_pyobject:
if self.in_generator:
if self.in_async_gen:
+ code.globalstate.use_utility_code(
+ UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
code.put("PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); ")
code.putln("%s = NULL;" % Naming.retval_cname)
else:
code.mark_pos(self.pos)
end_label = code.new_label()
last = len(self.if_clauses)
- if not self.else_clause:
+ if self.else_clause:
+ # If the 'else' clause is 'unlikely', then set the preceding 'if' clause to 'likely' to reflect that.
+ self._set_branch_hint(self.if_clauses[-1], self.else_clause, inverse=True)
+ else:
last -= 1 # avoid redundant goto at end of last if-clause
for i, if_clause in enumerate(self.if_clauses):
+ self._set_branch_hint(if_clause, if_clause.body)
if_clause.generate_execution_code(code, end_label, is_last=i == last)
if self.else_clause:
code.mark_pos(self.else_clause.pos)
code.putln("}")
code.put_label(end_label)
+ def _set_branch_hint(self, clause, statements_node, inverse=False):
+ if not statements_node.is_terminator:
+ return
+ if not isinstance(statements_node, StatListNode) or not statements_node.stats:
+ return
+ # Anything that unconditionally raises exceptions should be considered unlikely.
+ if isinstance(statements_node.stats[-1], (RaiseStatNode, ReraiseStatNode)):
+ if len(statements_node.stats) > 1:
+ # Allow simple statements before the 'raise', but no conditions, loops, etc.
+ non_branch_nodes = (ExprStatNode, AssignmentNode, DelStatNode, GlobalNode, NonlocalNode)
+ for node in statements_node.stats[:-1]:
+ if not isinstance(node, non_branch_nodes):
+ return
+ clause.branch_hint = 'likely' if inverse else 'unlikely'
+
def generate_function_definitions(self, env, code):
for clause in self.if_clauses:
clause.generate_function_definitions(env, code)
# body StatNode
child_attrs = ["condition", "body"]
+ branch_hint = None
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
def generate_execution_code(self, code, end_label, is_last):
self.condition.generate_evaluation_code(code)
code.mark_pos(self.pos)
- code.putln("if (%s) {" % self.condition.result())
+ condition = self.condition.result()
+ if self.branch_hint:
+ condition = '%s(%s)' % (self.branch_hint, condition)
+ code.putln("if (%s) {" % condition)
self.condition.generate_disposal_code(code)
self.condition.free_temps(code)
self.body.generate_execution_code(code)
var.release(code)
+class SetIterationNextNode(Node):
+ # Helper node for calling _PySet_NextEntry() inside of a WhileStatNode
+ # and checking the set size for changes. Created in Optimize.py.
+ child_attrs = ['set_obj', 'expected_size', 'pos_index_var',
+ 'coerced_value_var', 'value_target', 'is_set_flag']
+
+ coerced_value_var = value_ref = None
+
+ def __init__(self, set_obj, expected_size, pos_index_var, value_target, is_set_flag):
+ Node.__init__(
+ self, set_obj.pos,
+ set_obj=set_obj,
+ expected_size=expected_size,
+ pos_index_var=pos_index_var,
+ value_target=value_target,
+ is_set_flag=is_set_flag,
+ is_temp=True,
+ type=PyrexTypes.c_bint_type)
+
+ def analyse_expressions(self, env):
+ from . import ExprNodes
+ self.set_obj = self.set_obj.analyse_types(env)
+ self.expected_size = self.expected_size.analyse_types(env)
+ self.pos_index_var = self.pos_index_var.analyse_types(env)
+ self.value_target = self.value_target.analyse_target_types(env)
+ self.value_ref = ExprNodes.TempNode(self.value_target.pos, type=PyrexTypes.py_object_type)
+ self.coerced_value_var = self.value_ref.coerce_to(self.value_target.type, env)
+ self.is_set_flag = self.is_set_flag.analyse_types(env)
+ return self
+
+ def generate_function_definitions(self, env, code):
+ self.set_obj.generate_function_definitions(env, code)
+
+ def generate_execution_code(self, code):
+ code.globalstate.use_utility_code(UtilityCode.load_cached("set_iter", "Optimize.c"))
+ self.set_obj.generate_evaluation_code(code)
+
+ value_ref = self.value_ref
+ value_ref.allocate(code)
+
+ result_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
+ code.putln("%s = __Pyx_set_iter_next(%s, %s, &%s, &%s, %s);" % (
+ result_temp,
+ self.set_obj.py_result(),
+ self.expected_size.result(),
+ self.pos_index_var.result(),
+ value_ref.result(),
+ self.is_set_flag.result()
+ ))
+ code.putln("if (unlikely(%s == 0)) break;" % result_temp)
+ code.putln(code.error_goto_if("%s == -1" % result_temp, self.pos))
+ code.funcstate.release_temp(result_temp)
+
+ # evaluate all coercions before the assignments
+ code.put_gotref(value_ref.result())
+ self.coerced_value_var.generate_evaluation_code(code)
+ self.value_target.generate_assignment_code(self.coerced_value_var, code)
+ value_ref.release(code)
+
+
def ForStatNode(pos, **kw):
if 'iterator' in kw:
if kw['iterator'].is_async:
"Consider switching the directions of the relations.", 2)
self.step = self.step.analyse_types(env)
- if self.target.type.is_numeric:
- loop_type = self.target.type
+ self.set_up_loop(env)
+ target_type = self.target.type
+ if not (target_type.is_pyobject or target_type.is_numeric):
+ error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
+
+ self.body = self.body.analyse_expressions(env)
+ if self.else_clause:
+ self.else_clause = self.else_clause.analyse_expressions(env)
+ return self
+
+ def set_up_loop(self, env):
+ from . import ExprNodes
+
+ target_type = self.target.type
+ if target_type.is_numeric:
+ loop_type = target_type
else:
- loop_type = PyrexTypes.c_int_type
+ if target_type.is_enum:
+ warning(self.target.pos,
+ "Integer loops over enum values are fragile. Please cast to a safe integer type instead.")
+ loop_type = PyrexTypes.c_long_type if target_type.is_pyobject else PyrexTypes.c_int_type
if not self.bound1.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
if not self.bound2.type.is_pyobject:
if not self.step.is_literal:
self.step = self.step.coerce_to_temp(env)
- target_type = self.target.type
- if not (target_type.is_pyobject or target_type.is_numeric):
- error(self.target.pos, "for-from loop variable must be c numeric type or Python object")
- if target_type.is_numeric:
+ if target_type.is_numeric or target_type.is_enum:
self.is_py_target = False
if isinstance(self.target, ExprNodes.BufferIndexNode):
raise error(self.pos, "Buffer or memoryview slicing/indexing not allowed as for-loop target.")
self.is_py_target = True
c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
self.loopvar_node = c_loopvar_node
- self.py_loopvar_node = \
- ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
- self.body = self.body.analyse_expressions(env)
- if self.else_clause:
- self.else_clause = self.else_clause.analyse_expressions(env)
- return self
+ self.py_loopvar_node = ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if self.step is not None:
self.step.generate_evaluation_code(code)
step = self.step.result()
- incop = "%s=%s" % (incop[0], step)
+ incop = "%s=%s" % (incop[0], step) # e.g. '++' => '+= STEP'
+ else:
+ step = '1'
+
from . import ExprNodes
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.allocate(code)
loopvar_type = PyrexTypes.c_long_type if self.target.type.is_enum else self.target.type
- if from_range:
+ if from_range and not self.is_py_target:
loopvar_name = code.funcstate.allocate_temp(loopvar_type, False)
else:
loopvar_name = self.loopvar_node.result()
if loopvar_type.is_int and not loopvar_type.signed and self.relation2[0] == '>':
# Handle the case where the endpoint of an unsigned int iteration
# is within step of 0.
- if not self.step:
- step = 1
code.putln("for (%s = %s%s + %s; %s %s %s + %s; ) { %s%s;" % (
loopvar_name,
self.bound1.result(), offset, step,
coerced_loopvar_node = self.py_loopvar_node
if coerced_loopvar_node is None and from_range:
- loopvar_cvalue = loopvar_name
- if self.target.type.is_enum:
- loopvar_cvalue = '(%s)%s' % (self.target.type.declaration_code(''), loopvar_cvalue)
- coerced_loopvar_node = ExprNodes.RawCNameExprNode(self.target.pos, loopvar_type, loopvar_cvalue)
+ coerced_loopvar_node = ExprNodes.RawCNameExprNode(self.target.pos, loopvar_type, loopvar_name)
if coerced_loopvar_node is not None:
coerced_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(coerced_loopvar_node, code)
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
- if self.py_loopvar_node:
+ if not from_range and self.py_loopvar_node:
# This mess is to make for..from loops with python targets behave
# exactly like those with C targets with regards to re-assignment
# of the loop variable.
code.putln("}")
- if self.py_loopvar_node:
+ if not from_range and self.py_loopvar_node:
# This is potentially wasteful, but we don't want the semantics to
# depend on whether or not the loop is a python type.
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
- if from_range:
+ if from_range and not self.is_py_target:
code.funcstate.release_temp(loopvar_name)
break_label = code.break_label
def generate_handling_code(self, code, end_label):
code.mark_pos(self.pos)
+
if self.pattern:
- code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
+ has_non_literals = not all(
+ pattern.is_literal or pattern.is_simple() and not pattern.is_temp
+ for pattern in self.pattern)
+
+ if has_non_literals:
+ # For non-trivial exception check expressions, hide the live exception from C-API calls.
+ exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
+ for _ in range(3)]
+ code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrFetchRestore", "Exceptions.c"))
+ code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % tuple(exc_vars))
+ code.globalstate.use_utility_code(UtilityCode.load_cached("FastTypeChecks", "ModuleSetupCode.c"))
+ exc_test_func = "__Pyx_PyErr_GivenExceptionMatches(%s, %%s)" % exc_vars[0]
+ else:
+ exc_vars = ()
+ code.globalstate.use_utility_code(UtilityCode.load_cached("PyErrExceptionMatches", "Exceptions.c"))
+ exc_test_func = "__Pyx_PyErr_ExceptionMatches(%s)"
+
exc_tests = []
for pattern in self.pattern:
pattern.generate_evaluation_code(code)
- exc_tests.append("__Pyx_PyErr_ExceptionMatches(%s)" % pattern.py_result())
+ exc_tests.append(exc_test_func % pattern.py_result())
- match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
- code.putln(
- "%s = %s;" % (match_flag, ' || '.join(exc_tests)))
+ match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
+ code.putln("%s = %s;" % (match_flag, ' || '.join(exc_tests)))
for pattern in self.pattern:
pattern.generate_disposal_code(code)
pattern.free_temps(code)
+
+ if has_non_literals:
+ code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(exc_vars))
+ code.putln(' '.join(["%s = 0;" % var for var in exc_vars]))
+ for temp in exc_vars:
+ code.funcstate.release_temp(temp)
+
code.putln(
"if (%s) {" %
match_flag)
code.putln("}")
return
- exc_vars = [code.funcstate.allocate_temp(py_object_type,
- manage_ref=True)
+ exc_vars = [code.funcstate.allocate_temp(py_object_type, manage_ref=True)
for _ in range(3)]
code.put_add_traceback(self.function_name)
# We always have to fetch the exception value even if
if self.kwargs:
# Try to find num_threads and chunksize keyword arguments
pairs = []
+ seen = set()
for dictitem in self.kwargs.key_value_pairs:
+ if dictitem.key.value in seen:
+ error(self.pos, "Duplicate keyword argument found: %s" % dictitem.key.value)
+ seen.add(dictitem.key.value)
if dictitem.key.value == 'num_threads':
- self.num_threads = dictitem.value
+ if not dictitem.value.is_none:
+ self.num_threads = dictitem.value
elif self.is_prange and dictitem.key.value == 'chunksize':
- self.chunksize = dictitem.value
+ if not dictitem.value.is_none:
+ self.chunksize = dictitem.value
else:
pairs.append(dictitem)
self.num_threads.compile_time_value(env) <= 0):
error(self.pos, "argument to num_threads must be greater than 0")
- if not self.num_threads.is_simple():
+ if not self.num_threads.is_simple() or self.num_threads.type.is_pyobject:
self.num_threads = self.num_threads.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
return self
from __future__ import absolute_import
+import re
import sys
import copy
import codecs
from . import TypeSlots
from .ExprNodes import not_a_constant
import cython
-cython.declare(UtilityCode=object, EncodedString=object, bytes_literal=object,
+cython.declare(UtilityCode=object, EncodedString=object, bytes_literal=object, encoded_string=object,
Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object,
UtilNodes=object, _py_int_types=object)
if sys.version_info[0] >= 3:
_py_int_types = int
+ _py_string_types = (bytes, str)
else:
_py_int_types = (int, long)
+ _py_string_types = (bytes, unicode)
from . import Nodes
from . import ExprNodes
from . import Options
from .Code import UtilityCode, TempitaUtilityCode
-from .StringEncoding import EncodedString, bytes_literal
-from .Errors import error
+from .StringEncoding import EncodedString, bytes_literal, encoded_string
+from .Errors import error, warning
from .ParseTreeTransforms import SkipDeclarations
try:
self.visitchildren(node)
return self._optimise_for_loop(node, node.iterator.sequence)
- def _optimise_for_loop(self, node, iterator, reversed=False):
- if iterator.type is Builtin.dict_type:
+ def _optimise_for_loop(self, node, iterable, reversed=False):
+ annotation_type = None
+ if (iterable.is_name or iterable.is_attribute) and iterable.entry and iterable.entry.annotation:
+ annotation = iterable.entry.annotation
+ if annotation.is_subscript:
+ annotation = annotation.base # container base type
+ # FIXME: generalise annotation evaluation => maybe provide a "qualified name" also for imported names?
+ if annotation.is_name:
+ if annotation.entry and annotation.entry.qualified_name == 'typing.Dict':
+ annotation_type = Builtin.dict_type
+ elif annotation.name == 'Dict':
+ annotation_type = Builtin.dict_type
+ if annotation.entry and annotation.entry.qualified_name in ('typing.Set', 'typing.FrozenSet'):
+ annotation_type = Builtin.set_type
+ elif annotation.name in ('Set', 'FrozenSet'):
+ annotation_type = Builtin.set_type
+
+ if Builtin.dict_type in (iterable.type, annotation_type):
# like iterating over dict.keys()
if reversed:
# CPython raises an error here: not a sequence
return node
return self._transform_dict_iteration(
- node, dict_obj=iterator, method=None, keys=True, values=False)
+ node, dict_obj=iterable, method=None, keys=True, values=False)
+
+ if (Builtin.set_type in (iterable.type, annotation_type) or
+ Builtin.frozenset_type in (iterable.type, annotation_type)):
+ if reversed:
+ # CPython raises an error here: not a sequence
+ return node
+ return self._transform_set_iteration(node, iterable)
# C array (slice) iteration?
- if iterator.type.is_ptr or iterator.type.is_array:
- return self._transform_carray_iteration(node, iterator, reversed=reversed)
- if iterator.type is Builtin.bytes_type:
- return self._transform_bytes_iteration(node, iterator, reversed=reversed)
- if iterator.type is Builtin.unicode_type:
- return self._transform_unicode_iteration(node, iterator, reversed=reversed)
+ if iterable.type.is_ptr or iterable.type.is_array:
+ return self._transform_carray_iteration(node, iterable, reversed=reversed)
+ if iterable.type is Builtin.bytes_type:
+ return self._transform_bytes_iteration(node, iterable, reversed=reversed)
+ if iterable.type is Builtin.unicode_type:
+ return self._transform_unicode_iteration(node, iterable, reversed=reversed)
# the rest is based on function calls
- if not isinstance(iterator, ExprNodes.SimpleCallNode):
+ if not isinstance(iterable, ExprNodes.SimpleCallNode):
return node
- if iterator.args is None:
- arg_count = iterator.arg_tuple and len(iterator.arg_tuple.args) or 0
+ if iterable.args is None:
+ arg_count = iterable.arg_tuple and len(iterable.arg_tuple.args) or 0
else:
- arg_count = len(iterator.args)
- if arg_count and iterator.self is not None:
+ arg_count = len(iterable.args)
+ if arg_count and iterable.self is not None:
arg_count -= 1
- function = iterator.function
+ function = iterable.function
# dict iteration?
if function.is_attribute and not reversed and not arg_count:
- base_obj = iterator.self or function.obj
+ base_obj = iterable.self or function.obj
method = function.attribute
# in Py3, items() is equivalent to Py2's iteritems()
is_safe_iter = self.global_scope().context.language_level >= 3
node, base_obj, method, keys, values)
# enumerate/reversed ?
- if iterator.self is None and function.is_name and \
+ if iterable.self is None and function.is_name and \
function.entry and function.entry.is_builtin:
if function.name == 'enumerate':
if reversed:
# CPython raises an error here: not a sequence
return node
- return self._transform_enumerate_iteration(node, iterator)
+ return self._transform_enumerate_iteration(node, iterable)
elif function.name == 'reversed':
if reversed:
# CPython raises an error here: not a sequence
return node
- return self._transform_reversed_iteration(node, iterator)
+ return self._transform_reversed_iteration(node, iterable)
# range() iteration?
- if Options.convert_range and (node.target.type.is_int or node.target.type.is_enum):
- if iterator.self is None and function.is_name and \
- function.entry and function.entry.is_builtin and \
- function.name in ('range', 'xrange'):
- return self._transform_range_iteration(node, iterator, reversed=reversed)
+ if Options.convert_range and arg_count >= 1 and (
+ iterable.self is None and
+ function.is_name and function.name in ('range', 'xrange') and
+ function.entry and function.entry.is_builtin):
+ if node.target.type.is_int or node.target.type.is_enum:
+ return self._transform_range_iteration(node, iterable, reversed=reversed)
+ if node.target.type.is_pyobject:
+ # Assume that small integer ranges (C long >= 32bit) are best handled in C as well.
+ for arg in (iterable.arg_tuple.args if iterable.args is None else iterable.args):
+ if isinstance(arg, ExprNodes.IntNode):
+ if arg.has_constant_result() and -2**30 <= arg.constant_result < 2**30:
+ continue
+ break
+ else:
+ return self._transform_range_iteration(node, iterable, reversed=reversed)
return node
step=step, body=node.body,
else_clause=node.else_clause,
from_range=True)
+ for_node.set_up_loop(self.current_env())
if bound2_is_temp:
for_node = UtilNodes.LetNode(bound2, for_node)
PyrexTypes.CFuncTypeArg("p_is_dict", PyrexTypes.c_int_ptr_type, None),
])
+ # C signature of the "__Pyx_set_iterator" utility function (see Optimize.c):
+ # takes the iterable and an is_set flag, and writes the original length and
+ # an "actually a set" flag through the two output pointers.
+ PySet_Iterator_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("set", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("is_set", PyrexTypes.c_int_type, None),
+ PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
+ PyrexTypes.CFuncTypeArg("p_is_set", PyrexTypes.c_int_ptr_type, None),
+ ])
+
+ def _transform_set_iteration(self, node, set_obj):
+ # Rewrite "for value in <set/frozenset>" into a C-level while loop driven by
+ # the "__Pyx_set_iterator" helper and a SetIterationNextNode in the loop body.
+ # Temps: the iterator object, the current position, the original length
+ # (guard against modification), and an is_set flag.
+ temps = []
+ temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
+ temps.append(temp)
+ set_temp = temp.ref(set_obj.pos)
+ temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+ temps.append(temp)
+ pos_temp = temp.ref(node.pos)
+
+ # Normalise the loop body to a StatListNode so the iteration step can be prepended.
+ if isinstance(node.body, Nodes.StatListNode):
+ body = node.body
+ else:
+ body = Nodes.StatListNode(pos = node.body.pos,
+ stats = [node.body])
+
+ # keep original length to guard against set modification
+ set_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
+ temps.append(set_len_temp)
+ set_len_temp_addr = ExprNodes.AmpersandNode(
+ node.pos, operand=set_len_temp.ref(set_obj.pos),
+ type=PyrexTypes.c_ptr_type(set_len_temp.type))
+ temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
+ temps.append(temp)
+ is_set_temp = temp.ref(node.pos)
+ is_set_temp_addr = ExprNodes.AmpersandNode(
+ node.pos, operand=is_set_temp,
+ type=PyrexTypes.c_ptr_type(temp.type))
+
+ # The per-iteration "advance and assign to target" step goes first in the body.
+ value_target = node.target
+ iter_next_node = Nodes.SetIterationNextNode(
+ set_temp, set_len_temp.ref(set_obj.pos), pos_temp, value_target, is_set_temp)
+ iter_next_node = iter_next_node.analyse_expressions(self.current_env())
+ body.stats[0:0] = [iter_next_node]
+
+ # Build a C int literal node for a 0/1 flag value.
+ def flag_node(value):
+ value = value and 1 or 0
+ return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)
+
+ # pos = 0; set = __Pyx_set_iterator(...); while(1) { <next>; <body> } else { ... }
+ result_code = [
+ Nodes.SingleAssignmentNode(
+ node.pos,
+ lhs=pos_temp,
+ rhs=ExprNodes.IntNode(node.pos, value='0', constant_result=0)),
+ Nodes.SingleAssignmentNode(
+ set_obj.pos,
+ lhs=set_temp,
+ rhs=ExprNodes.PythonCapiCallNode(
+ set_obj.pos,
+ "__Pyx_set_iterator",
+ self.PySet_Iterator_func_type,
+ utility_code=UtilityCode.load_cached("set_iter", "Optimize.c"),
+ args=[set_obj, flag_node(set_obj.type is Builtin.set_type),
+ set_len_temp_addr, is_set_temp_addr,
+ ],
+ is_temp=True,
+ )),
+ Nodes.WhileStatNode(
+ node.pos,
+ condition=None,
+ body=body,
+ else_clause=node.else_clause,
+ )
+ ]
+
+ return UtilNodes.TempsBlockNode(
+ node.pos, temps=temps,
+ body=Nodes.StatListNode(
+ node.pos,
+ stats = result_code
+ ))
+
class SwitchTransform(Visitor.EnvTransform):
"""
"""
### cleanup to avoid redundant coercions to/from Python types
- def _visit_PyTypeTestNode(self, node):
- # disabled - appears to break assignments in some cases, and
- # also drops a None check, which might still be required
+ def visit_PyTypeTestNode(self, node):
"""Flatten redundant type checks after tree changes.
"""
- old_arg = node.arg
self.visitchildren(node)
- if old_arg is node.arg or node.arg.type != node.type:
- return node
- return node.arg
+ return node.reanalyse()
def _visit_TypecastNode(self, node):
# disabled - the user may have had a reason to put a type
def visit_ExprStatNode(self, node):
"""
- Drop useless coercions.
+ Drop dead code and useless coercions.
"""
self.visitchildren(node)
if isinstance(node.expr, ExprNodes.CoerceToPyTypeNode):
node.expr = node.expr.arg
+ expr = node.expr
+ if expr is None or expr.is_none or expr.is_literal:
+ # Expression was removed or is dead code => remove ExprStatNode as well.
+ return None
+ if expr.is_name and expr.entry and (expr.entry.is_local or expr.entry.is_arg):
+ # Ignore dead references to local variables etc.
+ return None
return node
def visit_CoerceToBooleanNode(self, node):
attribute=attr_name,
is_called=True).analyse_as_type_attribute(self.current_env())
if method is None:
- return node
+ return self._optimise_generic_builtin_method_call(
+ node, attr_name, function, arg_list, is_unbound_method)
args = node.args
if args is None and node.arg_tuple:
args = node.arg_tuple.args
### builtin types
+ def _optimise_generic_builtin_method_call(self, node, attr_name, function, arg_list, is_unbound_method):
+ """
+ Try to inject an unbound method call for a call to a method of a known builtin type.
+ This enables caching the underlying C function of the method at runtime.
+ """
+ arg_count = len(arg_list)
+ # Only bound attribute calls with fewer than 3 arguments on a concrete
+ # builtin type are handled — NOTE(review): limit presumably matches what
+ # CachedBuiltinMethodCallNode supports; confirm against its implementation.
+ if is_unbound_method or arg_count >= 3 or not (function.is_attribute and function.is_py_attr):
+ return node
+ if not function.obj.type.is_builtin_type:
+ return node
+ if function.obj.type.name in ('basestring', 'type'):
+ # these allow different actual types => unsafe
+ return node
+ return ExprNodes.CachedBuiltinMethodCallNode(
+ node, function.obj, attr_name, arg_list)
+
+ # C signature shared by the unicode() conversion helpers: object -> unicode.
+ PyObject_Unicode_func_type = PyrexTypes.CFuncType(
+ Builtin.unicode_type, [
+ PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
+ ])
+
+ def _handle_simple_function_unicode(self, node, function, pos_args):
+ """Optimise single argument calls to unicode().
+ """
+ if len(pos_args) != 1:
+ # unicode() with no argument folds to an empty unicode constant;
+ # other arg counts are left untouched.
+ if len(pos_args) == 0:
+ return ExprNodes.UnicodeNode(node.pos, value=EncodedString(), constant_result=u'')
+ return node
+ arg = pos_args[0]
+ if arg.type is Builtin.unicode_type:
+ # Already unicode and provably not None => the call is a no-op.
+ if not arg.may_be_none():
+ return arg
+ cname = "__Pyx_PyUnicode_Unicode"
+ utility_code = UtilityCode.load_cached('PyUnicode_Unicode', 'StringTools.c')
+ else:
+ cname = "__Pyx_PyObject_Unicode"
+ utility_code = UtilityCode.load_cached('PyObject_Unicode', 'StringTools.c')
+ return ExprNodes.PythonCapiCallNode(
+ node.pos, cname, self.PyObject_Unicode_func_type,
+ args=pos_args,
+ is_temp=node.is_temp,
+ utility_code=utility_code,
+ py_name="unicode")
+
+ def visit_FormattedValueNode(self, node):
+ """Simplify or avoid plain string formatting of a unicode value.
+ This seems misplaced here, but plain unicode formatting is essentially
+ a call to the unicode() builtin, which is optimised right above.
+ """
+ self.visitchildren(node)
+ if node.value.type is Builtin.unicode_type and not node.c_format_spec and not node.format_spec:
+ if not node.conversion_char or node.conversion_char == 's':
+ # value is definitely a unicode string and we don't format it any special
+ return self._handle_simple_function_unicode(node, None, [node.value])
+ return node
+
PyDict_Copy_func_type = PyrexTypes.CFuncType(
Builtin.dict_type, [
PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None)
_map_to_capi_len_function = {
Builtin.unicode_type: "__Pyx_PyUnicode_GET_LENGTH",
Builtin.bytes_type: "PyBytes_GET_SIZE",
+ Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
Builtin.list_type: "PyList_GET_SIZE",
Builtin.tuple_type: "PyTuple_GET_SIZE",
Builtin.set_type: "PySet_GET_SIZE",
PyTypeObjectPtr = PyrexTypes.CPtrType(
cython_scope.lookup('PyTypeObject').type)
pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
- PyrexTypes.py_object_type, [
+ ext_type, [
PyrexTypes.CFuncTypeArg("type", PyTypeObjectPtr, None),
PyrexTypes.CFuncTypeArg("args", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("kwargs", PyrexTypes.py_object_type, None),
node.pos, slot_func_cname,
pyx_tp_new_kwargs_func_type,
args=[type_arg, args_tuple, kwargs],
+ may_return_none=False,
is_temp=True)
else:
# arbitrary variable, needs a None check for safety
utility_code=load_c_utility('append')
)
+ def _handle_simple_method_list_extend(self, node, function, args, is_unbound_method):
+ """Replace list.extend([...]) for short sequence literals values by sequential appends
+ to avoid creating an intermediate sequence argument.
+ """
+ if len(args) != 2:
+ return node
+ obj, value = args
+ if not value.is_sequence_constructor:
+ return node
+ items = list(value.args)
+ if value.mult_factor is not None or len(items) > 8:
+ # Appending wins for short sequences but slows down when multiple resize operations are needed.
+ # This seems to be a good enough limit that avoids repeated resizing.
+ # NOTE(review): the branch below is deliberately disabled ("if False") and
+ # kept only as documentation of the benchmark result — confirm before enabling.
+ if False and isinstance(value, ExprNodes.ListNode):
+ # One would expect that tuples are more efficient here, but benchmarking with
+ # Py3.5 and Py3.7 suggests that they are not. Probably worth revisiting at some point.
+ # Might be related to the usage of PySequence_FAST() in CPython's list.extend(),
+ # which is probably tuned more towards lists than tuples (and rightly so).
+ tuple_node = args[1].as_tuple().analyse_types(self.current_env(), skip_children=True)
+ Visitor.recursively_replace_node(node, args[1], tuple_node)
+ return node
+ # Add the usual None check on the list object before appending.
+ wrapped_obj = self._wrap_self_arg(obj, function, is_unbound_method, 'extend')
+ if not items:
+ # Empty sequences are not likely to occur, but why waste a call to list.extend() for them?
+ wrapped_obj.result_is_used = node.result_is_used
+ return wrapped_obj
+ cloned_obj = obj = wrapped_obj
+ if len(items) > 1 and not obj.is_simple():
+ cloned_obj = UtilNodes.LetRefNode(obj)
+ # Use ListComp_Append() for all but the last item and finish with PyList_Append()
+ # to shrink the list storage size at the very end if necessary.
+ temps = []
+ arg = items[-1]
+ if not arg.is_simple():
+ arg = UtilNodes.LetRefNode(arg)
+ temps.append(arg)
+ new_node = ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_PyList_Append", self.PyObject_Append_func_type,
+ args=[cloned_obj, arg],
+ is_temp=True,
+ utility_code=load_c_utility("ListAppend"))
+ # Chain the remaining appends (in reverse source order) with '|' so any
+ # non-zero error return code propagates out of the combined expression.
+ for arg in items[-2::-1]:
+ if not arg.is_simple():
+ arg = UtilNodes.LetRefNode(arg)
+ temps.append(arg)
+ new_node = ExprNodes.binop_node(
+ node.pos, '|',
+ ExprNodes.PythonCapiCallNode(
+ node.pos, "__Pyx_ListComp_Append", self.PyObject_Append_func_type,
+ args=[cloned_obj, arg], py_name="extend",
+ is_temp=True,
+ utility_code=load_c_utility("ListCompAppend")),
+ new_node,
+ type=PyrexTypes.c_returncode_type,
+ )
+ new_node.result_is_used = node.result_is_used
+ if cloned_obj is not obj:
+ temps.append(cloned_obj)
+ for temp in temps:
+ new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
+ new_node.result_is_used = node.result_is_used
+ return new_node
+
PyByteArray_Append_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
may_return_none=True,
utility_code=load_c_utility('dict_setdefault'))
+ # C signature of the "__Pyx_PyDict_Pop" helper: (dict, key, default) -> object.
+ PyDict_Pop_func_type = PyrexTypes.CFuncType(
+ PyrexTypes.py_object_type, [
+ PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
+ PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
+ ])
+
+ def _handle_simple_method_dict_pop(self, node, function, args, is_unbound_method):
+ """Replace dict.pop() by a call to _PyDict_Pop().
+ """
+ if len(args) == 2:
+ # No default given: pass NULL so the helper can raise KeyError on a miss.
+ args.append(ExprNodes.NullNode(node.pos))
+ elif len(args) != 3:
+ self._error_wrong_arg_count('dict.pop', node, args, "2 or 3")
+ return node
+
+ return self._substitute_method_call(
+ node, function,
+ "__Pyx_PyDict_Pop", self.PyDict_Pop_func_type,
+ 'pop', is_unbound_method, args,
+ utility_code=load_c_utility('py_dict_pop'))
+
Pyx_PyInt_BinopInt_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None),
may_return_none=ExprNodes.PythonCapiCallNode.may_return_none,
with_none_check=True):
args = list(args)
- if with_none_check and args and not args[0].is_literal:
- self_arg = args[0]
- if is_unbound_method:
- self_arg = self_arg.as_none_safe_node(
- "descriptor '%s' requires a '%s' object but received a 'NoneType'",
- format_args=[attr_name, function.obj.name])
- else:
- self_arg = self_arg.as_none_safe_node(
- "'NoneType' object has no attribute '%{0}s'".format('.30' if len(attr_name) <= 30 else ''),
- error = "PyExc_AttributeError",
- format_args = [attr_name])
- args[0] = self_arg
+ if with_none_check and args:
+ args[0] = self._wrap_self_arg(args[0], function, is_unbound_method, attr_name)
if is_temp is None:
is_temp = node.is_temp
return ExprNodes.PythonCapiCallNode(
result_is_used = node.result_is_used,
)
+ def _wrap_self_arg(self, self_arg, function, is_unbound_method, attr_name):
+ # Wrap the self/object argument of a substituted method call in a None
+ # check that mimics CPython's error message for the call form being replaced.
+ if self_arg.is_literal:
+ # Literals cannot be None.
+ return self_arg
+ if is_unbound_method:
+ self_arg = self_arg.as_none_safe_node(
+ "descriptor '%s' requires a '%s' object but received a 'NoneType'",
+ format_args=[attr_name, self_arg.type.name])
+ else:
+ # Truncate very long attribute names in the message, as CPython does ('%.30s').
+ self_arg = self_arg.as_none_safe_node(
+ "'NoneType' object has no attribute '%{0}s'".format('.30' if len(attr_name) <= 30 else ''),
+ error="PyExc_AttributeError",
+ format_args=[attr_name])
+ return self_arg
+
def _inject_int_default_argument(self, node, args, arg_index, type, default_value):
assert len(args) >= arg_index
if len(args) == arg_index:
if isinstance(node.operand1, ExprNodes.IntNode) and \
node.operand2.is_sequence_constructor:
return self._calculate_constant_seq(node, node.operand2, node.operand1)
+ if node.operand1.is_string_literal:
+ return self._multiply_string(node, node.operand1, node.operand2)
+ elif node.operand2.is_string_literal:
+ return self._multiply_string(node, node.operand2, node.operand1)
return self.visit_BinopNode(node)
+ def _multiply_string(self, node, string_node, multiplier_node):
+ # Fold "literal * int" (or "int * literal") string multiplication at compile
+ # time by mutating the string node in place; returns the original binop node
+ # unchanged when folding is not possible or not worthwhile.
+ multiplier = multiplier_node.constant_result
+ if not isinstance(multiplier, _py_int_types):
+ return node
+ if not (node.has_constant_result() and isinstance(node.constant_result, _py_string_types)):
+ return node
+ if len(node.constant_result) > 256:
+ # Too long for static creation, leave it to runtime. (-> arbitrary limit)
+ return node
+
+ # Multiply the secondary representation (unicode_value/bytes_value) as well,
+ # so both stay consistent with the primary value.
+ build_string = encoded_string
+ if isinstance(string_node, ExprNodes.BytesNode):
+ build_string = bytes_literal
+ elif isinstance(string_node, ExprNodes.StringNode):
+ if string_node.unicode_value is not None:
+ string_node.unicode_value = encoded_string(
+ string_node.unicode_value * multiplier,
+ string_node.unicode_value.encoding)
+ elif isinstance(string_node, ExprNodes.UnicodeNode):
+ if string_node.bytes_value is not None:
+ string_node.bytes_value = bytes_literal(
+ string_node.bytes_value * multiplier,
+ string_node.bytes_value.encoding)
+ else:
+ assert False, "unknown string node type: %s" % type(string_node)
+ string_node.value = build_string(
+ string_node.value * multiplier,
+ string_node.value.encoding)
+ return string_node
+
def _calculate_constant_seq(self, node, sequence_node, factor):
if factor.constant_result != 1 and sequence_node.args:
if isinstance(factor.constant_result, _py_int_types) and factor.constant_result <= 0:
sequence_node.mult_factor = factor
return sequence_node
+ def visit_ModNode(self, node):
+ # Try to rewrite u"..." % (args,) into an f-string-style JoinedStrNode;
+ # fall back to generic binop handling when the pattern does not apply.
+ self.visitchildren(node)
+ if isinstance(node.operand1, ExprNodes.UnicodeNode) and isinstance(node.operand2, ExprNodes.TupleNode):
+ if not node.operand2.mult_factor:
+ fstring = self._build_fstring(node.operand1.pos, node.operand1.value, node.operand2.args)
+ if fstring is not None:
+ return fstring
+ return self.visit_BinopNode(node)
+
+ # Capturing regex used with re.split() in _build_fstring() to isolate
+ # %-format placeholders; the trailing '.' captures the conversion type
+ # character (or something else, flagged as unsupported by the caller).
+ _parse_string_format_regex = (
+ u'(%(?:' # %...
+ u'(?:[0-9]+|[ ])?' # width (optional) or space prefix fill character (optional)
+ u'(?:[.][0-9]+)?' # precision (optional)
+ u')?.)' # format type (or something different for unsupported formats)
+ )
+
+ def _build_fstring(self, pos, ustring, format_args):
+ # Issues formatting warnings instead of errors since we really only catch a few errors by accident.
+ # Returns a JoinedStrNode equivalent to "ustring % format_args", or None when
+ # the format string cannot be safely converted.
+ args = iter(format_args)
+ substrings = []
+ can_be_optimised = True
+ for s in re.split(self._parse_string_format_regex, ustring):
+ if not s:
+ continue
+ if s == u'%%':
+ # Escaped percent sign => literal '%'.
+ substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(u'%'), constant_result=u'%'))
+ continue
+ if s[0] != u'%':
+ # Plain text between placeholders; a trailing '%' means the regex
+ # could not match a complete placeholder.
+ if s[-1] == u'%':
+ warning(pos, "Incomplete format: '...%s'" % s[-3:], level=1)
+ can_be_optimised = False
+ substrings.append(ExprNodes.UnicodeNode(pos, value=EncodedString(s), constant_result=s))
+ continue
+ format_type = s[-1]
+ try:
+ arg = next(args)
+ except StopIteration:
+ warning(pos, "Too few arguments for format placeholders", level=1)
+ can_be_optimised = False
+ break
+ if format_type in u'srfdoxX':
+ format_spec = s[1:]
+ if format_type in u'doxX' and u'.' in format_spec:
+ # Precision is not allowed for integers in format(), but ok in %-formatting.
+ can_be_optimised = False
+ elif format_type in u'rs':
+ # 's'/'r' become conversion characters; drop them from the format spec.
+ format_spec = format_spec[:-1]
+ substrings.append(ExprNodes.FormattedValueNode(
+ arg.pos, value=arg,
+ conversion_char=format_type if format_type in u'rs' else None,
+ format_spec=ExprNodes.UnicodeNode(
+ pos, value=EncodedString(format_spec), constant_result=format_spec)
+ if format_spec else None,
+ ))
+ else:
+ # keep it simple for now ...
+ can_be_optimised = False
+
+ if not can_be_optimised:
+ # Print all warnings we can find before finally giving up here.
+ return None
+
+ try:
+ next(args)
+ except StopIteration: pass
+ else:
+ warning(pos, "Too many arguments for format placeholders", level=1)
+ return None
+
+ node = ExprNodes.JoinedStrNode(pos, values=substrings)
+ return self.visit_JoinedStrNode(node)
+
def visit_FormattedValueNode(self, node):
self.visitchildren(node)
conversion_char = node.conversion_char or 's'
visit_Node = Visitor.VisitorTransform.recurse_to_children
-class FinalOptimizePhase(Visitor.CythonTransform, Visitor.NodeRefCleanupMixin):
+class FinalOptimizePhase(Visitor.EnvTransform, Visitor.NodeRefCleanupMixin):
"""
This visitor handles several commuting optimizations, and is run
just before the C code generation phase.
- eliminate None assignment and refcounting for first assignment.
- isinstance -> typecheck for cdef types
- eliminate checks for None and/or types that became redundant after tree changes
+ - eliminate useless string formatting steps
- replace Python function calls that look like method calls by a faster PyMethodCallNode
"""
+ in_loop = False
+
def visit_SingleAssignmentNode(self, node):
"""Avoid redundant initialisation of local variables before their
first assignment.
function.type = function.entry.type
PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type)
node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
- elif (self.current_directives.get("optimize.unpack_method_calls")
- and node.is_temp and function.type.is_pyobject):
+ elif (node.is_temp and function.type.is_pyobject and self.current_directives.get(
+ "optimize.unpack_method_calls_in_pyinit"
+ if not self.in_loop and self.current_env().is_module_scope
+ else "optimize.unpack_method_calls")):
# optimise simple Python methods calls
if isinstance(node.arg_tuple, ExprNodes.TupleNode) and not (
node.arg_tuple.mult_factor or (node.arg_tuple.is_literal and node.arg_tuple.args)):
node, function=function, arg_tuple=node.arg_tuple, type=node.type))
return node
+ def visit_NumPyMethodCallNode(self, node):
+ # Exclude from replacement above.
+ # Plain pass-through visit so these calls are not rewritten as generic
+ # Python method calls.
+ self.visitchildren(node)
+ return node
+
def visit_PyTypeTestNode(self, node):
"""Remove tests for alternatively allowed None values from
type tests when we know that the argument cannot be None
return node.arg
return node
+ def visit_LoopNode(self, node):
+ """Remember when we enter a loop as some expensive optimisations might still be worth it there.
+ """
+ # Save/restore supports nested loops and code following the loop.
+ old_val = self.in_loop
+ self.in_loop = True
+ self.visitchildren(node)
+ self.in_loop = old_val
+ return node
+
+
class ConsolidateOverflowCheck(Visitor.CythonTransform):
"""
This class facilitates the sharing of overflow checking among all nodes
'language_level': 2,
'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere.
'py2_import': False, # For backward compatibility of Cython's source code in Py3 source mode
+ 'preliminary_late_includes_cy28': False, # Temporary directive in 0.28, to be removed in a later version (see GH#2079).
+ 'iterable_coroutine': False, # Make async coroutines backwards compatible with the old asyncio yield-from syntax.
'c_string_type': 'bytes',
'c_string_encoding': '',
'type_version_tag': True, # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types
# optimizations
'optimize.inline_defnode_calls': True,
'optimize.unpack_method_calls': True, # increases code size when True
+ 'optimize.unpack_method_calls_in_pyinit': False, # uselessly increases code size when True
'optimize.use_switch': True,
# remove unreachable code
'old_style_globals': ('module',),
'np_pythran': ('module',),
'fast_gil': ('module',),
+ 'iterable_coroutine': ('module', 'function'),
}
from . import Nodes
from . import Options
from . import Builtin
+from . import Errors
from .Visitor import VisitorTransform, TreeVisitor
from .Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform
visit_Node = VisitorTransform.recurse_to_children
-class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
+class InterpretCompilerDirectives(CythonTransform):
"""
After parsing, directives can be stored in a number of places:
- #cython-comments at the top of the file (stored in ModuleNode)
node.cython_attribute = directive
return node
+ def visit_NewExprNode(self, node):
+ # Visit the C++ class explicitly — NOTE(review): presumably it is not part
+ # of the node's regular children, so visitchildren() alone would miss it.
+ self.visit(node.cppclass)
+ self.visitchildren(node)
+ return node
+
def try_to_parse_directives(self, node):
# If node is the contents of an directive (in a with statement or
# decorator), returns a list of (directivename, value) pairs.
def visit_CVarDefNode(self, node):
directives = self._extract_directives(node, 'function')
if not directives:
- return node
+ return self.visit_Node(node)
for name, value in directives.items():
if name == 'locals':
node.directive_locals = value
directives = []
realdecs = []
both = []
- for dec in node.decorators:
+ # Decorators coming first take precedence.
+ for dec in node.decorators[::-1]:
new_directives = self.try_to_parse_directives(dec.decorator)
if new_directives is not None:
for directive in new_directives:
directives.append(directive)
if directive[0] == 'staticmethod':
both.append(dec)
+ # Adapt scope type based on decorators that change it.
+ if directive[0] == 'cclass' and scope_name == 'class':
+ scope_name = 'cclass'
else:
realdecs.append(dec)
- if realdecs and isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode)):
+ if realdecs and (scope_name == 'cclass' or
+ isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode))):
raise PostParseError(realdecs[0].pos, "Cdef functions/classes cannot take arbitrary decorators.")
- else:
- node.decorators = realdecs + both
+ node.decorators = realdecs[::-1] + both[::-1]
# merge or override repeated directives
optdict = {}
- directives.reverse() # Decorators coming first take precedence
for directive in directives:
name, value = directive
if name in optdict:
def visit_FuncDefNode(self, node):
"""
- Analyse a function and its body, as that hasn't happend yet. Also
+ Analyse a function and its body, as that hasn't happened yet. Also
analyse the directive_locals set by @cython.locals().
Then, if we are a function with fused arguments, replace the function
binding = self.current_directives.get('binding')
rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding)
node.code_object = rhs.code_object
+ if node.is_generator:
+ node.gbody.code_object = node.code_object
if env.is_py_class_scope:
rhs.binding = True
# Some nodes are no longer needed after declaration
# analysis and can be dropped. The analysis was performed
- # on these nodes in a seperate recursive process from the
+ # on these nodes in a separate recursive process from the
# enclosing function or module, so we can simply drop them.
def visit_CDeclaratorNode(self, node):
# necessary to ensure that all CNameDeclaratorNodes are visited.
collector.visitchildren(node)
if node.is_async_def:
- coroutine_type = Nodes.AsyncGenNode if collector.has_yield else Nodes.AsyncDefNode
+ coroutine_type = Nodes.AsyncDefNode
if collector.has_yield:
+ coroutine_type = Nodes.AsyncGenNode
for yield_expr in collector.yields + collector.returns:
yield_expr.in_async_gen = True
+ elif self.current_directives['iterable_coroutine']:
+ coroutine_type = Nodes.IterableAsyncDefNode
elif collector.has_await:
found = next(y for y in collector.yields if y.is_await)
error(found.pos, "'await' not allowed in generators (use 'yield')")
return self.transform(node)
def visit_PrimaryCmpNode(self, node):
- type1 = node.operand1.analyse_as_type(self.local_scope)
- type2 = node.operand2.analyse_as_type(self.local_scope)
+ with Errors.local_errors(ignore=True):
+ type1 = node.operand1.analyse_as_type(self.local_scope)
+ type2 = node.operand2.analyse_as_type(self.local_scope)
if type1 and type2:
false_node = ExprNodes.BoolNode(node.pos, value=False)
error(pos, "Expected an identifier, found '%s'" % s.sy)
if s.systring == 'const':
s.next()
- base_type = p_c_base_type(s,
- self_flag = self_flag, nonempty = nonempty, templates = templates)
- return Nodes.CConstTypeNode(pos, base_type = base_type)
+ base_type = p_c_base_type(s, self_flag=self_flag, nonempty=nonempty, templates=templates)
+ if isinstance(base_type, Nodes.MemoryViewSliceTypeNode):
+ # reverse order to avoid having to write "(const int)[:]"
+ base_type.base_type_node = Nodes.CConstTypeNode(pos, base_type=base_type.base_type_node)
+ return base_type
+ return Nodes.CConstTypeNode(pos, base_type=base_type)
if looking_at_base_type(s):
#print "p_c_simple_base_type: looking_at_base_type at", s.position()
is_basic = 1
"ssize_t" : (2, 0),
"size_t" : (0, 0),
"ptrdiff_t" : (2, 0),
+ "Py_tss_t" : (1, 0),
})
sign_and_longness_words = cython.declare(
ctx.namespace = p_string_literal(s, 'u')[2]
if p_nogil(s):
ctx.nogil = 1
- body = p_suite(s, ctx)
+
+ # Use "docstring" as verbatim string to include
+ verbatim_include, body = p_suite_with_docstring(s, ctx, True)
+
return Nodes.CDefExternNode(pos,
include_file = include_file,
+ verbatim_include = verbatim_include,
body = body,
namespace = ctx.namespace)
as_name = class_name
objstruct_name = None
typeobj_name = None
- base_class_module = None
- base_class_name = None
+ bases = None
if s.sy == '(':
- s.next()
- base_class_path = [p_ident(s)]
- while s.sy == '.':
- s.next()
- base_class_path.append(p_ident(s))
- if s.sy == ',':
- s.error("C class may only have one base class", fatal=False)
- s.expect(')')
- base_class_module = ".".join(base_class_path[:-1])
- base_class_name = base_class_path[-1]
+ positional_args, keyword_args = p_call_parse_args(s, allow_genexp=False)
+ if keyword_args:
+ s.error("C classes cannot take keyword bases.")
+ bases, _ = p_call_build_packed_args(pos, positional_args, keyword_args)
+ if bases is None:
+ bases = ExprNodes.TupleNode(pos, args=[])
+
if s.sy == '[':
if ctx.visibility not in ('public', 'extern') and not ctx.api:
error(s.position(), "Name options only allowed for 'public', 'api', or 'extern' C class")
module_name = ".".join(module_path),
class_name = class_name,
as_name = as_name,
- base_class_module = base_class_module,
- base_class_name = base_class_name,
+ bases = bases,
objstruct_name = objstruct_name,
typeobj_name = typeobj_name,
in_pxd = ctx.level == 'module_pxd',
# is_pythran_expr boolean Is Pythran expr
# is_numpy_buffer boolean Is Numpy array buffer
# has_attributes boolean Has C dot-selectable attributes
- # default_value string Initial value
+ # default_value string Initial value that can be assigned before first user assignment.
+ # declaration_value string The value statically assigned on declaration (if any).
# entry Entry The Entry for this type
#
# declaration_code(entity_code,
is_numpy_buffer = 0
has_attributes = 0
default_value = ""
+ declaration_value = ""
def resolve(self):
# If a typedef, returns the base type.
def needs_nonecheck(self):
return 0
+ def _assign_from_py_code(self, source_code, result_code, error_pos, code,
+ from_py_function=None, error_condition=None, extra_args=None):
+ args = ', ' + ', '.join('%s' % arg for arg in extra_args) if extra_args else ''
+ convert_call = "%s(%s%s)" % (
+ from_py_function or self.from_py_function,
+ source_code,
+ args,
+ )
+ if self.is_enum:
+ convert_call = typecast(self, c_long_type, convert_call)
+ return '%s = %s; %s' % (
+ result_code,
+ convert_call,
+ code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
+
def public_decl(base_code, dll_linkage):
if dll_linkage:
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
- if from_py_function is None:
- from_py_function = self.from_py_function
- if error_condition is None:
- error_condition = self.error_condition(result_code)
return self.typedef_base_type.from_py_call_code(
- source_code, result_code, error_pos, code, from_py_function, error_condition)
+ source_code, result_code, error_pos, code,
+ from_py_function or self.from_py_function,
+ error_condition or self.error_condition(result_code)
+ )
def overflow_check_binop(self, binop, env, const_rhs=False):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
def same_as_resolved_type(self, other_type):
return ((other_type.is_memoryviewslice and
+ #self.writable_needed == other_type.writable_needed and # FIXME: should be only uni-directional
self.dtype.same_as(other_type.dtype) and
self.axes == other_type.axes) or
other_type is error_type)
src = self
- if src.dtype != dst.dtype:
+ #if not copying and self.writable_needed and not dst.writable_needed:
+ # return False
+
+ src_dtype, dst_dtype = src.dtype, dst.dtype
+ if dst_dtype.is_const:
+ # Requesting read-only views is always ok => consider only the non-const base type.
+ dst_dtype = dst_dtype.const_base_type
+ if src_dtype.is_const:
+ # When assigning between read-only views, compare only the non-const base types.
+ src_dtype = src_dtype.const_base_type
+ elif copying and src_dtype.is_const:
+ # Copying by value => ignore const on source.
+ src_dtype = src_dtype.const_base_type
+
+ if src_dtype != dst_dtype:
return False
if src.ndim != dst.ndim:
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
- return '%s = %s(%s); %s' % (
- result_code,
- from_py_function or self.from_py_function,
- source_code,
- code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
+ # NOTE: auto-detection of readonly buffers is disabled:
+ # writable = self.writable_needed or not self.dtype.is_const
+ writable = not self.dtype.is_const
+ return self._assign_from_py_code(
+ source_code, result_code, error_pos, code, from_py_function, error_condition,
+ extra_args=['PyBUF_WRITABLE' if writable else '0'])
def create_to_py_utility_code(self, env):
self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env)
if self.dtype.is_pyobject:
utility_name = "MemviewObjectToObject"
else:
- to_py = self.dtype.create_to_py_utility_code(env)
- from_py = self.dtype.create_from_py_utility_code(env)
- if not (to_py or from_py):
- return "NULL", "NULL"
+ self.dtype.create_to_py_utility_code(env)
+ to_py_function = self.dtype.to_py_function
- if not self.dtype.to_py_function:
- get_function = "NULL"
+ from_py_function = None
+ if not self.dtype.is_const:
+ self.dtype.create_from_py_utility_code(env)
+ from_py_function = self.dtype.from_py_function
- if not self.dtype.from_py_function:
+ if not (to_py_function or from_py_function):
+ return "NULL", "NULL"
+ if not to_py_function:
+ get_function = "NULL"
+ if not from_py_function:
set_function = "NULL"
utility_name = "MemviewDtypeToObject"
error_condition = (self.dtype.error_condition('value') or
'PyErr_Occurred()')
context.update(
- to_py_function = self.dtype.to_py_function,
- from_py_function = self.dtype.from_py_function,
- dtype = self.dtype.empty_declaration_code(),
- error_condition = error_condition,
+ to_py_function=to_py_function,
+ from_py_function=from_py_function,
+ dtype=self.dtype.empty_declaration_code(),
+ error_condition=error_condition,
)
utility = TempitaUtilityCode.load_cached(
name = "object"
is_pyobject = 1
default_value = "0"
+ declaration_value = "0"
buffer_defaults = None
is_extern = False
is_subclassed = False
# vtabstruct_cname string Name of C method table struct
# vtabptr_cname string Name of pointer to C method table
# vtable_cname string Name of C method table definition
+ # early_init boolean Whether to initialize early (as opposed to during module execution).
# defered_declarations [thunk] Used to declare class hierarchies in order
is_extension_type = 1
has_attributes = 1
+ early_init = 1
objtypedef_cname = None
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
- return '%s = %s(%s); %s' % (
- result_code,
- from_py_function or self.from_py_function,
- source_code,
- code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
+ return self._assign_from_py_code(
+ source_code, result_code, error_pos, code, from_py_function, error_condition)
+
+
class PythranExpr(CType):
# Pythran object of a given type
self.from_py_function = "from_python<%s>" % (self.pythran_type)
self.scope = None
- def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0):
- assert pyrex == 0
- return "%s %s" % (self.name, entity_code)
+ def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
+ assert not pyrex
+ return "%s %s" % (self.cname, entity_code)
def attributes_known(self):
if self.scope is None:
from . import Symtab
- self.scope = scope = Symtab.CClassScope(
- '',
- None,
- visibility="extern")
+ # FIXME: fake C scope, might be better represented by a struct or C++ class scope
+ self.scope = scope = Symtab.CClassScope('', None, visibility="extern")
scope.parent_type = self
scope.directives = {}
- # rank 3 == long
- scope.declare_var("shape", CPtrType(CIntType(3)), None, cname="_shape", is_cdef=True)
- scope.declare_var("ndim", CIntType(3), None, cname="value", is_cdef=True)
+ scope.declare_var("shape", CPtrType(c_long_type), None, cname="_shape", is_cdef=True)
+ scope.declare_var("ndim", c_long_type, None, cname="value", is_cdef=True)
return True
+ def __eq__(self, other):
+ return isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type
+
+ def __ne__(self, other):
+ return not (isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type)
+
+ def __hash__(self):
+ return hash(self.pythran_type)
+
+
class CConstType(BaseType):
is_const = 1
ForbidUse = ForbidUseClass()
-class CIntType(CNumericType):
-
- is_int = 1
- typedef_flag = 0
+class CIntLike(object):
+ """Mixin for shared behaviour of C integers and enums.
+ """
to_py_function = None
from_py_function = None
to_pyunicode_utility = None
default_format_spec = 'd'
- exception_value = -1
def can_coerce_to_pyobject(self, env):
return True
def can_coerce_from_pyobject(self, env):
return True
+ def create_to_py_utility_code(self, env):
+ if type(self).to_py_function is None:
+ self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "CIntToPy", "TypeConversion.c",
+ context={"TYPE": self.empty_declaration_code(),
+ "TO_PY_FUNCTION": self.to_py_function}))
+ return True
+
+ def create_from_py_utility_code(self, env):
+ if type(self).from_py_function is None:
+ self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
+ env.use_utility_code(TempitaUtilityCode.load_cached(
+ "CIntFromPy", "TypeConversion.c",
+ context={"TYPE": self.empty_declaration_code(),
+ "FROM_PY_FUNCTION": self.from_py_function}))
+ return True
+
@staticmethod
def _parse_format(format_spec):
padding = ' '
format_type, width, padding_char = self._parse_format(format_spec)
return "%s(%s, %d, '%s', '%s')" % (utility_code_name, cvalue, width, padding_char, format_type)
- def create_to_py_utility_code(self, env):
- if type(self).to_py_function is None:
- self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
- env.use_utility_code(TempitaUtilityCode.load_cached(
- "CIntToPy", "TypeConversion.c",
- context={"TYPE": self.empty_declaration_code(),
- "TO_PY_FUNCTION": self.to_py_function}))
- return True
- def create_from_py_utility_code(self, env):
- if type(self).from_py_function is None:
- self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
- env.use_utility_code(TempitaUtilityCode.load_cached(
- "CIntFromPy", "TypeConversion.c",
- context={"TYPE": self.empty_declaration_code(),
- "FROM_PY_FUNCTION": self.from_py_function}))
- return True
+class CIntType(CIntLike, CNumericType):
+
+ is_int = 1
+ typedef_flag = 0
+ exception_value = -1
def get_to_py_type_conversion(self):
if self.rank < list(rank_to_type_name).index('int'):
}
+class CPyTSSTType(CType):
+ #
+ # PEP-539 "Py_tss_t" type
+ #
+
+ declaration_value = "Py_tss_NEEDS_INIT"
+
+ def __repr__(self):
+ return "<Py_tss_t>"
+
+ def declaration_code(self, entity_code,
+ for_display=0, dll_linkage=None, pyrex=0):
+ if pyrex or for_display:
+ base_code = "Py_tss_t"
+ else:
+ base_code = public_decl("Py_tss_t", dll_linkage)
+ return self.base_declaration_code(base_code, entity_code)
+
+
class CPointerBaseType(CType):
# common base type for pointer/array types
#
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
+ assert not error_condition, '%s: %s' % (error_pos, error_condition)
call_code = "%s(%s, %s, %s)" % (
from_py_function or self.from_py_function,
source_code, result_code, self.size)
or_none = False
accept_none = True
accept_builtin_subtypes = False
+ annotation = None
subtypes = ['type']
- def __init__(self, name, type, pos, cname=None):
+ def __init__(self, name, type, pos, cname=None, annotation=None):
self.name = name
if cname is not None:
self.cname = cname
else:
self.cname = Naming.var_prefix + name
+ if annotation is not None:
+ self.annotation = annotation
self.type = type
self.pos = pos
self.needs_type_test = False # TODO: should these defaults be set in analyse_types()?
def specialize(self, values):
return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname)
+
class ToPyStructUtilityCode(object):
requires = None
return True
elif other_type.is_cpp_class:
return other_type.is_subclass(self)
+ elif other_type.is_string and self.cname in cpp_string_conversions:
+ return True
def attributes_known(self):
return self.scope is not None
return isinstance(type, TemplatePlaceholderType) and type.optional
-class CEnumType(CType):
+class CEnumType(CIntLike, CType):
# name string
# cname string or None
# typedef_flag boolean
self.name, self.cname, self.typedef_flag, namespace)
return self
- def can_coerce_to_pyobject(self, env):
- return True
-
- def can_coerce_from_pyobject(self, env):
- return True
-
- def create_to_py_utility_code(self, env):
- self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
- env.use_utility_code(TempitaUtilityCode.load_cached(
- "CIntToPy", "TypeConversion.c",
- context={"TYPE": self.empty_declaration_code(),
- "TO_PY_FUNCTION": self.to_py_function}))
- return True
-
- def create_from_py_utility_code(self, env):
- self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
- env.use_utility_code(TempitaUtilityCode.load_cached(
- "CIntFromPy", "TypeConversion.c",
- context={"TYPE": self.empty_declaration_code(),
- "FROM_PY_FUNCTION": self.from_py_function}))
- return True
-
- def from_py_call_code(self, source_code, result_code, error_pos, code,
- from_py_function=None, error_condition=None):
- rhs = "%s(%s)" % (
- from_py_function or self.from_py_function,
- source_code)
- return '%s = %s;%s' % (
- result_code,
- typecast(self, c_long_type, rhs),
- ' %s' % code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
-
def create_type_wrapper(self, env):
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
c_threadstate_type = CStructOrUnionType("PyThreadState", "struct", None, 1, "PyThreadState")
c_threadstate_ptr_type = CPtrType(c_threadstate_type)
+# PEP-539 "Py_tss_t" type
+c_pytss_t_type = CPyTSSTType()
+
# the Py_buffer type is defined in Builtin.py
c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer")
c_py_buffer_ptr_type = CPtrType(c_py_buffer_type)
#
(1, 0, "void"): c_void_type,
+ (1, 0, "Py_tss_t"): c_pytss_t_type,
(1, 0, "bint"): c_bint_type,
(0, 0, "Py_UNICODE"): c_py_unicode_type,
return ('[%s]' if len(indices) == 1 else '(%s)') % indexing
-def _index_type_code(idx):
+def _index_type_code(index_with_type):
+ idx, index_type = index_with_type
if idx.is_slice:
if idx.step.is_none:
func = "contiguous_slice"
n = 3
return "pythonic::types::%s(%s)" % (
func, ",".join(["0"]*n))
- elif idx.type.is_int:
- return "std::declval<%s>()" % idx.type.sign_and_name()
- elif idx.type.is_pythran_expr:
- return "std::declval<%s>()" % idx.type.pythran_type
- raise ValueError("unsupported indexing type %s!" % idx.type)
+ elif index_type.is_int:
+ return "std::declval<%s>()" % index_type.sign_and_name()
+ elif index_type.is_pythran_expr:
+ return "std::declval<%s>()" % index_type.pythran_type
+ raise ValueError("unsupported indexing type %s!" % index_type)
def _index_code(idx):
if self._escaped_description is None:
esc_desc = \
self.get_description().encode('ASCII', 'replace').decode("ASCII")
- # Use foreward slashes on Windows since these paths
+ # Use forward slashes on Windows since these paths
# will be used in the #line directives in the C/C++ files.
self._escaped_description = esc_desc.replace('\\', '/')
return self._escaped_description
return s
+def encoded_string(s, encoding):
+ assert isinstance(s, (_unicode, bytes))
+ s = EncodedString(s)
+ if encoding is not None:
+ s.encoding = encoding
+ return s
+
+
char_from_escape_sequence = {
r'\a' : u'\a',
r'\b' : u'\b',
def c_safe_identifier(cname):
# There are some C limitations on struct entry names.
- if ((cname[:2] == '__'
- and not (cname.startswith(Naming.pyrex_prefix)
- or cname in ('__weakref__', '__dict__')))
- or cname in iso_c99_keywords):
+ if ((cname[:2] == '__' and not (cname.startswith(Naming.pyrex_prefix)
+ or cname in ('__weakref__', '__dict__')))
+ or cname in iso_c99_keywords):
cname = Naming.pyrex_prefix + cname
return cname
+
class BufferAux(object):
writable_needed = False
# cname string C name of entity
# type PyrexType Type of entity
# doc string Doc string
+ # annotation ExprNode PEP 484/526 annotation
# init string Initial value
# visibility 'private' or 'public' or 'extern'
# is_builtin boolean Is an entry in the Python builtins dict
#
# buffer_aux BufferAux or None Extra information needed for buffer variables
# inline_func_in_pxd boolean Hacky special case for inline function in pxd file.
- # Ideally this should not be necesarry.
+ # Ideally this should not be necessary.
# might_overflow boolean In an arithmetic expression that could cause
# overflow (used for type inference).
# utility_code_definition For some Cython builtins, the utility code
inline_func_in_pxd = False
borrowed = 0
init = ""
+ annotation = None
visibility = 'private'
is_builtin = 0
is_cglobal = 0
def __repr__(self):
return "%s(<%x>, name=%s, type=%s)" % (type(self).__name__, id(self), self.name, self.type)
+ def already_declared_here(self):
+ error(self.pos, "Previous declaration is here")
+
def redeclared(self, pos):
error(pos, "'%s' does not match previous declaration" % self.name)
- error(self.pos, "Previous declaration is here")
+ self.already_declared_here()
def all_alternatives(self):
return [self] + self.overloaded_alternatives
warning(pos, "'%s' is a reserved name in C." % cname, -1)
entries = self.entries
if name and name in entries and not shadow:
- old_type = entries[name].type
- if self.is_cpp_class_scope and type.is_cfunction and old_type.is_cfunction and type != old_type:
- # C++ method overrides are ok
+ old_entry = entries[name]
+
+ # Reject redeclared C++ functions only if they have the same type signature.
+ cpp_override_allowed = False
+ if type.is_cfunction and old_entry.type.is_cfunction and self.is_cpp():
+ for alt_entry in old_entry.all_alternatives():
+ if type == alt_entry.type:
+ if name == '<init>' and not type.args:
+ # Cython pre-declares the no-args constructor - allow later user definitions.
+ cpp_override_allowed = True
+ break
+ else:
+ cpp_override_allowed = True
+
+ if cpp_override_allowed:
+ # C++ function/method overrides with different signatures are ok.
pass
elif self.is_cpp_class_scope and entries[name].is_inherited:
# Likewise ignore inherited classes.
pass
elif visibility == 'extern':
- warning(pos, "'%s' redeclared " % name, 0)
+ # Silenced outside of "cdef extern" blocks, until we have a safe way to
+ # prevent pxd-defined cpdef functions from ending up here.
+ warning(pos, "'%s' redeclared " % name, 1 if self.in_cinclude else 0)
elif visibility != 'ignore':
error(pos, "'%s' redeclared " % name)
+ entries[name].already_declared_here()
entry = Entry(name, cname, type, pos = pos)
entry.in_cinclude = self.in_cinclude
entry.create_wrapper = create_wrapper
else:
if not (entry.is_type and entry.type.is_cpp_class):
error(pos, "'%s' redeclared " % name)
+ entry.already_declared_here()
return None
elif scope and entry.type.scope:
warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
if base_classes:
if entry.type.base_classes and entry.type.base_classes != base_classes:
error(pos, "Base type does not match previous declaration")
+ entry.already_declared_here()
else:
entry.type.base_classes = base_classes
if templates or entry.type.templates:
if templates != entry.type.templates:
error(pos, "Template parameters do not match previous declaration")
+ entry.already_declared_here()
def declare_inherited_attributes(entry, base_classes):
for base_class in base_classes:
# doc string Module doc string
# doc_cname string C name of module doc string
# utility_code_list [UtilityCode] Queuing utility codes for forwarding to Code.py
- # python_include_files [string] Standard Python headers to be included
- # include_files [string] Other C headers to be included
+ # c_includes {key: IncludeCode} C headers or verbatim code to be generated
+ # See process_include() for more documentation
# string_to_entry {string : Entry} Map string const to entry
# identifier_to_entry {string : Entry} Map identifier string const to entry
# context Context
self.doc_cname = Naming.moddoc_cname
self.utility_code_list = []
self.module_entries = {}
- self.python_include_files = ["Python.h"]
- self.include_files = []
+ self.c_includes = {}
self.type_names = dict(outer_scope.type_names)
self.pxd_file_loaded = 0
self.cimported_modules = []
for var_name in ['__builtins__', '__name__', '__file__', '__doc__', '__path__',
'__spec__', '__loader__', '__package__', '__cached__']:
self.declare_var(EncodedString(var_name), py_object_type, None)
+ self.process_include(Code.IncludeCode("Python.h", initial=True))
def qualifying_scope(self):
return self.parent_module
module = module.lookup_submodule(submodule)
return module
- def add_include_file(self, filename):
- if filename not in self.python_include_files \
- and filename not in self.include_files:
- self.include_files.append(filename)
+ def add_include_file(self, filename, verbatim_include=None, late=False):
+ """
+ Add `filename` as include file. Add `verbatim_include` as
+ verbatim text in the C file.
+ Both `filename` and `verbatim_include` can be `None` or empty.
+ """
+ inc = Code.IncludeCode(filename, verbatim_include, late=late)
+ self.process_include(inc)
+
+ def process_include(self, inc):
+ """
+ Add `inc`, which is an instance of `IncludeCode`, to this
+ `ModuleScope`. This either adds a new element to the
+ `c_includes` dict or it updates an existing entry.
+
+ In detail: the values of the dict `self.c_includes` are
+ instances of `IncludeCode` containing the code to be put in the
+ generated C file. The keys of the dict are needed to ensure
+ uniqueness in two ways: if an include file is specified in
+ multiple "cdef extern" blocks, only one `#include` statement is
+ generated. Second, the same include might occur multiple times
+ if we find it through multiple "cimport" paths. So we use the
+ generated code (of the form `#include "header.h"`) as dict key.
+
+ If verbatim code does not belong to any include file (i.e. it
+ was put in a `cdef extern from *` block), then we use a unique
+ dict key: namely, the `sortkey()`.
+
+ One `IncludeCode` object can contain multiple pieces of C code:
+ one optional "main piece" for the include file and several other
+ pieces for the verbatim code. The `IncludeCode.dict_update`
+ method merges the pieces of two different `IncludeCode` objects
+ if needed.
+ """
+ key = inc.mainpiece()
+ if key is None:
+ key = inc.sortkey()
+ inc.dict_update(self.c_includes, key)
+ inc = self.c_includes[key]
def add_imported_module(self, scope):
if scope not in self.cimported_modules:
- for filename in scope.include_files:
- self.add_include_file(filename)
+ for inc in scope.c_includes.values():
+ self.process_include(inc)
self.cimported_modules.append(scope)
for m in scope.cimported_modules:
self.add_imported_module(m)
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
if is_cdef:
entry.is_cglobal = 1
- if entry.type.is_pyobject:
- entry.init = 0
+ if entry.type.declaration_value:
+ entry.init = entry.type.declaration_value
self.var_entries.append(entry)
else:
entry.is_pyglobal = 1
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
- if type.is_pyobject:
- entry.init = "0"
+ if entry.type.declaration_value:
+ entry.init = entry.type.declaration_value
entry.is_local = 1
entry.in_with_gil_block = self._in_with_gil_block
orig_entry = self.lookup_here(name)
if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
error(pos, "'%s' redeclared as nonlocal" % name)
+ orig_entry.already_declared_here()
else:
entry = self.lookup(name)
if entry is None or not entry.from_closure:
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0,
- allow_pyobject = 0):
+ allow_pyobject=False, allow_memoryview=False):
# Add an entry for an attribute.
if not cname:
cname = name
entry.is_variable = 1
self.var_entries.append(entry)
if type.is_pyobject and not allow_pyobject:
- error(pos,
- "C struct/union member cannot be a Python object")
+ error(pos, "C struct/union member cannot be a Python object")
+ elif type.is_memoryviewslice and not allow_memoryview:
+ # Memory views wrap their buffer owner as a Python object.
+ error(pos, "C struct/union member cannot be a memory view")
if visibility != 'private':
- error(pos,
- "C struct/union member cannot be declared %s" % visibility)
+ error(pos, "C struct/union member cannot be declared %s" % visibility)
return entry
def declare_cfunction(self, name, type, pos,
orig_entry = self.lookup_here(name)
if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
error(pos, "'%s' redeclared as nonlocal" % name)
+ orig_entry.already_declared_here()
else:
entry = self.lookup(name)
if entry is None:
cname = "%s__dealloc__%s" % (Naming.func_prefix, class_name)
name = '<del>'
type.return_type = PyrexTypes.CVoidType()
+ if name in ('<init>', '<del>') and type.nogil:
+ for base in self.type.base_classes:
+ base_entry = base.scope.lookup(name)
+ if base_entry and not base_entry.type.nogil:
+ error(pos, "Constructor cannot be called without GIL unless all base constructors can also be called without GIL")
+ error(base_entry.pos, "Base constructor defined here.")
prev_entry = self.lookup_here(name)
entry = self.declare_var(name, type, pos,
defining=defining,
# to work with this type.
for base_entry in \
base_scope.inherited_var_entries + base_scope.var_entries:
- #contructor/destructor is not inherited
+ #constructor/destructor is not inherited
if base_entry.name in ("<init>", "<del>"):
continue
#print base_entry.name, self.entries
T = F.substitute({"v" : NameNode(pos=None, name="a")})
v = F.root.stats[1].rhs.operand2.operand1
a = T.stats[1].rhs.operand2.operand1
- self.assertEquals(v.pos, a.pos)
+ self.assertEqual(v.pos, a.pos)
def test_temps(self):
TemplateTransform.temp_name_counter = 0
def test_node_path(self):
t = self._build_tree()
- self.assertEquals(2, len(find_all(t, "//DefNode")))
- self.assertEquals(2, len(find_all(t, "//NameNode")))
- self.assertEquals(1, len(find_all(t, "//ReturnStatNode")))
- self.assertEquals(1, len(find_all(t, "//DefNode//ReturnStatNode")))
+ self.assertEqual(2, len(find_all(t, "//DefNode")))
+ self.assertEqual(2, len(find_all(t, "//NameNode")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode")))
+ self.assertEqual(1, len(find_all(t, "//DefNode//ReturnStatNode")))
def test_node_path_star(self):
t = self._build_tree()
- self.assertEquals(10, len(find_all(t, "//*")))
- self.assertEquals(8, len(find_all(t, "//DefNode//*")))
- self.assertEquals(0, len(find_all(t, "//NameNode//*")))
+ self.assertEqual(10, len(find_all(t, "//*")))
+ self.assertEqual(8, len(find_all(t, "//DefNode//*")))
+ self.assertEqual(0, len(find_all(t, "//NameNode//*")))
def test_node_path_attribute(self):
t = self._build_tree()
- self.assertEquals(2, len(find_all(t, "//NameNode/@name")))
- self.assertEquals(['fun', 'decorator'], find_all(t, "//NameNode/@name"))
+ self.assertEqual(2, len(find_all(t, "//NameNode/@name")))
+ self.assertEqual(['fun', 'decorator'], find_all(t, "//NameNode/@name"))
def test_node_path_attribute_dotted(self):
t = self._build_tree()
- self.assertEquals(1, len(find_all(t, "//ReturnStatNode/@value.name")))
- self.assertEquals(['fun'], find_all(t, "//ReturnStatNode/@value.name"))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode/@value.name")))
+ self.assertEqual(['fun'], find_all(t, "//ReturnStatNode/@value.name"))
def test_node_path_child(self):
t = self._build_tree()
- self.assertEquals(1, len(find_all(t, "//DefNode/ReturnStatNode/NameNode")))
- self.assertEquals(1, len(find_all(t, "//ReturnStatNode/NameNode")))
+ self.assertEqual(1, len(find_all(t, "//DefNode/ReturnStatNode/NameNode")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode/NameNode")))
def test_node_path_node_predicate(self):
t = self._build_tree()
- self.assertEquals(0, len(find_all(t, "//DefNode[.//ForInStatNode]")))
- self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode]")))
- self.assertEquals(1, len(find_all(t, "//ReturnStatNode[./NameNode]")))
- self.assertEquals(Nodes.ReturnStatNode,
- type(find_first(t, "//ReturnStatNode[./NameNode]")))
+ self.assertEqual(0, len(find_all(t, "//DefNode[.//ForInStatNode]")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
+ self.assertEqual(1, len(find_all(t, "//ReturnStatNode[./NameNode]")))
+ self.assertEqual(Nodes.ReturnStatNode,
+ type(find_first(t, "//ReturnStatNode[./NameNode]")))
def test_node_path_node_predicate_step(self):
t = self._build_tree()
- self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode]")))
- self.assertEquals(8, len(find_all(t, "//DefNode[.//NameNode]//*")))
- self.assertEquals(1, len(find_all(t, "//DefNode[.//NameNode]//ReturnStatNode")))
- self.assertEquals(Nodes.ReturnStatNode,
- type(find_first(t, "//DefNode[.//NameNode]//ReturnStatNode")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
+ self.assertEqual(8, len(find_all(t, "//DefNode[.//NameNode]//*")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode]//ReturnStatNode")))
+ self.assertEqual(Nodes.ReturnStatNode,
+ type(find_first(t, "//DefNode[.//NameNode]//ReturnStatNode")))
def test_node_path_attribute_exists(self):
t = self._build_tree()
- self.assertEquals(2, len(find_all(t, "//NameNode[@name]")))
- self.assertEquals(ExprNodes.NameNode,
- type(find_first(t, "//NameNode[@name]")))
+ self.assertEqual(2, len(find_all(t, "//NameNode[@name]")))
+ self.assertEqual(ExprNodes.NameNode,
+ type(find_first(t, "//NameNode[@name]")))
def test_node_path_attribute_exists_not(self):
t = self._build_tree()
- self.assertEquals(0, len(find_all(t, "//NameNode[not(@name)]")))
- self.assertEquals(2, len(find_all(t, "//NameNode[not(@honking)]")))
+ self.assertEqual(0, len(find_all(t, "//NameNode[not(@name)]")))
+ self.assertEqual(2, len(find_all(t, "//NameNode[not(@honking)]")))
def test_node_path_and(self):
t = self._build_tree()
- self.assertEquals(1, len(find_all(t, "//DefNode[.//ReturnStatNode and .//NameNode]")))
- self.assertEquals(0, len(find_all(t, "//NameNode[@honking and @name]")))
- self.assertEquals(0, len(find_all(t, "//NameNode[@name and @honking]")))
- self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode[@name] and @name]")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode and .//NameNode]")))
+ self.assertEqual(0, len(find_all(t, "//NameNode[@honking and @name]")))
+ self.assertEqual(0, len(find_all(t, "//NameNode[@name and @honking]")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name] and @name]")))
def test_node_path_attribute_string_predicate(self):
t = self._build_tree()
- self.assertEquals(1, len(find_all(t, "//NameNode[@name = 'decorator']")))
+ self.assertEqual(1, len(find_all(t, "//NameNode[@name = 'decorator']")))
def test_node_path_recursive_predicate(self):
t = self._build_tree()
- self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode[@name]]")))
- self.assertEquals(1, len(find_all(t, "//DefNode[.//NameNode[@name = 'decorator']]")))
- self.assertEquals(1, len(find_all(t, "//DefNode[.//ReturnStatNode[./NameNode[@name = 'fun']]/NameNode]")))
+ self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name]]")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode[@name = 'decorator']]")))
+ self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode[./NameNode[@name = 'fun']]/NameNode]")))
if __name__ == '__main__':
unittest.main()
def test_load_as_string(self):
got = strip_2tup(self.cls.load_as_string(self.name))
- self.assertEquals(got, self.expected)
+ self.assertEqual(got, self.expected)
got = strip_2tup(self.cls.load_as_string(self.name, self.filename))
- self.assertEquals(got, self.expected)
+ self.assertEqual(got, self.expected)
def test_load(self):
utility = self.cls.load(self.name)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEquals(got, self.expected)
+ self.assertEqual(got, self.expected)
required, = utility.requires
got = strip_2tup((required.proto, required.impl))
- self.assertEquals(got, self.required)
+ self.assertEqual(got, self.required)
utility = self.cls.load(self.name, from_file=self.filename)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEquals(got, self.expected)
+ self.assertEqual(got, self.expected)
utility = self.cls.load_cached(self.name, from_file=self.filename)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEquals(got, self.expected)
+ self.assertEqual(got, self.expected)
class TestTempitaUtilityLoader(TestUtilityLoader):
def test_load_as_string(self):
got = strip_2tup(self.cls.load_as_string(self.name, context=self.context))
- self.assertEquals(got, self.expected_tempita)
+ self.assertEqual(got, self.expected_tempita)
def test_load(self):
utility = self.cls.load(self.name, context=self.context)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEquals(got, self.expected_tempita)
+ self.assertEqual(got, self.expected_tempita)
required, = utility.requires
got = strip_2tup((required.proto, required.impl))
- self.assertEquals(got, self.required_tempita)
+ self.assertEqual(got, self.required_tempita)
utility = self.cls.load(self.name, from_file=self.filename, context=self.context)
got = strip_2tup((utility.proto, utility.impl))
- self.assertEquals(got, self.expected_tempita)
+ self.assertEqual(got, self.expected_tempita)
class TestCythonUtilityLoader(TestTempitaUtilityLoader):
class StringParseContext(Main.Context):
- def __init__(self, name, include_directories=None, compiler_directives=None):
+ def __init__(self, name, include_directories=None, compiler_directives=None, cpp=False):
if include_directories is None:
include_directories = []
if compiler_directives is None:
compiler_directives = {}
Main.Context.__init__(self, include_directories, compiler_directives,
- create_testscope=False)
+ create_testscope=False, cpp=cpp)
self.module_name = name
def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1, absolute_fallback=True):
self.set_entry_type(entry, py_object_type)
return
- # Set of assignemnts
+ # Set of assignments
assignments = set()
assmts_resolved = set()
dependencies = {}
entry = node.entry
return spanning_type(types, entry.might_overflow, entry.pos, scope)
+ def inferred_types(entry):
+ has_none = False
+ has_pyobjects = False
+ types = []
+ for assmt in entry.cf_assignments:
+ if assmt.rhs.is_none:
+ has_none = True
+ else:
+ rhs_type = assmt.inferred_type
+ if rhs_type and rhs_type.is_pyobject:
+ has_pyobjects = True
+ types.append(rhs_type)
+ # Ignore None assignments as long as there are concrete Python type assignments,
+ # but include them if None is the only assigned Python object.
+ if has_none and not has_pyobjects:
+ types.append(py_object_type)
+ return types
+
def resolve_assignments(assignments):
resolved = set()
for assmt in assignments:
continue
entry_type = py_object_type
if assmts_resolved.issuperset(entry.cf_assignments):
- types = [assmt.inferred_type for assmt in entry.cf_assignments]
+ types = inferred_types(entry)
if types and all(types):
entry_type = spanning_type(
types, entry.might_overflow, entry.pos, scope)
def reinfer():
dirty = False
for entry in inferred:
- types = [assmt.infer_type()
- for assmt in entry.cf_assignments]
+ for assmt in entry.cf_assignments:
+ assmt.infer_type()
+ types = inferred_types(entry)
new_type = spanning_type(types, entry.might_overflow, entry.pos, scope)
if new_type != entry.type:
self.set_entry_type(entry, new_type)
# find_spanning_type() only returns 'bint' for clean boolean
# operations without other int types, so this is safe, too
return result_type
+ elif result_type.is_pythran_expr:
+ return result_type
elif result_type.is_ptr:
# Any pointer except (signed|unsigned|) char* can't implicitly
# become a PyObject, and inferring char* is now accepted, too.
return slot_code
return None
+
+def get_slot_by_name(slot_name):
+ # For now, only search the type struct, no referenced sub-structs.
+ for slot in slot_table:
+ if slot.slot_name == slot_name:
+ return slot
+ assert False, "Slot not found: %s" % slot_name
+
+
+def get_slot_code_by_name(scope, slot_name):
+ slot = get_slot_by_name(slot_name)
+ return slot.slot_code(scope)
+
+
#------------------------------------------------------------------------------------------
#
# Signatures for generic Python functions and methods.
cmpfunc = Signature("TO", "i") # typedef int (*cmpfunc)(PyObject *, PyObject *);
reprfunc = Signature("T", "O") # typedef PyObject *(*reprfunc)(PyObject *);
hashfunc = Signature("T", "h") # typedef Py_hash_t (*hashfunc)(PyObject *);
- # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
-richcmpfunc = Signature("OOi", "O") # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
+richcmpfunc = Signature("TOi", "O") # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
getiterfunc = Signature("T", "O") # typedef PyObject *(*getiterfunc) (PyObject *);
iternextfunc = Signature("T", "O") # typedef PyObject *(*iternextfunc) (PyObject *);
descrgetfunc = Signature("TOO", "O") # typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *);
#
# Nodes used as utilities and support for transforms etc.
# These often make up sets including both Nodes and ExprNodes
-# so it is convenient to have them in a seperate module.
+# so it is convenient to have them in a separate module.
#
from __future__ import absolute_import
def infer_type(self, env):
return self.subexpression.infer_type(env)
+ def may_be_none(self):
+ return self.subexpression.may_be_none()
+
def result(self):
return self.subexpression.result()
class NonManglingModuleScope(Symtab.ModuleScope):
- cpp = False
-
def __init__(self, prefix, *args, **kw):
self.prefix = prefix
self.cython_scope = None
+ self.cpp = kw.pop('cpp', False)
Symtab.ModuleScope.__init__(self, *args, **kw)
def add_imported_entry(self, name, entry, pos):
if self.scope is None:
self.scope = NonManglingModuleScope(
- self.prefix, module_name, parent_module=None, context=self)
+ self.prefix, module_name, parent_module=None, context=self, cpp=self.cpp)
return self.scope
from . import Pipeline, ParseTreeTransforms
context = CythonUtilityCodeContext(
- self.name, compiler_directives=self.compiler_directives)
+ self.name, compiler_directives=self.compiler_directives,
+ cpp=cython_scope.is_cpp() if cython_scope else False)
context.prefix = self.prefix
context.cython_scope = cython_scope
#context = StringParseContext(self.name)
for dep in self.requires:
if dep.is_cython_utility:
- dep.declare_in_scope(dest_scope)
+ dep.declare_in_scope(dest_scope, cython_scope=cython_scope)
return original_scope
# into a C function call (defined in the builtin scope)
if not function.entry:
return node
+ entry = function.entry
is_builtin = (
- function.entry.is_builtin or
- function.entry is self.current_env().builtin_scope().lookup_here(function.name))
+ entry.is_builtin or
+ entry is self.current_env().builtin_scope().lookup_here(function.name))
if not is_builtin:
if function.cf_state and function.cf_state.is_single:
# we know the value of the variable
# => see if it's usable instead
return self._delegate_to_assigned_value(
node, function, arg_list, kwargs)
+ if arg_list and entry.is_cmethod and entry.scope and entry.scope.parent_type.is_builtin_type:
+ if entry.scope.parent_type is arg_list[0].type:
+ # Optimised (unbound) method of a builtin type => try to "de-optimise".
+ return self._dispatch_to_method_handler(
+ entry.name, self_arg=None, is_unbound_method=True,
+ type_name=entry.scope.parent_type.name,
+ node=node, function=function, arg_list=arg_list, kwargs=kwargs)
return node
function_handler = self._find_handler(
"function_%s" % function.name, kwargs)
obj_type = self_arg.type
is_unbound_method = False
if obj_type.is_builtin_type:
- if (obj_type is Builtin.type_type and self_arg.is_name and
- arg_list and arg_list[0].type.is_pyobject):
+ if obj_type is Builtin.type_type and self_arg.is_name and arg_list and arg_list[0].type.is_pyobject:
# calling an unbound method like 'list.append(L,x)'
# (ignoring 'type.mro()' here ...)
type_name = self_arg.name
if not c_file:
return None # unknown file
rel_file_path, code = self._parse_lines(c_file, filename)
+ if code is None:
+ return None # no source found
return CythonModuleReporter(c_file, filename, rel_file_path, code)
def _find_source_files(self, filename):
class CyGDBError(gdb.GdbError):
"""
- Base class for Cython-command related erorrs
+ Base class for Cython-command related errors
"""
def __init__(self, *args):
##################################################################
import re
-import atexit
import warnings
import tempfile
import textwrap
"""
def __init__(self):
- self.fd, self.filename = tempfile.mkstemp()
- self.file = os.fdopen(self.fd, 'r+')
+ f = tempfile.NamedTemporaryFile('r+')
+ self.file = f
+ self.filename = f.name
+ self.fd = f.fileno()
_execute("set logging file %s" % self.filename)
self.file_position_stack = []
- atexit.register(os.close, self.fd)
- atexit.register(os.remove, self.filename)
-
def __enter__(self):
if not self.file_position_stack:
_execute("set logging redirect on")
inferior.
Of course, executing any code in the inferior may be dangerous and may
- leave the debuggee in an unsafe state or terminate it alltogether.
+ leave the debuggee in an unsafe state or terminate it altogether.
"""
if '\0' in code:
raise gdb.GdbError("String contains NUL byte.")
class new_build_ext(_build_ext, object):
def finalize_options(self):
if self.distribution.ext_modules:
+ nthreads = getattr(self, 'parallel', None) # -j option in Py3.5+
+ nthreads = int(nthreads) if nthreads else None
from Cython.Build.Dependencies import cythonize
self.distribution.ext_modules[:] = cythonize(
- self.distribution.ext_modules)
+ self.distribution.ext_modules, nthreads=nthreads, force=self.force)
super(new_build_ext, self).finalize_options()
# This will become new_build_ext in the future.
object PyList_AsTuple (object)
int PyList_Check (object) # Always succeeds.
int PyList_CheckExact (object) # Always succeeds.
- int PyList_GET_SIZE (object) # Always suceeds.
+ int PyList_GET_SIZE (object) # Always succeeds.
object PyList_GetSlice (object, Py_ssize_t, Py_ssize_t)
int PyList_Insert (object, Py_ssize_t, object) except -1
object PyList_New (Py_ssize_t)
# Read http://docs.python.org/api/refcounts.html which is so
# important I've copied it below.
#
-# For all the declaration below, whenver the Py_ function returns
+# For all the declaration below, whenever the Py_ function returns
# a *new reference* to a PyObject*, the return type is "object".
# When the function returns a borrowed reference, the return
# type is PyObject*. When Cython sees "object" as a return type
# it doesn't increment the reference count. When it sees PyObject*
# in order to use the result you must explicitly cast to <object>,
-# and when you do that Cython increments the reference count wether
+# and when you do that Cython increments the reference count whether
# you want it to or not, forcing you to an explicit DECREF (or leak memory).
# To avoid this we make the above convention. Note, you can
# always locally override this convention by putting something like
def __getbuffer__(self, Py_buffer* info, int flags):
# This implementation of getbuffer is geared towards Cython
- # requirements, and does not yet fullfill the PEP.
+ # requirements, and does not yet fulfill the PEP.
# In particular strided access is always provided regardless
# of flags
item_count = Py_SIZE(self)
return op
cdef inline int extend_buffer(array self, char* stuff, Py_ssize_t n) except -1:
- """ efficent appending of new stuff of same type
+ """ efficient appending of new stuff of same type
(e.g. of same array type)
n: number of elements (not number of bytes!) """
cdef Py_ssize_t itemsize = self.ob_descr.itemsize
ctypedef object (*unaryfunc)(object)
ctypedef object (*binaryfunc)(object, object)
ctypedef object (*ternaryfunc)(object, object, object)
- ctypedef int (*inquiry)(object)
- ctypedef Py_ssize_t (*lenfunc)(object)
+ ctypedef int (*inquiry)(object) except -1
+ ctypedef Py_ssize_t (*lenfunc)(object) except -1
ctypedef object (*ssizeargfunc)(object, Py_ssize_t)
ctypedef object (*ssizessizeargfunc)(object, Py_ssize_t, Py_ssize_t)
- ctypedef int (*ssizeobjargproc)(object, Py_ssize_t, object)
- ctypedef int (*ssizessizeobjargproc)(object, Py_ssize_t, Py_ssize_t, object)
- ctypedef int (*objobjargproc)(object, object, object)
- ctypedef int (*objobjproc)(object, object)
+ ctypedef int (*ssizeobjargproc)(object, Py_ssize_t, object) except -1
+ ctypedef int (*ssizessizeobjargproc)(object, Py_ssize_t, Py_ssize_t, object) except -1
+ ctypedef int (*objobjargproc)(object, object, object) except -1
+ ctypedef int (*objobjproc)(object, object) except -1
- ctypedef Py_hash_t (*hashfunc)(object)
+ ctypedef Py_hash_t (*hashfunc)(object) except -1
ctypedef object (*reprfunc)(object)
- ctypedef int (*cmpfunc)(object, object)
+ ctypedef int (*cmpfunc)(object, object) except -2
ctypedef object (*richcmpfunc)(object, object, int)
# The following functions use 'PyObject*' as first argument instead of 'object' to prevent
# accidental reference counting when calling them during a garbage collection run.
ctypedef void (*destructor)(PyObject*)
- ctypedef int (*visitproc)(PyObject*, void *)
- ctypedef int (*traverseproc)(PyObject*, visitproc, void*)
+ ctypedef int (*visitproc)(PyObject*, void *) except -1
+ ctypedef int (*traverseproc)(PyObject*, visitproc, void*) except -1
+ ctypedef void (*freefunc)(void*)
ctypedef object (*descrgetfunc)(object, object, object)
ctypedef int (*descrsetfunc)(object, object, object) except -1
destructor tp_dealloc
traverseproc tp_traverse
inquiry tp_clear
+ freefunc tp_free
ternaryfunc tp_call
hashfunc tp_hash
ctypedef void *PyThread_type_sema
void PyThread_init_thread()
- long PyThread_start_new_thread(void (*)(void *), void *)
+ long PyThread_start_new_thread(void (*)(void *), void *) # FIXME: legacy
+ #unsigned long PyThread_start_new_thread(void (*)(void *), void *) # returned 'long' before Py3.7
void PyThread_exit_thread()
- long PyThread_get_thread_ident()
+ long PyThread_get_thread_ident() # FIXME: legacy
+ #unsigned long PyThread_get_thread_ident() # returned 'long' before Py3.7
PyThread_type_lock PyThread_allocate_lock()
void PyThread_free_lock(PyThread_type_lock)
size_t PyThread_get_stacksize()
int PyThread_set_stacksize(size_t)
- # Thread Local Storage (TLS) API
+ # Thread Local Storage (TLS) API deprecated in CPython 3.7+
int PyThread_create_key()
void PyThread_delete_key(int)
int PyThread_set_key_value(int, void *)
# Cleanup after a fork
void PyThread_ReInitTLS()
+
+ # Thread Specific Storage (TSS) API in CPython 3.7+ (also backported)
+ #ctypedef struct Py_tss_t: pass # Cython built-in type
+ Py_tss_t Py_tss_NEEDS_INIT # Not normally useful: Cython auto-initialises declared "Py_tss_t" variables.
+ Py_tss_t * PyThread_tss_alloc()
+ void PyThread_tss_free(Py_tss_t *key)
+ int PyThread_tss_is_created(Py_tss_t *key)
+ int PyThread_tss_create(Py_tss_t *key)
+ void PyThread_tss_delete(Py_tss_t *key)
+ int PyThread_tss_set(Py_tss_t *key, void *value)
+ void * PyThread_tss_get(Py_tss_t *key)
# 5.2.4.2.1 Sizes of integer types <limits.h>
cdef extern from "<limits.h>":
+ const int CHAR_BIT
+ const int MB_LEN_MAX
- enum: CHAR_BIT
- enum: MB_LEN_MAX
+ const char CHAR_MIN
+ const char CHAR_MAX
- enum: CHAR_MIN
- enum: CHAR_MAX
+ const signed char SCHAR_MIN
+ const signed char SCHAR_MAX
+ const unsigned char UCHAR_MAX
- enum: SCHAR_MIN
- enum: SCHAR_MAX
- enum: UCHAR_MAX
+ const short SHRT_MIN
+ const short SHRT_MAX
+ const unsigned short USHRT_MAX
- enum: SHRT_MIN
- enum: SHRT_MAX
- enum: USHRT_MAX
+ const int INT_MIN
+ const int INT_MAX
+ const unsigned int UINT_MAX
- enum: INT_MIN
- enum: INT_MAX
- enum: UINT_MAX
+ const long LONG_MIN
+ const long LONG_MAX
+ const unsigned long ULONG_MAX
- enum: LONG_MIN
- enum: LONG_MAX
- enum: ULONG_MAX
-
- enum: LLONG_MIN
- enum: LLONG_MAX
- enum: ULLONG_MAX
+ const long long LLONG_MIN
+ const long long LLONG_MAX
+ const unsigned long long ULLONG_MAX
ctypedef int sig_atomic_t
- enum: SIGABRT
- enum: SIGFPE
- enum: SIGILL
- enum: SIGINT
- enum: SIGSEGV
- enum: SIGTERM
-
sighandler_t SIG_DFL
sighandler_t SIG_IGN
sighandler_t SIG_ERR
sighandler_t signal (int signum, sighandler_t action)
int raise_"raise" (int signum)
-
-cdef extern from "<signal.h>" nogil:
-
- # Program Error
- enum: SIGFPE
- enum: SIGILL
- enum: SIGSEGV
- enum: SIGBUS
- enum: SIGABRT
- enum: SIGIOT
- enum: SIGTRAP
- enum: SIGEMT
- enum: SIGSYS
- # Termination
- enum: SIGTERM
- enum: SIGINT
- enum: SIGQUIT
- enum: SIGKILL
- enum: SIGHUP
- # Alarm
- enum: SIGALRM
- enum: SIGVTALRM
- enum: SIGPROF
- # Asynchronous I/O
- enum: SIGIO
- enum: SIGURG
- enum: SIGPOLL
- # Job Control
- enum: SIGCHLD
- enum: SIGCLD
- enum: SIGCONT
- enum: SIGSTOP
- enum: SIGTSTP
- enum: SIGTTIN
- enum: SIGTTOU
- # Operation Error
- enum: SIGPIPE
- enum: SIGLOST
- enum: SIGXCPU
- enum: SIGXFSZ
- # Miscellaneous
- enum: SIGUSR1
- enum: SIGUSR2
- enum: SIGWINCH
- enum: SIGINFO
-
+ # Signals
+ enum:
+ # Program Error
+ SIGFPE
+ SIGILL
+ SIGSEGV
+ SIGBUS
+ SIGABRT
+ SIGIOT
+ SIGTRAP
+ SIGEMT
+ SIGSYS
+ SIGSTKFLT
+ # Termination
+ SIGTERM
+ SIGINT
+ SIGQUIT
+ SIGKILL
+ SIGHUP
+ # Alarm
+ SIGALRM
+ SIGVTALRM
+ SIGPROF
+ # Asynchronous I/O
+ SIGIO
+ SIGURG
+ SIGPOLL
+ # Job Control
+ SIGCHLD
+ SIGCLD
+ SIGCONT
+ SIGSTOP
+ SIGTSTP
+ SIGTTIN
+ SIGTTOU
+ # Operation Error
+ SIGPIPE
+ SIGLOST
+ SIGXCPU
+ SIGXFSZ
+ SIGPWR
+ # Miscellaneous
+ SIGUSR1
+ SIGUSR2
+ SIGWINCH
+ SIGINFO
+ # Real-time signals
+ SIGRTMIN
+ SIGRTMAX
cdef extern from "<deque>" namespace "std" nogil:
cdef cppclass deque[T,ALLOCATOR=*]:
+ ctypedef T value_type
+ ctypedef ALLOCATOR allocator_type
+
+ # these should really be allocator_type.size_type and
+ # allocator_type.difference_type to be true to the C++ definition
+ # but cython doesn't support deferred access on template arguments
+ ctypedef size_t size_type
+ ctypedef ptrdiff_t difference_type
+
cppclass iterator:
T& operator*()
iterator operator++()
iterator operator--()
+ iterator operator+(size_type)
+ iterator operator-(size_type)
+ difference_type operator-(iterator)
bint operator==(iterator)
bint operator!=(iterator)
+ bint operator<(iterator)
+ bint operator>(iterator)
+ bint operator<=(iterator)
+ bint operator>=(iterator)
cppclass reverse_iterator:
T& operator*()
- iterator operator++()
- iterator operator--()
+ reverse_iterator operator++()
+ reverse_iterator operator--()
+ reverse_iterator operator+(size_type)
+ reverse_iterator operator-(size_type)
+ difference_type operator-(reverse_iterator)
bint operator==(reverse_iterator)
bint operator!=(reverse_iterator)
+ bint operator<(reverse_iterator)
+ bint operator>(reverse_iterator)
+ bint operator<=(reverse_iterator)
+ bint operator>=(reverse_iterator)
cppclass const_iterator(iterator):
pass
- #cppclass const_reverse_iterator(reverse_iterator):
- # pass
+ cppclass const_reverse_iterator(reverse_iterator):
+ pass
deque() except +
deque(deque&) except +
deque(size_t) except +
cdef cppclass string:
string() except +
- string(char *) except +
- string(char *, size_t) except +
- string(string&) except +
+ string(const char *) except +
+ string(const char *, size_t) except +
+ string(const string&) except +
# as a string formed by a repetition of character c, n times.
string(size_t, char) except +
char& at(size_t)
char& operator[](size_t)
- int compare(string&)
+ int compare(const string&)
- string& append(string&)
- string& append(string&, size_t, size_t)
- string& append(char *)
- string& append(char *, size_t)
+ string& append(const string&)
+ string& append(const string&, size_t, size_t)
+ string& append(const char *)
+ string& append(const char *, size_t)
string& append(size_t, char)
void push_back(char c)
- string& assign (string&)
- string& assign (string&, size_t, size_t)
- string& assign (char *, size_t)
- string& assign (char *)
+ string& assign (const string&)
+ string& assign (const string&, size_t, size_t)
+ string& assign (const char *, size_t)
+ string& assign (const char *)
string& assign (size_t n, char c)
- string& insert(size_t, string&)
- string& insert(size_t, string&, size_t, size_t)
- string& insert(size_t, char* s, size_t)
+ string& insert(size_t, const string&)
+ string& insert(size_t, const string&, size_t, size_t)
+ string& insert(size_t, const char* s, size_t)
- string& insert(size_t, char* s)
+ string& insert(size_t, const char* s)
string& insert(size_t, size_t, char c)
size_t copy(char *, size_t, size_t)
- size_t find(string&)
- size_t find(string&, size_t)
- size_t find(char*, size_t pos, size_t)
- size_t find(char*, size_t pos)
+ size_t find(const string&)
+ size_t find(const string&, size_t)
+ size_t find(const char*, size_t pos, size_t)
+ size_t find(const char*, size_t pos)
size_t find(char, size_t pos)
- size_t rfind(string&, size_t)
- size_t rfind(char* s, size_t, size_t)
- size_t rfind(char*, size_t pos)
+ size_t rfind(const string&, size_t)
+ size_t rfind(const char* s, size_t, size_t)
+ size_t rfind(const char*, size_t pos)
size_t rfind(char c, size_t)
size_t rfind(char c)
- size_t find_first_of(string&, size_t)
- size_t find_first_of(char* s, size_t, size_t)
- size_t find_first_of(char*, size_t pos)
+ size_t find_first_of(const string&, size_t)
+ size_t find_first_of(const char* s, size_t, size_t)
+ size_t find_first_of(const char*, size_t pos)
size_t find_first_of(char c, size_t)
size_t find_first_of(char c)
- size_t find_first_not_of(string&, size_t)
- size_t find_first_not_of(char* s, size_t, size_t)
- size_t find_first_not_of(char*, size_t pos)
+ size_t find_first_not_of(const string&, size_t)
+ size_t find_first_not_of(const char* s, size_t, size_t)
+ size_t find_first_not_of(const char*, size_t pos)
size_t find_first_not_of(char c, size_t)
size_t find_first_not_of(char c)
- size_t find_last_of(string&, size_t)
- size_t find_last_of(char* s, size_t, size_t)
- size_t find_last_of(char*, size_t pos)
+ size_t find_last_of(const string&, size_t)
+ size_t find_last_of(const char* s, size_t, size_t)
+ size_t find_last_of(const char*, size_t pos)
size_t find_last_of(char c, size_t)
size_t find_last_of(char c)
- size_t find_last_not_of(string&, size_t)
- size_t find_last_not_of(char* s, size_t, size_t)
- size_t find_last_not_of(char*, size_t pos)
+ size_t find_last_not_of(const string&, size_t)
+ size_t find_last_not_of(const char* s, size_t, size_t)
+ size_t find_last_not_of(const char*, size_t pos)
string substr(size_t, size_t)
string substr()
size_t find_last_not_of(char c, size_t)
size_t find_last_not_of(char c)
- #string& operator= (string&)
- #string& operator= (char*)
+ #string& operator= (const string&)
+ #string& operator= (const char*)
#string& operator= (char)
- string operator+ (string& rhs)
- string operator+ (char* rhs)
+ string operator+ (const string& rhs)
+ string operator+ (const char* rhs)
- bint operator==(string&)
- bint operator==(char*)
+ bint operator==(const string&)
+ bint operator==(const char*)
- bint operator!= (string& rhs )
- bint operator!= (char* )
+ bint operator!= (const string& rhs )
+ bint operator!= (const char* )
- bint operator< (string&)
- bint operator< (char*)
+ bint operator< (const string&)
+ bint operator< (const char*)
- bint operator> (string&)
- bint operator> (char*)
+ bint operator> (const string&)
+ bint operator> (const char*)
- bint operator<= (string&)
- bint operator<= (char*)
+ bint operator<= (const string&)
+ bint operator<= (const char*)
- bint operator>= (string&)
- bint operator>= (char*)
+ bint operator>= (const string&)
+ bint operator>= (const char*)
bint operator>=(iterator)
cppclass reverse_iterator:
T& operator*()
- iterator operator++()
- iterator operator--()
- iterator operator+(size_type)
- iterator operator-(size_type)
+ reverse_iterator operator++()
+ reverse_iterator operator--()
+ reverse_iterator operator+(size_type)
+ reverse_iterator operator-(size_type)
+ difference_type operator-(reverse_iterator)
bint operator==(reverse_iterator)
bint operator!=(reverse_iterator)
bint operator<(reverse_iterator)
NPY_ANYORDER
NPY_CORDER
NPY_FORTRANORDER
+ NPY_KEEPORDER
ctypedef enum NPY_CLIPMODE:
NPY_CLIP
# -- the details of this may change.
def __getbuffer__(ndarray self, Py_buffer* info, int flags):
# This implementation of getbuffer is geared towards Cython
- # requirements, and does not yet fullfill the PEP.
+ # requirements, and does not yet fulfill the PEP.
# In particular strided access is always provided regardless
# of flags
- if info == NULL: return
-
- cdef int copy_shape, i, ndim
+ cdef int i, ndim
cdef int endian_detector = 1
cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
ndim = PyArray_NDIM(self)
- if sizeof(npy_intp) != sizeof(Py_ssize_t):
- copy_shape = 1
- else:
- copy_shape = 0
-
if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
raise ValueError(u"ndarray is not C contiguous")
info.buf = PyArray_DATA(self)
info.ndim = ndim
- if copy_shape:
+ if sizeof(npy_intp) != sizeof(Py_ssize_t):
# Allocate new buffer for strides and shape info.
# This is allocated as one block, strides first.
info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim)
cdef dtype descr = self.descr
cdef int offset
- cdef bint hasfields = PyDataType_HASFIELDS(descr)
-
- if not hasfields and not copy_shape:
- # do not call releasebuffer
- info.obj = None
- else:
- # need to call releasebuffer
- info.obj = self
+ info.obj = self
- if not hasfields:
+ if not PyDataType_HASFIELDS(descr):
t = descr.type_num
if ((descr.byteorder == c'>' and little_endian) or
(descr.byteorder == c'<' and not little_endian)):
sigset_t sa_mask
int sa_flags
+ ctypedef struct stack_t:
+ void *ss_sp
+ int ss_flags
+ size_t ss_size
+
enum: SA_NOCLDSTOP
enum: SIG_BLOCK
enum: SIG_UNBLOCK
int sigemptyset (sigset_t *)
int sigfillset (sigset_t *)
int sigismember (const sigset_t *)
+
+ int sigaltstack(const stack_t *, stack_t *)
from posix.signal cimport sigevent
cdef extern from "<sys/time.h>" nogil:
- enum: CLOCK_PROCESS_CPUTIME_ID
- enum: CLOCK_THREAD_CPUTIME_ID
-
enum: CLOCK_REALTIME
enum: TIMER_ABSTIME
enum: CLOCK_MONOTONIC
# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
# we explicitly match '*' here, too, to give it proper precedence.
# Illegal combinations and orderings are blocked in ast.c:
-# multiple (test comp_for) arguements are blocked; keyword unpackings
+# multiple (test comp_for) arguments are blocked; keyword unpackings
# that precede iterable unpackings are blocked; etc.
argument: ( test [comp_for] |
test '=' test |
# cython.* namespace for pure mode.
from __future__ import absolute_import
-__version__ = "0.27.3"
+__version__ = "0.28"
try:
from __builtin__ import basestring
returns = wraparound = boundscheck = initializedcheck = nonecheck = \
overflowcheck = embedsignature = cdivision = cdivision_warnings = \
- always_allows_keywords = profile = linetrace = infer_type = \
+ always_allows_keywords = profile = linetrace = infer_types = \
unraisable_tracebacks = freelist = \
lambda _: _EmptyDecoratorAndManager()
int_types = ['char', 'short', 'Py_UNICODE', 'int', 'Py_UCS4', 'long', 'longlong', 'Py_ssize_t', 'size_t']
float_types = ['longdouble', 'double', 'float']
complex_types = ['longdoublecomplex', 'doublecomplex', 'floatcomplex', 'complex']
-other_types = ['bint', 'void']
+other_types = ['bint', 'void', 'Py_tss_t']
to_repr = {
'longlong': 'long long',
gs[name] = typedef(py_complex, to_repr(name, name))
bint = typedef(bool, "bint")
-void = typedef(int, "void")
+void = typedef(None, "void")
+Py_tss_t = typedef(None, "Py_tss_t")
for t in int_types + float_types + complex_types + other_types:
for i in range(1, 4):
gs["%s_%s" % ('p'*i, t)] = gs[t]._pointer(i)
-void = typedef(None, "void")
NULL = gs['p_void'](0)
# looks like 'gs' has some users out there by now...
--- /dev/null
+cimport cython
+
+cdef class StringIOTree:
+ cdef public list prepended_children
+ cdef public object stream
+ cdef public object write
+ cdef public list markers
+
+ @cython.locals(x=StringIOTree)
+ cpdef getvalue(self)
+ @cython.locals(child=StringIOTree)
+ cpdef copyto(self, target)
+ cpdef commit(self)
+ #def insert(self, iotree)
+ #def insertion_point(self)
+ @cython.locals(c=StringIOTree)
+ cpdef allmarkers(self)
+r"""
+Implements a buffer with insertion points. When you know you need to
+"get back" to a place and write more later, simply call insertion_point()
+at that spot and get a new StringIOTree object that is "left behind".
+
+EXAMPLE:
+
+>>> a = StringIOTree()
+>>> _= a.write('first\n')
+>>> b = a.insertion_point()
+>>> _= a.write('third\n')
+>>> _= b.write('second\n')
+>>> a.getvalue().split()
+['first', 'second', 'third']
+
+>>> c = b.insertion_point()
+>>> d = c.insertion_point()
+>>> _= d.write('alpha\n')
+>>> _= b.write('gamma\n')
+>>> _= c.write('beta\n')
+>>> b.getvalue().split()
+['second', 'alpha', 'beta', 'gamma']
+
+>>> i = StringIOTree()
+>>> d.insert(i)
+>>> _= i.write('inserted\n')
+>>> out = StringIO()
+>>> a.copyto(out)
+>>> out.getvalue().split()
+['first', 'second', 'alpha', 'inserted', 'beta', 'gamma', 'third']
+"""
+
+from __future__ import absolute_import #, unicode_literals
+
try:
+ # Prefer cStringIO since io.StringIO() does not support writing 'str' in Py2.
from cStringIO import StringIO
except ImportError:
- from io import StringIO # does not support writing 'str' in Py2
+ from io import StringIO
class StringIOTree(object):
def allmarkers(self):
children = self.prepended_children
return [m for c in children for m in c.allmarkers()] + self.markers
-
-
-__doc__ = r"""
-Implements a buffer with insertion points. When you know you need to
-"get back" to a place and write more later, simply call insertion_point()
-at that spot and get a new StringIOTree object that is "left behind".
-
-EXAMPLE:
-
->>> a = StringIOTree()
->>> _= a.write('first\n')
->>> b = a.insertion_point()
->>> _= a.write('third\n')
->>> _= b.write('second\n')
->>> a.getvalue().split()
-['first', 'second', 'third']
-
->>> c = b.insertion_point()
->>> d = c.insertion_point()
->>> _= d.write('alpha\n')
->>> _= b.write('gamma\n')
->>> _= c.write('beta\n')
->>> b.getvalue().split()
-['second', 'alpha', 'beta', 'gamma']
->>> i = StringIOTree()
->>> d.insert(i)
->>> _= i.write('inserted\n')
->>> out = StringIO()
->>> a.copyto(out)
->>> out.getvalue().split()
-['first', 'second', 'alpha', 'inserted', 'beta', 'gamma', 'third']
-"""
# CythonTest uses the CodeWriter heavily, so do some checking by
# roundtripping Cython code through the test framework.
- # Note that this test is dependant upon the normal Cython parser
+ # Note that this test is dependent upon the normal Cython parser
# to generate the input trees to the CodeWriter. This save *a lot*
# of time; better to spend that time writing other tests than perfecting
# this one...
static __pyx_CoroutineObject *__Pyx_AsyncGen_New(
- __pyx_coroutine_body_t body, PyObject *closure,
+ __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name) {
__pyx_PyAsyncGenObject *gen = PyObject_GC_New(__pyx_PyAsyncGenObject, __pyx_AsyncGenType);
if (unlikely(!gen))
gen->ag_finalizer = NULL;
gen->ag_closed = 0;
gen->ag_hooks_inited = 0;
- return __Pyx__Coroutine_NewInit((__pyx_CoroutineObject*)gen, body, closure, name, qualname, module_name);
+ return __Pyx__Coroutine_NewInit((__pyx_CoroutineObject*)gen, body, code, closure, name, qualname, module_name);
}
static int __pyx_AsyncGen_init(void);
//////////////////// AsyncGenerator ////////////////////
//@requires: AsyncGeneratorInitFinalizer
//@requires: Coroutine.c::Coroutine
+//@requires: Coroutine.c::ReturnWithStopIteration
//@requires: ObjectHandling.c::PyObjectCallMethod1
-
+//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict
PyDoc_STRVAR(__Pyx_async_gen_send_doc,
"send(arg) -> send 'arg' into generator,\n\
static PyGetSetDef __Pyx_async_gen_getsetlist[] = {
- {"__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name,
- PyDoc_STR("name of the async generator"), 0},
- {"__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
- PyDoc_STR("qualified name of the async generator"), 0},
- //REMOVED: {"ag_await", (getter)coro_get_cr_await, NULL,
- //REMOVED: PyDoc_STR("object being awaited on, or None")},
+ {(char*) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name,
+ (char*) PyDoc_STR("name of the async generator"), 0},
+ {(char*) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
+ (char*) PyDoc_STR("qualified name of the async generator"), 0},
+ //REMOVED: {(char*) "ag_await", (getter)coro_get_cr_await, NULL,
+ //REMOVED: (char*) PyDoc_STR("object being awaited on, or None")},
{0, 0, 0, 0, 0} /* Sentinel */
};
static PyMemberDef __Pyx_async_gen_memberlist[] = {
- //REMOVED: {"ag_frame", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_frame), READONLY},
- {"ag_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
- //REMOVED: {"ag_code", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_code), READONLY},
+ //REMOVED: {(char*) "ag_frame", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_frame), READONLY},
+ {(char*) "ag_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
+ //REMOVED: {(char*) "ag_code", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_code), READONLY},
//ADDED: "ag_await"
{(char*) "ag_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
(char*) PyDoc_STR("object being awaited on, or None")},
};
+#if CYTHON_USE_ASYNC_SLOTS
static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_asend_as_async = {
PyObject_SelfIter, /* am_await */
0, /* am_aiter */
0 /* am_anext */
};
+#endif
static PyTypeObject __pyx__PyAsyncGenASendType_type = {
};
+#if CYTHON_USE_ASYNC_SLOTS
static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_athrow_as_async = {
PyObject_SelfIter, /* am_await */
0, /* am_aiter */
0 /* am_anext */
};
+#endif
static PyTypeObject __pyx__PyAsyncGenAThrowType_type = {
static int __pyx_AsyncGen_init(void) {
// on Windows, C-API functions can't be used in slots statically
- __pyx_AsyncGenType_type.tp_getattro = PyObject_GenericGetAttr;
- __pyx__PyAsyncGenWrappedValueType_type.tp_getattro = PyObject_GenericGetAttr;
- __pyx__PyAsyncGenAThrowType_type.tp_getattro = PyObject_GenericGetAttr;
- __pyx__PyAsyncGenASendType_type.tp_getattro = PyObject_GenericGetAttr;
+ __pyx_AsyncGenType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ __pyx__PyAsyncGenWrappedValueType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ __pyx__PyAsyncGenAThrowType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ __pyx__PyAsyncGenASendType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx_AsyncGenType = __Pyx_FetchCommonType(&__pyx_AsyncGenType_type);
if (unlikely(!__pyx_AsyncGenType))
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
- /* fall through */
+ CYTHON_FALLTHROUGH;
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
++ts;
break;
}
- /* fall through */
+ CYTHON_FALLTHROUGH;
case 's':
/* 's' or new type (cannot be added to current pool) */
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
locals = globals;
}
- if (PyDict_GetItem(globals, PYIDENT("__builtins__")) == NULL) {
+ if (__Pyx_PyDict_GetItemStr(globals, PYIDENT("__builtins__")) == NULL) {
if (PyDict_SetItem(globals, PYIDENT("__builtins__"), PyEval_GetBuiltins()) < 0)
goto bad;
}
static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) {
PyObject *source_gen, *retval;
#ifdef __Pyx_Coroutine_USED
- if (__Pyx_Coroutine_CheckExact(source)) {
+ if (__Pyx_Coroutine_Check(source)) {
// TODO: this should only happen for types.coroutine()ed generators, but we can't determine that here
Py_INCREF(source);
source_gen = source;
return NULL;
}
// source_gen is now the iterator, make the first next() call
- if (__Pyx_Coroutine_CheckExact(source_gen)) {
+ if (__Pyx_Coroutine_Check(source_gen)) {
retval = __Pyx_Generator_Next(source_gen);
} else {
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) {
PyObject *retval;
- if (__Pyx_Coroutine_CheckExact(source)) {
+ if (__Pyx_Coroutine_Check(source)) {
if (unlikely(((__pyx_CoroutineObject*)source)->yieldfrom)) {
PyErr_SetString(
PyExc_RuntimeError,
static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o) {
#ifdef __Pyx_Coroutine_USED
- if (__Pyx_Coroutine_CheckExact(o)) {
+ if (__Pyx_Coroutine_Check(o)) {
return __Pyx_NewRef(o);
}
#endif
} else {
int is_coroutine = 0;
#ifdef __Pyx_Coroutine_USED
- is_coroutine |= __Pyx_Coroutine_CheckExact(res);
+ is_coroutine |= __Pyx_Coroutine_Check(res);
#endif
#if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact)
is_coroutine |= PyCoro_CheckExact(res);
PyObject *gi_name;
PyObject *gi_qualname;
PyObject *gi_modulename;
+ PyObject *gi_code;
int resume_label;
// using T_BOOL for property below requires char value
char is_running;
} __pyx_CoroutineObject;
static __pyx_CoroutineObject *__Pyx__Coroutine_New(
- PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *closure,
+ PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/
static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
- __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *closure,
+ __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/
static int __Pyx_Coroutine_clear(PyObject *self); /*proto*/
static PyTypeObject *__pyx_CoroutineType = 0;
static PyTypeObject *__pyx_CoroutineAwaitType = 0;
#define __Pyx_Coroutine_CheckExact(obj) (Py_TYPE(obj) == __pyx_CoroutineType)
+// __Pyx_Coroutine_Check(obj): see override for IterableCoroutine below
+#define __Pyx_Coroutine_Check(obj) __Pyx_Coroutine_CheckExact(obj)
#define __Pyx_CoroutineAwait_CheckExact(obj) (Py_TYPE(obj) == __pyx_CoroutineAwaitType)
-#define __Pyx_Coroutine_New(body, closure, name, qualname, module_name) \
- __Pyx__Coroutine_New(__pyx_CoroutineType, body, closure, name, qualname, module_name)
+#define __Pyx_Coroutine_New(body, code, closure, name, qualname, module_name) \
+ __Pyx__Coroutine_New(__pyx_CoroutineType, body, code, closure, name, qualname, module_name)
static int __pyx_Coroutine_init(void); /*proto*/
static PyObject *__Pyx__Coroutine_await(PyObject *coroutine); /*proto*/
static PyTypeObject *__pyx_GeneratorType = 0;
#define __Pyx_Generator_CheckExact(obj) (Py_TYPE(obj) == __pyx_GeneratorType)
-#define __Pyx_Generator_New(body, closure, name, qualname, module_name) \
- __Pyx__Coroutine_New(__pyx_GeneratorType, body, closure, name, qualname, module_name)
+#define __Pyx_Generator_New(body, code, closure, name, qualname, module_name) \
+ __Pyx__Coroutine_New(__pyx_GeneratorType, body, code, closure, name, qualname, module_name)
static PyObject *__Pyx_Generator_Next(PyObject *self);
static int __pyx_Generator_init(void); /*proto*/
const char *msg;
if (0) {
#ifdef __Pyx_Coroutine_USED
- } else if (__Pyx_Coroutine_CheckExact((PyObject*)gen)) {
+ } else if (__Pyx_Coroutine_Check((PyObject*)gen)) {
msg = "coroutine already executing";
#endif
#ifdef __Pyx_AsyncGen_USED
const char *msg;
if (0) {
#ifdef __Pyx_Coroutine_USED
- } else if (__Pyx_Coroutine_CheckExact(gen)) {
+ } else if (__Pyx_Coroutine_Check(gen)) {
msg = "can't send non-None value to a just-started coroutine";
#endif
#ifdef __Pyx_AsyncGen_USED
#define __Pyx_Coroutine_AlreadyTerminatedError(gen, value, closing) (__Pyx__Coroutine_AlreadyTerminatedError(gen, value, closing), (PyObject*)NULL)
static void __Pyx__Coroutine_AlreadyTerminatedError(CYTHON_UNUSED PyObject *gen, PyObject *value, CYTHON_UNUSED int closing) {
#ifdef __Pyx_Coroutine_USED
- if (!closing && __Pyx_Coroutine_CheckExact(gen)) {
+ if (!closing && __Pyx_Coroutine_Check(gen)) {
// `self` is an exhausted coroutine: raise an error,
// except when called from gen_close(), which should
// always be a silent method.
} else
#endif
#ifdef __Pyx_Coroutine_USED
- if (__Pyx_Coroutine_CheckExact(yf)) {
+ if (__Pyx_Coroutine_Check(yf)) {
ret = __Pyx_Coroutine_Send(yf, value);
} else
#endif
} else
#endif
#ifdef __Pyx_Coroutine_USED
- if (__Pyx_Coroutine_CheckExact(yf)) {
+ if (__Pyx_Coroutine_Check(yf)) {
retval = __Pyx_Coroutine_Close(yf);
if (!retval)
return -1;
ret = _PyGen_Send((PyGenObject*)yf, NULL);
} else
#endif
+ #ifdef __Pyx_Coroutine_USED
+ if (__Pyx_Coroutine_Check(yf)) {
+ ret = __Pyx_Coroutine_Send(yf, Py_None);
+ } else
+ #endif
ret = Py_TYPE(yf)->tp_iternext(yf);
gen->is_running = 0;
//Py_DECREF(yf);
Py_DECREF(retval);
if ((0)) {
#ifdef __Pyx_Coroutine_USED
- } else if (__Pyx_Coroutine_CheckExact(self)) {
+ } else if (__Pyx_Coroutine_Check(self)) {
msg = "coroutine ignored GeneratorExit";
#endif
#ifdef __Pyx_AsyncGen_USED
|| __Pyx_Generator_CheckExact(yf)
#endif
#ifdef __Pyx_Coroutine_USED
- || __Pyx_Coroutine_CheckExact(yf)
+ || __Pyx_Coroutine_Check(yf)
#endif
) {
ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit);
Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer);
}
#endif
+ Py_CLEAR(gen->gi_code);
Py_CLEAR(gen->gi_name);
Py_CLEAR(gen->gi_qualname);
Py_CLEAR(gen->gi_modulename);
}
static __pyx_CoroutineObject *__Pyx__Coroutine_New(
- PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *closure,
+ PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name) {
__pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type);
if (unlikely(!gen))
return NULL;
- return __Pyx__Coroutine_NewInit(gen, body, closure, name, qualname, module_name);
+ return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name);
}
static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
- __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *closure,
+ __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name) {
gen->body = body;
gen->closure = closure;
gen->gi_name = name;
Py_XINCREF(module_name);
gen->gi_modulename = module_name;
+ Py_XINCREF(code);
+ gen->gi_code = code;
PyObject_GC_Track(gen);
return gen;
//////////////////// Coroutine ////////////////////
//@requires: CoroutineBase
//@requires: PatchGeneratorABC
+//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict
static void __Pyx_CoroutineAwait_dealloc(PyObject *self) {
PyObject_GC_UnTrack(self);
}
static PyObject *__Pyx_Coroutine_await(PyObject *coroutine) {
- if (unlikely(!coroutine || !__Pyx_Coroutine_CheckExact(coroutine))) {
+ if (unlikely(!coroutine || !__Pyx_Coroutine_Check(coroutine))) {
PyErr_SetString(PyExc_TypeError, "invalid input, expected coroutine");
return NULL;
}
return __Pyx__Coroutine_await(coroutine);
}
+static PyObject *
+__Pyx_Coroutine_get_frame(CYTHON_UNUSED __pyx_CoroutineObject *self)
+{
+ // Fake implementation that always returns None, but at least does not raise an AttributeError.
+ Py_RETURN_NONE;
+}
+
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
static PyObject *__Pyx_Coroutine_compare(PyObject *obj, PyObject *other, int op) {
PyObject* result;
{(char *) "cr_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
{(char*) "cr_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
(char*) PyDoc_STR("object being awaited, or None")},
+ {(char*) "cr_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL},
{(char *) "__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), PY_WRITE_RESTRICTED, 0},
{0, 0, 0, 0, 0}
};
(char*) PyDoc_STR("name of the coroutine"), 0},
{(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
(char*) PyDoc_STR("qualified name of the coroutine"), 0},
+ {(char *) "cr_frame", (getter)__Pyx_Coroutine_get_frame, NULL,
+ (char*) PyDoc_STR("Frame of the coroutine"), 0},
{0, 0, 0, 0, 0}
};
static int __pyx_Coroutine_init(void) {
// on Windows, C-API functions can't be used in slots statically
- __pyx_CoroutineType_type.tp_getattro = PyObject_GenericGetAttr;
-
+ __pyx_CoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx_CoroutineType = __Pyx_FetchCommonType(&__pyx_CoroutineType_type);
if (unlikely(!__pyx_CoroutineType))
return -1;
+#ifdef __Pyx_IterableCoroutine_USED
+ if (unlikely(__pyx_IterableCoroutine_init() == -1))
+ return -1;
+#endif
+
__pyx_CoroutineAwaitType = __Pyx_FetchCommonType(&__pyx_CoroutineAwaitType_type);
if (unlikely(!__pyx_CoroutineAwaitType))
return -1;
return 0;
}
+
+//////////////////// IterableCoroutine.proto ////////////////////
+
+#define __Pyx_IterableCoroutine_USED
+
+static PyTypeObject *__pyx_IterableCoroutineType = 0;
+
+#undef __Pyx_Coroutine_Check
+#define __Pyx_Coroutine_Check(obj) (__Pyx_Coroutine_CheckExact(obj) || (Py_TYPE(obj) == __pyx_IterableCoroutineType))
+
+#define __Pyx_IterableCoroutine_New(body, code, closure, name, qualname, module_name) \
+ __Pyx__Coroutine_New(__pyx_IterableCoroutineType, body, code, closure, name, qualname, module_name)
+
+static int __pyx_IterableCoroutine_init(void);/*proto*/
+
+
+//////////////////// IterableCoroutine ////////////////////
+//@requires: Coroutine
+//@requires: CommonStructures.c::FetchCommonType
+
+static PyTypeObject __pyx_IterableCoroutineType_type = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "iterable_coroutine", /*tp_name*/
+ sizeof(__pyx_CoroutineObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ (destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+#if CYTHON_USE_ASYNC_SLOTS
+ &__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 only! */
+#else
+ 0, /*tp_reserved*/
+#endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/
+ 0, /*tp_doc*/
+ (traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/
+ 0, /*tp_clear*/
+#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
+ // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare
+ __Pyx_Coroutine_compare, /*tp_richcompare*/
+#else
+ 0, /*tp_richcompare*/
+#endif
+ offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/
+ // enable iteration for legacy support of the asyncio yield-from protocol
+ __Pyx_Coroutine_await, /*tp_iter*/
+ (iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/
+ __pyx_Coroutine_methods, /*tp_methods*/
+ __pyx_Coroutine_memberlist, /*tp_members*/
+ __pyx_Coroutine_getsets, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ 0, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+#if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_del*/
+#else
+ __Pyx_Coroutine_del, /*tp_del*/
+#endif
+ 0, /*tp_version_tag*/
+#if PY_VERSION_HEX >= 0x030400a1
+ __Pyx_Coroutine_del, /*tp_finalize*/
+#endif
+};
+
+
+static int __pyx_IterableCoroutine_init(void) {
+ __pyx_IterableCoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ __pyx_IterableCoroutineType = __Pyx_FetchCommonType(&__pyx_IterableCoroutineType_type);
+ if (unlikely(!__pyx_IterableCoroutineType))
+ return -1;
+ return 0;
+}
+
+
//////////////////// Generator ////////////////////
//@requires: CoroutineBase
//@requires: PatchGeneratorABC
+//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict
static PyMethodDef __pyx_Generator_methods[] = {
{"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O,
{(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
{(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
(char*) PyDoc_STR("object being iterated by 'yield from', or None")},
+ {(char*) "gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL},
{0, 0, 0, 0, 0}
};
static int __pyx_Generator_init(void) {
// on Windows, C-API functions can't be used in slots statically
- __pyx_GeneratorType_type.tp_getattro = PyObject_GenericGetAttr;
+ __pyx_GeneratorType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx_GeneratorType_type.tp_iter = PyObject_SelfIter;
__pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type);
Py_INCREF(value);
exc = value;
}
+ #if CYTHON_FAST_THREAD_STATE
__Pyx_PyThreadState_assign
- if (!$local_tstate_cname->exc_type) {
+ #if PY_VERSION_HEX >= 0x030700A2
+ if (!$local_tstate_cname->exc_state.exc_type)
+ #else
+ if (!$local_tstate_cname->exc_type)
+ #endif
+ {
// no chaining needed => avoid the overhead in PyErr_SetObject()
Py_INCREF(PyExc_StopIteration);
__Pyx_ErrRestore(PyExc_StopIteration, exc, NULL);
return;
}
+ #endif
#else
args = PyTuple_Pack(1, value);
if (unlikely(!args)) return;
//////////////////// CythonFunction.proto ////////////////////
#define __Pyx_CyFunction_USED 1
-#include <structmember.h>
#define __Pyx_CYFUNCTION_STATICMETHOD 0x01
#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02
//@requires: CommonStructures.c::FetchCommonType
////@requires: ObjectHandling.c::PyObjectGetAttrStr
+#include <structmember.h>
+
static PyObject *
__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure)
{
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(${cython_runtime_cname});
if (likely(cython_runtime_dict)) {
- use_cline = PyDict_GetItem(*cython_runtime_dict, PYIDENT("cline_in_traceback"));
+ use_cline = __Pyx_PyDict_GetItemStr(*cython_runtime_dict, PYIDENT("cline_in_traceback"));
} else
#endif
{
+/////////////// PyType_Ready.proto ///////////////
+
+static int __Pyx_PyType_Ready(PyTypeObject *t);
+
+/////////////// PyType_Ready ///////////////
+
+// Wrapper around PyType_Ready() with some runtime checks and fixes
+// to deal with multiple inheritance.
+static int __Pyx_PyType_Ready(PyTypeObject *t) {
+ // Loop over all bases (except the first) and check that those
+ // really are heap types. Otherwise, it would not be safe to
+ // subclass them.
+ //
+ // We also check tp_dictoffset: it is unsafe to inherit
+ // tp_dictoffset from a base class because the object structures
+ // would not be compatible. So, if our extension type doesn't set
+ // tp_dictoffset (i.e. there is no __dict__ attribute in the object
+ // structure), we need to check that none of the base classes sets
+ // it either.
+ int r;
+ PyObject *bases = t->tp_bases;
+ if (bases)
+ {
+ Py_ssize_t i, n = PyTuple_GET_SIZE(bases);
+ for (i = 1; i < n; i++) /* Skip first base */
+ {
+ PyObject *b0 = PyTuple_GET_ITEM(bases, i);
+ PyTypeObject *b;
+#if PY_MAJOR_VERSION < 3
+ /* Disallow old-style classes */
+ if (PyClass_Check(b0))
+ {
+ PyErr_Format(PyExc_TypeError, "base class '%.200s' is an old-style class",
+ PyString_AS_STRING(((PyClassObject*)b0)->cl_name));
+ return -1;
+ }
+#endif
+ b = (PyTypeObject*)b0;
+ if (!PyType_HasFeature(b, Py_TPFLAGS_HEAPTYPE))
+ {
+ PyErr_Format(PyExc_TypeError, "base class '%.200s' is not a heap type",
+ b->tp_name);
+ return -1;
+ }
+ if (t->tp_dictoffset == 0 && b->tp_dictoffset)
+ {
+ PyErr_Format(PyExc_TypeError,
+ "extension type '%.200s' has no __dict__ slot, but base type '%.200s' has: "
+ "either add 'cdef dict __dict__' to the extension type "
+ "or add '__slots__ = [...]' to the base type",
+ t->tp_name, b->tp_name);
+ return -1;
+ }
+ }
+ }
+
+#if PY_VERSION_HEX >= 0x03050000
+ // As of https://bugs.python.org/issue22079
+ // PyType_Ready enforces that all bases of a non-heap type are
+ // non-heap. We know that this is the case for the solid base but
+ // other bases are heap allocated and are kept alive through the
+ // tp_bases reference.
+ // Other than this check, the Py_TPFLAGS_HEAPTYPE flag is unused
+ // in PyType_Ready().
+ t->tp_flags |= Py_TPFLAGS_HEAPTYPE;
+#endif
+
+ r = PyType_Ready(t);
+
+#if PY_VERSION_HEX >= 0x03050000
+ t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE;
+#endif
+
+ return r;
+}
/////////////// CallNextTpDealloc.proto ///////////////
}
+/////////////// MergeVTables.proto ///////////////
+//@requires: GetVTable
+
+static int __Pyx_MergeVtables(PyTypeObject *type); /*proto*/
+
+/////////////// MergeVTables ///////////////
+
+static int __Pyx_MergeVtables(PyTypeObject *type) {
+ int i;
+ void** base_vtables;
+ void* unknown = (void*)-1;
+ PyObject* bases = type->tp_bases;
+ int base_depth = 0;
+ {
+ PyTypeObject* base = type->tp_base;
+ while (base) {
+ base_depth += 1;
+ base = base->tp_base;
+ }
+ }
+ base_vtables = (void**) malloc(sizeof(void*) * (base_depth + 1));
+ base_vtables[0] = unknown;
+ // Could do MRO resolution of individual methods in the future, assuming
+ // compatible vtables, but for now simply require a common vtable base.
+ // Note that if the vtables of various bases are extended separately,
+ // resolution isn't possible and we must reject it just as when the
+ // instance struct is so extended. (It would be good to do this
+ // check when a multiple-base class is created in pure Python as well.)
+ for (i = 1; i < PyTuple_GET_SIZE(bases); i++) {
+ void* base_vtable = __Pyx_GetVtable(((PyTypeObject*)PyTuple_GET_ITEM(bases, i))->tp_dict);
+ if (base_vtable != NULL) {
+ int j;
+ PyTypeObject* base = type->tp_base;
+ for (j = 0; j < base_depth; j++) {
+ if (base_vtables[j] == unknown) {
+ base_vtables[j] = __Pyx_GetVtable(base->tp_dict);
+ base_vtables[j + 1] = unknown;
+ }
+ if (base_vtables[j] == base_vtable) {
+ break;
+ } else if (base_vtables[j] == NULL) {
+ // No more potential matching bases (with vtables).
+ goto bad;
+ }
+ base = base->tp_base;
+ }
+ }
+ }
+ PyErr_Clear();
+ free(base_vtables);
+ return 0;
+bad:
+ PyErr_Format(
+ PyExc_TypeError,
+ "multiple bases have vtable conflict: '%s' and '%s'",
+ type->tp_base->tp_name, ((PyTypeObject*)PyTuple_GET_ITEM(bases, i))->tp_name);
+ free(base_vtables);
+ return -1;
+}
+
+
/////////////// ImportNumPyArray.proto ///////////////
static PyObject *__pyx_numpy_ndarray = NULL;
PyBUF_STRIDES
PyBUF_INDIRECT
PyBUF_RECORDS
+ PyBUF_RECORDS_RO
ctypedef struct __Pyx_TypeInfo:
pass
return self.convert_item_to_object(itemp)
def __setitem__(memoryview self, object index, object value):
+ if self.view.readonly:
+ raise TypeError("Cannot assign to read-only memoryview")
+
have_slices, index = _unellipsify(index, self.view.ndim)
if have_slices:
@cname('getbuffer')
def __getbuffer__(self, Py_buffer *info, int flags):
+ if flags & PyBUF_WRITABLE and self.view.readonly:
+ raise ValueError("Cannot create writable memory view from read-only memoryview")
+
if flags & PyBUF_STRIDES:
info.shape = self.view.shape
else:
info.ndim = self.view.ndim
info.itemsize = self.view.itemsize
info.len = self.view.len
- info.readonly = 0
+ info.readonly = self.view.readonly
info.obj = self
__pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
(<__pyx_buffer *> &result.view).obj = Py_None
Py_INCREF(Py_None)
- result.flags = PyBUF_RECORDS
+ if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
+ result.flags = PyBUF_RECORDS
+ else:
+ result.flags = PyBUF_RECORDS_RO
result.view.shape = <Py_ssize_t *> result.from_slice.shape
result.view.strides = <Py_ssize_t *> result.from_slice.strides
mslice.suboffsets[i] = -1
#
-### Take care of refcounting the objects in slices. Do this seperately from any copying,
+### Take care of refcounting the objects in slices. Do this separately from any copying,
### to minimize acquiring the GIL
#
/////////////// ObjectToMemviewSlice.proto ///////////////
-static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *);
+static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *, int writable_flag);
////////// MemviewSliceInit.proto //////////
/////////////// ObjectToMemviewSlice ///////////////
//@requires: MemviewSliceValidateAndInit
-static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *obj) {
+static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *obj, int writable_flag) {
{{memviewslice_name}} result = {{memslice_init}};
__Pyx_BufFmt_StackElem stack[{{struct_nesting_depth}}];
int axes_specs[] = { {{axes_specs}} };
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, {{c_or_f_flag}},
- {{buf_flag}}, {{ndim}},
+ {{buf_flag}} | writable_flag, {{ndim}},
&{{dtype_typeinfo}}, stack,
&result, obj);
#undef MASK
#endif
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+
+#ifndef __has_cpp_attribute
+ #define __has_cpp_attribute(x) 0
+#endif
+
+// restrict
+#ifndef CYTHON_RESTRICT
+ #if defined(__GNUC__)
+ #define CYTHON_RESTRICT __restrict__
+ #elif defined(_MSC_VER) && _MSC_VER >= 1400
+ #define CYTHON_RESTRICT __restrict
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_RESTRICT restrict
+ #else
+ #define CYTHON_RESTRICT
+ #endif
+#endif
+
+// unused attribute
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+# if defined(__cplusplus)
+ template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
+# else
+# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
+# endif
+#endif
+
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+# define CYTHON_NCP_UNUSED
+# else
+# define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+
+#ifdef _MSC_VER
+ #ifndef _MSC_STDINT_H_
+ #if _MSC_VER < 1300
+ typedef unsigned char uint8_t;
+ typedef unsigned int uint32_t;
+ #else
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int32 uint32_t;
+ #endif
+ #endif
+#else
+ #include <stdint.h>
+#endif
+
+
+#ifndef CYTHON_FALLTHROUGH
+ #if defined(__cplusplus) && __cplusplus >= 201103L
+ #if __has_cpp_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH [[fallthrough]]
+ #elif __has_cpp_attribute(clang::fallthrough)
+ #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+ #elif __has_cpp_attribute(gnu::fallthrough)
+ #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+ #endif
+ #endif
+
+ #ifndef CYTHON_FALLTHROUGH
+ #if __has_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+ #else
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+
+ #if defined(__clang__ ) && defined(__apple_build_version__)
+ #if __apple_build_version__ < 7000000 /* Xcode < 7.0 */
+ #undef CYTHON_FALLTHROUGH
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+#endif
+
+/////////////// CInitCode ///////////////
+
+// inline attribute
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #elif defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+
+/////////////// CppInitCode ///////////////
+
+#ifndef __cplusplus
+ #error "Cython files generated with the C++ option must be compiled with a C++ compiler."
+#endif
+
+// inline attribute
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #else
+ #define CYTHON_INLINE inline
+ #endif
+#endif
+
+// Work around clang bug http://stackoverflow.com/questions/21847816/c-invoke-nested-template-class-destructor
+template<typename T>
+void __Pyx_call_destructor(T& x) {
+ x.~T();
+}
+
+// Used for temporary variables of "reference" type.
+template<typename T>
+class __Pyx_FakeReference {
+ public:
+ __Pyx_FakeReference() : ptr(NULL) { }
+ // __Pyx_FakeReference(T& ref) : ptr(&ref) { }
+ // Const version needed as Cython doesn't know about const overloads (e.g. for stl containers).
+ __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
+ T *operator->() { return ptr; }
+ T *operator&() { return ptr; }
+ operator T&() { return *ptr; }
+ // TODO(robertwb): Delegate all operators (or auto-generate unwrapping code where needed).
+ template<typename U> bool operator ==(U other) { return *ptr == other; }
+ template<typename U> bool operator !=(U other) { return *ptr != other; }
+ private:
+ T *ptr;
+};
+
+
+/////////////// PythonCompatibility ///////////////
+
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
-#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL)
- // new in CPython 3.6, but changed in 3.7 - see https://bugs.python.org/issue29464
+#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
+ // new in CPython 3.6, but changed in 3.7 - see
+ // positional-only parameters:
+ // https://bugs.python.org/issue29464
+ // const args:
+ // https://bugs.python.org/issue32240
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
- typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs);
+ typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
// new in CPython 3.7, used to be old signature of _PyCFunctionFast() in 3.6
- typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args,
+ typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
+ #define PyObject_Malloc(s) PyMem_Malloc(s)
+ #define PyObject_Free(p) PyMem_Free(p)
+ #define PyObject_Realloc(p) PyMem_Realloc(p)
+#endif
+
+#if CYTHON_COMPILING_IN_PYSTON
+ // special C-API functions only in Pyston
+ #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
+#else
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
+#endif
+
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
+// TSS (Thread Specific Storage) API
+#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
+#include "pythread.h"
+#define Py_tss_NEEDS_INIT 0
+typedef int Py_tss_t;
+static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
+ *key = PyThread_create_key();
+ return 0; // PyThread_create_key reports success always
+}
+static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
+ Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
+ *key = Py_tss_NEEDS_INIT;
+ return key;
+}
+static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
+ PyObject_Free(key);
+}
+static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
+ return *key != Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
+ PyThread_delete_key(*key);
+ *key = Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
+ return PyThread_set_key_value(*key, value);
+}
+static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
+ return PyThread_get_key_value(*key);
+}
+// PyThread_delete_key_value(key) is equivalent to PyThread_set_key_value(key, NULL)
+// PyThread_ReInitTLS() is a no-op
+#endif // TSS (Thread Specific Storage) API
+
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
+#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
+#else
+#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
+#endif
+
/* new Py3.3 unicode type (PEP 393) */
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
-#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
- #define PyObject_Malloc(s) PyMem_Malloc(s)
- #define PyObject_Free(p) PyMem_Free(p)
- #define PyObject_Realloc(p) PyMem_Realloc(p)
-#endif
-
-#if CYTHON_COMPILING_IN_PYSTON
- // special C-API functions only in Pyston
- #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
- #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
-#else
- #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
- #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
-#endif
-
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
-#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
+#if CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
+#else
+ // NOTE: might fail with exception => check for -1
+ #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
+#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3
- #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
+ #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
-#ifndef __has_attribute
- #define __has_attribute(x) 0
-#endif
-
-#ifndef __has_cpp_attribute
- #define __has_cpp_attribute(x) 0
-#endif
-
// backport of PyAsyncMethods from Py3.5 to older Py3.x versions
// (mis-)using the "tp_reserved" type slot which is re-activated as "tp_as_async" in Py3.5
#if CYTHON_USE_ASYNC_SLOTS
} __Pyx_PyAsyncMethodsStruct;
#endif
-// restrict
-#ifndef CYTHON_RESTRICT
- #if defined(__GNUC__)
- #define CYTHON_RESTRICT __restrict__
- #elif defined(_MSC_VER) && _MSC_VER >= 1400
- #define CYTHON_RESTRICT __restrict
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define CYTHON_RESTRICT restrict
- #else
- #define CYTHON_RESTRICT
- #endif
-#endif
-
-// unused attribute
-#ifndef CYTHON_UNUSED
-# if defined(__GNUC__)
-# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
-# else
-# define CYTHON_UNUSED
-# endif
-# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
-# else
-# define CYTHON_UNUSED
-# endif
-#endif
-
-#ifndef CYTHON_MAYBE_UNUSED_VAR
-# if defined(__cplusplus)
- template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
-# else
-# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
-# endif
-#endif
-#ifndef CYTHON_NCP_UNUSED
-# if CYTHON_COMPILING_IN_CPYTHON
-# define CYTHON_NCP_UNUSED
-# else
-# define CYTHON_NCP_UNUSED CYTHON_UNUSED
-# endif
-#endif
+/////////////// PyModInitFuncType.proto ///////////////
-#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#if PY_MAJOR_VERSION < 3
-#ifdef _MSC_VER
- #ifndef _MSC_STDINT_H_
- #if _MSC_VER < 1300
- typedef unsigned char uint8_t;
- typedef unsigned int uint32_t;
- #else
- typedef unsigned __int8 uint8_t;
- typedef unsigned __int32 uint32_t;
- #endif
- #endif
+#ifdef CYTHON_NO_PYINIT_EXPORT
+// define this to void manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
+#define __Pyx_PyMODINIT_FUNC void
#else
- #include <stdint.h>
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
+#else
-#ifndef CYTHON_FALLTHROUGH
- #if defined(__cplusplus) && __cplusplus >= 201103L
- #if __has_cpp_attribute(fallthrough)
- #define CYTHON_FALLTHROUGH [[fallthrough]]
- #elif __has_cpp_attribute(clang::fallthrough)
- #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
- #elif __has_cpp_attribute(gnu::fallthrough)
- #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
- #endif
- #endif
-
- #ifndef CYTHON_FALLTHROUGH
- #if __has_attribute(fallthrough)
- #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
- #else
- #define CYTHON_FALLTHROUGH
- #endif
- #endif
-
- #if defined(__clang__ ) && defined(__apple_build_version__)
- #if __apple_build_version__ < 7000000 /* Xcode < 7.0 */
- #undef CYTHON_FALLTHROUGH
- #define CYTHON_FALLTHROUGH
- #endif
- #endif
+#ifdef CYTHON_NO_PYINIT_EXPORT
+// define this to PyObject * manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
+#define __Pyx_PyMODINIT_FUNC PyObject *
+#else
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
-/////////////// CInitCode ///////////////
-
-// inline attribute
-#ifndef CYTHON_INLINE
- #if defined(__clang__)
- #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
- #elif defined(__GNUC__)
- #define CYTHON_INLINE __inline__
- #elif defined(_MSC_VER)
- #define CYTHON_INLINE __inline
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define CYTHON_INLINE inline
- #else
- #define CYTHON_INLINE
- #endif
#endif
-
-/////////////// CppInitCode ///////////////
-
-#ifndef __cplusplus
- #error "Cython files generated with the C++ option must be compiled with a C++ compiler."
+#ifndef CYTHON_SMALL_CODE
+#if defined(__clang__)
+ #define CYTHON_SMALL_CODE
+#elif defined(__GNUC__)
+ #define CYTHON_SMALL_CODE __attribute__((optimize("Os")))
+#else
+ #define CYTHON_SMALL_CODE
#endif
-
-// inline attribute
-#ifndef CYTHON_INLINE
- #if defined(__clang__)
- #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
- #else
- #define CYTHON_INLINE inline
- #endif
#endif
-// Work around clang bug http://stackoverflow.com/questions/21847816/c-invoke-nested-template-class-destructor
-template<typename T>
-void __Pyx_call_destructor(T& x) {
- x.~T();
-}
-
-// Used for temporary variables of "reference" type.
-template<typename T>
-class __Pyx_FakeReference {
- public:
- __Pyx_FakeReference() : ptr(NULL) { }
- // __Pyx_FakeReference(T& ref) : ptr(&ref) { }
- // Const version needed as Cython doesn't know about const overloads (e.g. for stl containers).
- __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
- T *operator->() { return ptr; }
- T *operator&() { return ptr; }
- operator T&() { return *ptr; }
- // TODO(robertwb): Delegate all operators (or auto-generate unwrapping code where needed).
- template<typename U> bool operator ==(U other) { return *ptr == other; }
- template<typename U> bool operator !=(U other) { return *ptr != other; }
- private:
- T *ptr;
-};
-
/////////////// FastTypeChecks.proto ///////////////
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
+#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
+
/////////////// FastTypeChecks ///////////////
//@requires: Exceptions.c::PyThreadStateGet
//@requires: Exceptions.c::PyErrFetchRestore
}
#endif /* CYTHON_REFNANNY */
+
+/////////////// ImportRefnannyAPI ///////////////
+
+#if CYTHON_REFNANNY
+__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+if (!__Pyx_RefNanny) {
+ PyErr_Clear();
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+ if (!__Pyx_RefNanny)
+ Py_FatalError("failed to import 'refnanny' module");
+}
+#endif
+
+
/////////////// RegisterModuleCleanup.proto ///////////////
//@substitute: naming
__Pyx_PyThreadState_assign
exc_type = __Pyx_PyErr_Occurred();
if (unlikely(exc_type)) {
- if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))
+ if (!defval || unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))
return NULL;
- if (defval) {
- __Pyx_PyErr_Clear();
- Py_INCREF(defval);
- }
+ __Pyx_PyErr_Clear();
+ Py_INCREF(defval);
return defval;
}
if (defval) {
// originally copied from Py3's builtin_next()
static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject* iterator, PyObject* defval) {
PyObject* next;
- // we always do a quick slot check because always PyIter_Check() is so wasteful
+ // We always do a quick slot check because calling PyIter_Check() is so wasteful.
iternextfunc iternext = Py_TYPE(iterator)->tp_iternext;
if (likely(iternext)) {
#if CYTHON_USE_TYPE_SLOTS
return NULL;
#endif
#else
- // note: PyIter_Next() crashes if the slot is NULL in CPython
+ // Since the slot was set, assume that PyIter_Next() will likely succeed, and properly fail otherwise.
+ // Note: PyIter_Next() crashes in CPython if "tp_iternext" is NULL.
next = PyIter_Next(iterator);
if (likely(next))
return next;
#endif
- } else if (CYTHON_USE_TYPE_SLOTS || !PyIter_Check(iterator)) {
+ } else if (CYTHON_USE_TYPE_SLOTS || unlikely(!PyIter_Check(iterator))) {
+ // If CYTHON_USE_TYPE_SLOTS, then the slot was not set and we don't have an iterable.
+ // Otherwise, don't trust "tp_iternext" and rely on PyIter_Check().
__Pyx_PyIter_Next_ErrorNoIterator(iterator);
return NULL;
}
+#if !CYTHON_USE_TYPE_SLOTS
+ else {
+ // We have an iterator with an empty "tp_iternext", but didn't call next() on it yet.
+ next = PyIter_Next(iterator);
+ if (likely(next))
+ return next;
+ }
+#endif
return __Pyx_PyIter_Next2Default(defval);
}
#endif
}
+
+/////////////// ObjectGetItem.proto ///////////////
+
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);/*proto*/
+#else
+#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
+#endif
+
+/////////////// ObjectGetItem ///////////////
+// //@requires: GetItemInt - added in IndexNode as it uses templating.
+
+#if CYTHON_USE_TYPE_SLOTS
+static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
+ PyObject *runerr;
+ Py_ssize_t key_value;
+ PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
+ if (unlikely(!(m && m->sq_item))) {
+ PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
+ return NULL;
+ }
+
+ key_value = __Pyx_PyIndex_AsSsize_t(index);
+ if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
+ return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
+ }
+
+ // Error handling code -- only manage OverflowError differently.
+ if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
+ PyErr_Clear();
+ PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
+ }
+ return NULL;
+}
+
+static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
+ PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
+ if (likely(m && m->mp_subscript)) {
+ return m->mp_subscript(obj, key);
+ }
+ return __Pyx_PyObject_GetIndex(obj, key);
+}
+#endif
+
+
/////////////// DictGetItem.proto ///////////////
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
+static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);/*proto*/
+
+#define __Pyx_PyObject_Dict_GetItem(obj, name) \
+ (likely(PyDict_CheckExact(obj)) ? \
+ __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name))
+
+#else
+#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
+#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name)
+#endif
+
+/////////////// DictGetItem ///////////////
+
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
value = PyDict_GetItemWithError(d, key);
Py_INCREF(value);
return value;
}
-#else
- #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
/////////////// GetItemInt.proto ///////////////
//@requires: CalculateMetaclass
static PyObject *__Pyx_Py3MetaclassGet(PyObject *bases, PyObject *mkw) {
- PyObject *metaclass = mkw ? PyDict_GetItem(mkw, PYIDENT("metaclass")) : NULL;
+ PyObject *metaclass = mkw ? __Pyx_PyDict_GetItemStr(mkw, PYIDENT("metaclass")) : NULL;
if (metaclass) {
Py_INCREF(metaclass);
if (PyDict_DelItem(mkw, PYIDENT("metaclass")) < 0) {
return NULL;
/* Python2 __metaclass__ */
- metaclass = PyDict_GetItem(dict, PYIDENT("__metaclass__"));
+ metaclass = __Pyx_PyDict_GetItemStr(dict, PYIDENT("__metaclass__"));
if (metaclass) {
Py_INCREF(metaclass);
if (PyType_Check(metaclass)) {
return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
}
+/////////////// PySetContains.proto ///////////////
+
+static CYTHON_INLINE int __Pyx_PySet_ContainsTF(PyObject* key, PyObject* set, int eq); /* proto */
+
+/////////////// PySetContains ///////////////
+
+static int __Pyx_PySet_ContainsUnhashable(PyObject *set, PyObject *key) {
+ int result = -1;
+ if (PySet_Check(key) && PyErr_ExceptionMatches(PyExc_TypeError)) {
+ /* Convert key to frozenset */
+ PyObject *tmpkey;
+ PyErr_Clear();
+ tmpkey = PyFrozenSet_New(key);
+ if (tmpkey != NULL) {
+ result = PySet_Contains(set, tmpkey);
+ Py_DECREF(tmpkey);
+ }
+ }
+ return result;
+}
+
+static CYTHON_INLINE int __Pyx_PySet_ContainsTF(PyObject* key, PyObject* set, int eq) {
+ int result = PySet_Contains(set, key);
+
+ if (unlikely(result < 0)) {
+ result = __Pyx_PySet_ContainsUnhashable(set, key);
+ }
+ return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
+}
+
/////////////// PySequenceContains.proto ///////////////
static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) {
return result;
}
+
+/////////////// SetNameInClass.proto ///////////////
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
+// Identifier names are always interned and have a pre-calculated hash value.
+#define __Pyx_SetNameInClass(ns, name, value) \
+ (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value))
+#elif CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_SetNameInClass(ns, name, value) \
+ (likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value))
+#else
+#define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value)
+#endif
+
+
/////////////// GetModuleGlobalName.proto ///////////////
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
+ // Identifier names are always interned and have a pre-calculated hash value.
+ result = _PyDict_GetItem_KnownHash($moddict_cname, name, ((PyASCIIObject *) name)->hash);
+ if (likely(result)) {
+ Py_INCREF(result);
+ } else if (unlikely(PyErr_Occurred())) {
+ result = NULL;
+ } else {
+#else
result = PyDict_GetItem($moddict_cname, name);
if (likely(result)) {
Py_INCREF(result);
} else {
+#endif
#else
result = PyObject_GetItem($moddict_cname, name);
if (!result) {
#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n)
#endif
+
+/////////////// PyObject_GenericGetAttrNoDict.proto ///////////////
+
+// Setting "tp_getattro" to anything but "PyObject_GenericGetAttr" disables fast method calls in Py3.7.
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
+#else
+// No-args macro to allow function pointer assignment.
+#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
+#endif
+
+/////////////// PyObject_GenericGetAttrNoDict ///////////////
+
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+
+static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
+ PyErr_Format(PyExc_AttributeError,
+#if PY_MAJOR_VERSION >= 3
+ "'%.50s' object has no attribute '%U'",
+ tp->tp_name, attr_name);
+#else
+ "'%.50s' object has no attribute '%.400s'",
+ tp->tp_name, PyString_AS_STRING(attr_name));
+#endif
+ return NULL;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
+ // Copied and adapted from _PyObject_GenericGetAttrWithDict() in CPython 2.6/3.7.
+ // To be used in the "tp_getattro" slot of extension types that have no instance dict and cannot be subclassed.
+ PyObject *descr;
+ PyTypeObject *tp = Py_TYPE(obj);
+
+ if (unlikely(!PyString_Check(attr_name))) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+
+ assert(!tp->tp_dictoffset);
+ descr = _PyType_Lookup(tp, attr_name);
+ if (unlikely(!descr)) {
+ return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
+ }
+
+ Py_INCREF(descr);
+
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
+ #endif
+ {
+ descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
+ // Optimise for the non-descriptor case because it is faster.
+ if (unlikely(f)) {
+ PyObject *res = f(descr, obj, (PyObject *)tp);
+ Py_DECREF(descr);
+ return res;
+ }
+ }
+ return descr;
+}
+#endif
+
+
+/////////////// PyObject_GenericGetAttr.proto ///////////////
+
+// Setting "tp_getattro" to anything but "PyObject_GenericGetAttr" disables fast method calls in Py3.7.
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
+#else
+// No-args macro to allow function pointer assignment.
+#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
+#endif
+
+/////////////// PyObject_GenericGetAttr ///////////////
+//@requires: PyObject_GenericGetAttrNoDict
+
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
+ if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+ return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
+}
+#endif
+
+
/////////////// PyObjectGetAttrStr.proto ///////////////
#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);/*proto*/
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+/////////////// PyObjectGetAttrStr ///////////////
+
+#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
#endif
return PyObject_GetAttr(obj, attr_name);
}
-#else
-#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
+
/////////////// PyObjectSetAttrStr.proto ///////////////
#if CYTHON_USE_TYPE_SLOTS
-#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o,n,NULL)
+#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL)
+static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value);/*proto*/
+#else
+#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n)
+#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v)
+#endif
+
+/////////////// PyObjectSetAttrStr ///////////////
+
+#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_setattro))
#endif
return PyObject_SetAttr(obj, attr_name, value);
}
-#else
-#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n)
-#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v)
#endif
static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); /*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
+// FASTCALL methods receive "&empty_tuple" as simple "PyObject[0]*"
#define __Pyx_CallUnboundCMethod0(cfunc, self) \
- ((likely((cfunc)->func)) ? \
+ (likely((cfunc)->func) ? \
(likely((cfunc)->flag == METH_NOARGS) ? (*((cfunc)->func))(self, NULL) : \
- (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(cfunc)->func)(self, $empty_tuple, NULL)) : \
- ((cfunc)->flag == METH_VARARGS ? (*((cfunc)->func))(self, $empty_tuple) : \
- (PY_VERSION_HEX >= 0x030600B1 && (cfunc)->flag == METH_FASTCALL ? \
- (PY_VERSION_HEX >= 0x030700A0 ? \
- (*(__Pyx_PyCFunctionFast)(cfunc)->func)(self, &PyTuple_GET_ITEM($empty_tuple, 0), 0) : \
- (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &PyTuple_GET_ITEM($empty_tuple, 0), 0, NULL)) : \
- (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ? \
- (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &PyTuple_GET_ITEM($empty_tuple, 0), 0, NULL) : \
- __Pyx__CallUnboundCMethod0(cfunc, self)))))) : \
+ (PY_VERSION_HEX >= 0x030600B1 && likely((cfunc)->flag == METH_FASTCALL) ? \
+ (PY_VERSION_HEX >= 0x030700A0 ? \
+ (*(__Pyx_PyCFunctionFast)(cfunc)->func)(self, &$empty_tuple, 0) : \
+ (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &$empty_tuple, 0, NULL)) : \
+ (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ? \
+ (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &$empty_tuple, 0, NULL) : \
+ (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(cfunc)->func)(self, $empty_tuple, NULL)) : \
+ ((cfunc)->flag == METH_VARARGS ? (*((cfunc)->func))(self, $empty_tuple) : \
+ __Pyx__CallUnboundCMethod0(cfunc, self)))))) : \
__Pyx__CallUnboundCMethod0(cfunc, self))
#else
#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self)
/////////////// CallUnboundCMethod1.proto ///////////////
-static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg); /*proto*/
+static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
-#define __Pyx_CallUnboundCMethod1(cfunc, self, arg) \
- ((likely((cfunc)->func && (cfunc)->flag == METH_O)) ? (*((cfunc)->func))(self, arg) : \
- ((PY_VERSION_HEX >= 0x030600B1 && (cfunc)->func && (cfunc)->flag == METH_FASTCALL) ? \
- (PY_VERSION_HEX >= 0x030700A0 ? \
- (*(__Pyx_PyCFunctionFast)(cfunc)->func)(self, &arg, 1) : \
- (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &arg, 1, NULL)) : \
- (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->func && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ? \
- (*(__Pyx_PyCFunctionFastWithKeywords)(cfunc)->func)(self, &arg, 1, NULL) : \
- __Pyx__CallUnboundCMethod1(cfunc, self, arg))))
+static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/
#else
#define __Pyx_CallUnboundCMethod1(cfunc, self, arg) __Pyx__CallUnboundCMethod1(cfunc, self, arg)
#endif
//@requires: UnpackUnboundCMethod
//@requires: PyObjectCall
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg) {
+ if (likely(cfunc->func)) {
+ int flag = cfunc->flag;
+ // Not using #ifdefs for PY_VERSION_HEX to avoid C compiler warnings about unused functions.
+ if (flag == METH_O) {
+ return (*(cfunc->func))(self, arg);
+ } else if (PY_VERSION_HEX >= 0x030600B1 && flag == METH_FASTCALL) {
+ if (PY_VERSION_HEX >= 0x030700A0) {
+ return (*(__Pyx_PyCFunctionFast)cfunc->func)(self, &arg, 1);
+ } else {
+ return (*(__Pyx_PyCFunctionFastWithKeywords)cfunc->func)(self, &arg, 1, NULL);
+ }
+ } else if (PY_VERSION_HEX >= 0x030700A0 && flag == (METH_FASTCALL | METH_KEYWORDS)) {
+ return (*(__Pyx_PyCFunctionFastWithKeywords)cfunc->func)(self, &arg, 1, NULL);
+ }
+ }
+ return __Pyx__CallUnboundCMethod1(cfunc, self, arg);
+}
+#endif
+
static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg){
PyObject *args, *result = NULL;
- if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
+ if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
#if CYTHON_COMPILING_IN_CPYTHON
if (cfunc->func && (cfunc->flag & METH_VARARGS)) {
args = PyTuple_New(1);
}
+/////////////// CallUnboundCMethod2.proto ///////////////
+
+static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2); /*proto*/
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1
+static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2); /*proto*/
+#else
+#define __Pyx_CallUnboundCMethod2(cfunc, self, arg1, arg2) __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2)
+#endif
+
+/////////////// CallUnboundCMethod2 ///////////////
+//@requires: UnpackUnboundCMethod
+//@requires: PyObjectCall
+
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1
+static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2) {
+ if (likely(cfunc->func)) {
+ PyObject *args[2] = {arg1, arg2};
+ if (cfunc->flag == METH_FASTCALL) {
+ #if PY_VERSION_HEX >= 0x030700A0
+ return (*(__Pyx_PyCFunctionFast)cfunc->func)(self, args, 2);
+ #else
+ return (*(__Pyx_PyCFunctionFastWithKeywords)cfunc->func)(self, args, 2, NULL);
+ #endif
+ }
+ #if PY_VERSION_HEX >= 0x030700A0
+ if (cfunc->flag == (METH_FASTCALL | METH_KEYWORDS))
+ return (*(__Pyx_PyCFunctionFastWithKeywords)cfunc->func)(self, args, 2, NULL);
+ #endif
+ }
+ return __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2);
+}
+#endif
+
+static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2){
+ PyObject *args, *result = NULL;
+ if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (cfunc->func && (cfunc->flag & METH_VARARGS)) {
+ args = PyTuple_New(2);
+ if (unlikely(!args)) goto bad;
+ Py_INCREF(arg1);
+ PyTuple_SET_ITEM(args, 0, arg1);
+ Py_INCREF(arg2);
+ PyTuple_SET_ITEM(args, 1, arg2);
+ if (cfunc->flag & METH_KEYWORDS)
+ result = (*(PyCFunctionWithKeywords)cfunc->func)(self, args, NULL);
+ else
+ result = (*cfunc->func)(self, args);
+ } else {
+ args = PyTuple_New(3);
+ if (unlikely(!args)) goto bad;
+ Py_INCREF(self);
+ PyTuple_SET_ITEM(args, 0, self);
+ Py_INCREF(arg1);
+ PyTuple_SET_ITEM(args, 1, arg1);
+ Py_INCREF(arg2);
+ PyTuple_SET_ITEM(args, 2, arg2);
+ result = __Pyx_PyObject_Call(cfunc->method, args, NULL);
+ }
+#else
+ args = PyTuple_Pack(3, self, arg1, arg2);
+ if (unlikely(!args)) goto bad;
+ result = __Pyx_PyObject_Call(cfunc->method, args, NULL);
+#endif
+bad:
+ Py_XDECREF(args);
+ return result;
+}
+
+
/////////////// PyObjectCallMethod0.proto ///////////////
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); /*proto*/
}
static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) {
- PyObject *method, *result = NULL;
+ PyObject *method, *result;
method = __Pyx_PyObject_GetAttrStr(obj, method_name);
- if (unlikely(!method)) goto done;
+ if (unlikely(!method)) return NULL;
result = __Pyx__PyObject_CallMethod1(method, arg);
-done:
- Py_XDECREF(method);
+ Py_DECREF(method);
return result;
}
value = default_value;
}
Py_INCREF(value);
+ // avoid C compiler warning about unused utility functions
+ if ((1));
#else
if (PyString_CheckExact(key) || PyUnicode_CheckExact(key) || PyInt_CheckExact(key)) {
/* these presumably have safe hash functions */
value = default_value;
}
Py_INCREF(value);
- } else {
- if (default_value == Py_None)
- default_value = NULL;
- value = PyObject_CallMethodObjArgs(
- d, PYIDENT("get"), key, default_value, NULL);
}
#endif
+ else {
+ if (default_value == Py_None)
+ value = CALL_UNBOUND_METHOD(PyDict_Type, "get", d, key);
+ else
+ value = CALL_UNBOUND_METHOD(PyDict_Type, "get", d, key, default_value);
+ }
return value;
}
static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value, int is_safe_type); /*proto*/
/////////////// dict_setdefault ///////////////
-//@requires: ObjectHandling.c::PyObjectCallMethod2
static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value,
CYTHON_UNUSED int is_safe_type) {
#endif
#endif
} else {
- value = __Pyx_PyObject_CallMethod2(d, PYIDENT("setdefault"), key, default_value);
+ value = CALL_UNBOUND_METHOD(PyDict_Type, "setdefault", d, key, default_value);
}
return value;
}
#define __Pyx_PyDict_Clear(d) (PyDict_Clear(d), 0)
+
+/////////////// py_dict_pop.proto ///////////////
+
+static CYTHON_INLINE PyObject *__Pyx_PyDict_Pop(PyObject *d, PyObject *key, PyObject *default_value); /*proto*/
+
+/////////////// py_dict_pop ///////////////
+
+static CYTHON_INLINE PyObject *__Pyx_PyDict_Pop(PyObject *d, PyObject *key, PyObject *default_value) {
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B3
+ if ((1)) {
+ return _PyDict_Pop(d, key, default_value);
+ } else
+ // avoid "function unused" warnings
+#endif
+ if (default_value) {
+ return CALL_UNBOUND_METHOD(PyDict_Type, "pop", d, key, default_value);
+ } else {
+ return CALL_UNBOUND_METHOD(PyDict_Type, "pop", d, key);
+ }
+}
+
+
/////////////// dict_iter.proto ///////////////
static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name,
}
+/////////////// set_iter.proto ///////////////
+
+static CYTHON_INLINE PyObject* __Pyx_set_iterator(PyObject* iterable, int is_set,
+ Py_ssize_t* p_orig_length, int* p_source_is_set); /*proto*/
+static CYTHON_INLINE int __Pyx_set_iter_next(
+ PyObject* iter_obj, Py_ssize_t orig_length,
+ Py_ssize_t* ppos, PyObject **value,
+ int source_is_set); /*proto*/
+
+/////////////// set_iter ///////////////
+//@requires: ObjectHandling.c::IterFinish
+
+static CYTHON_INLINE PyObject* __Pyx_set_iterator(PyObject* iterable, int is_set,
+ Py_ssize_t* p_orig_length, int* p_source_is_set) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ is_set = is_set || likely(PySet_CheckExact(iterable) || PyFrozenSet_CheckExact(iterable));
+ *p_source_is_set = is_set;
+ if (unlikely(!is_set))
+ return PyObject_GetIter(iterable);
+ *p_orig_length = PySet_Size(iterable);
+ Py_INCREF(iterable);
+ return iterable;
+#else
+ (void)is_set;
+ *p_source_is_set = 0;
+ *p_orig_length = 0;
+ return PyObject_GetIter(iterable);
+#endif
+}
+
+static CYTHON_INLINE int __Pyx_set_iter_next(
+ PyObject* iter_obj, Py_ssize_t orig_length,
+ Py_ssize_t* ppos, PyObject **value,
+ int source_is_set) {
+ if (!CYTHON_COMPILING_IN_CPYTHON || unlikely(!source_is_set)) {
+ *value = PyIter_Next(iter_obj);
+ if (unlikely(!*value)) {
+ return __Pyx_IterFinish();
+ }
+ (void)orig_length;
+ (void)ppos;
+ return 0;
+ }
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(PySet_GET_SIZE(iter_obj) != orig_length)) {
+ PyErr_SetString(
+ PyExc_RuntimeError,
+ "set changed size during iteration");
+ return -1;
+ }
+ {
+ Py_hash_t hash;
+ int ret = _PySet_NextEntry(iter_obj, ppos, value, &hash);
+ // CPython does not raise errors here, only if !isinstance(iter_obj, set/frozenset)
+ assert (ret != -1);
+ if (likely(ret)) {
+ Py_INCREF(*value);
+ return 1;
+ }
+ return 0;
+ }
+#endif
+}
+
+/////////////// py_set_discard_unhashable ///////////////
+
+static int __Pyx_PySet_DiscardUnhashable(PyObject *set, PyObject *key) {
+ PyObject *tmpkey;
+ int rv;
+
+ if (likely(!PySet_Check(key) || !PyErr_ExceptionMatches(PyExc_TypeError)))
+ return -1;
+ PyErr_Clear();
+ tmpkey = PyFrozenSet_New(key);
+ if (tmpkey == NULL)
+ return -1;
+ rv = PySet_Discard(set, tmpkey);
+ Py_DECREF(tmpkey);
+ return rv;
+}
+
+
+/////////////// py_set_discard.proto ///////////////
+
+static CYTHON_INLINE int __Pyx_PySet_Discard(PyObject *set, PyObject *key); /*proto*/
+
+/////////////// py_set_discard ///////////////
+//@requires: py_set_discard_unhashable
+
+static CYTHON_INLINE int __Pyx_PySet_Discard(PyObject *set, PyObject *key) {
+ int found = PySet_Discard(set, key);
+ // Convert *key* to frozenset if necessary
+ if (unlikely(found < 0)) {
+ found = __Pyx_PySet_DiscardUnhashable(set, key);
+ }
+ // note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works
+ return found;
+}
+
+
+/////////////// py_set_remove.proto ///////////////
+
+static CYTHON_INLINE int __Pyx_PySet_Remove(PyObject *set, PyObject *key); /*proto*/
+
+/////////////// py_set_remove ///////////////
+//@requires: py_set_discard_unhashable
+
+static int __Pyx_PySet_RemoveNotFound(PyObject *set, PyObject *key, int found) {
+ // Convert *key* to frozenset if necessary
+ if (unlikely(found < 0)) {
+ found = __Pyx_PySet_DiscardUnhashable(set, key);
+ }
+ if (likely(found == 0)) {
+ // Not found
+ PyObject *tup;
+ tup = PyTuple_Pack(1, key);
+ if (!tup)
+ return -1;
+ PyErr_SetObject(PyExc_KeyError, tup);
+ Py_DECREF(tup);
+ return -1;
+ }
+ // note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works
+ return found;
+}
+
+static CYTHON_INLINE int __Pyx_PySet_Remove(PyObject *set, PyObject *key) {
+ int found = PySet_Discard(set, key);
+ if (unlikely(found != 1)) {
+ // note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works
+ return __Pyx_PySet_RemoveNotFound(set, key, found);
+ }
+ return 0;
+}
+
+
/////////////// unicode_iter.proto ///////////////
static CYTHON_INLINE int __Pyx_init_unicode_iteration(
{{endif}}
}
// if size doesn't fit into a long or PY_LONG_LONG anymore, fall through to default
+ CYTHON_FALLTHROUGH;
{{endfor}}
{{endfor}}
// check above. However, the number of digits that CPython uses for a given PyLong
// value is minimal, and together with the "(size-1) * SHIFT < 53" check above,
// this should make it safe.
+ CYTHON_FALLTHROUGH;
{{endfor}}
default:
#else
/*
These functions provide integer arithmetic with integer checking. They do not
actually raise an exception when an overflow is detected, but rather set a bit
-in the overflow parameter. (This parameter may be re-used accross several
+in the overflow parameter. (This parameter may be re-used across several
arithmetic operations, so should be or-ed rather than assigned to.)
The implementation is divided into two parts, the signed and unsigned basecases,
/////////////// Profile.proto ///////////////
+//@requires: Exceptions.c::PyErrFetchRestore
//@substitute: naming
// Note that cPython ignores PyTrace_EXCEPTION,
static void __Pyx_call_return_trace_func(PyThreadState *tstate, PyFrameObject *frame, PyObject *result) {
PyObject *type, *value, *traceback;
- PyErr_Fetch(&type, &value, &traceback);
+ __Pyx_ErrFetchInState(tstate, &type, &value, &traceback);
tstate->tracing++;
tstate->use_tracing = 0;
if (CYTHON_TRACE && tstate->c_tracefunc)
CYTHON_FRAME_DEL(frame);
tstate->use_tracing = 1;
tstate->tracing--;
- PyErr_Restore(type, value, traceback);
+ __Pyx_ErrRestoreInState(tstate, type, value, traceback);
}
#ifdef WITH_THREAD
static int __Pyx_call_line_trace_func(PyThreadState *tstate, PyFrameObject *frame, int lineno) {
int ret;
PyObject *type, *value, *traceback;
- PyErr_Fetch(&type, &value, &traceback);
+ __Pyx_ErrFetchInState(tstate, &type, &value, &traceback);
__Pyx_PyFrame_SetLineNumber(frame, lineno);
tstate->tracing++;
tstate->use_tracing = 0;
tstate->use_tracing = 1;
tstate->tracing--;
if (likely(!ret)) {
- PyErr_Restore(type, value, traceback);
+ __Pyx_ErrRestoreInState(tstate, type, value, traceback);
} else {
Py_XDECREF(type);
Py_XDECREF(value);
retval = 1;
tstate->tracing++;
tstate->use_tracing = 0;
- PyErr_Fetch(&type, &value, &traceback);
+ __Pyx_ErrFetchInState(tstate, &type, &value, &traceback);
#if CYTHON_TRACE
if (tstate->c_tracefunc)
retval = tstate->c_tracefunc(tstate->c_traceobj, *frame, PyTrace_CALL, NULL) == 0;
(CYTHON_TRACE && tstate->c_tracefunc));
tstate->tracing--;
if (retval) {
- PyErr_Restore(type, value, traceback);
+ __Pyx_ErrRestoreInState(tstate, type, value, traceback);
return tstate->use_tracing && retval;
} else {
Py_XDECREF(type);
return -1;
// initialise cached hash value
if (PyObject_Hash(*t->p) == -1)
- PyErr_Clear();
+ return -1;
++t;
}
return 0;
Py_DECREF(s);
return result;
}
+
+
+//////////////////// PyUnicode_Unicode.proto ////////////////////
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj);/*proto*/
+
+//////////////////// PyUnicode_Unicode ////////////////////
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj) {
+ if (unlikely(obj == Py_None))
+ obj = PYUNICODE("None");
+ return __Pyx_NewRef(obj);
+}
+
+
+//////////////////// PyObject_Unicode.proto ////////////////////
+
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyObject_Unicode(obj) \
+ (likely(PyUnicode_CheckExact(obj)) ? __Pyx_NewRef(obj) : PyObject_Str(obj))
+#else
+#define __Pyx_PyObject_Unicode(obj) \
+ (likely(PyUnicode_CheckExact(obj)) ? __Pyx_NewRef(obj) : PyObject_Unicode(obj))
+#endif
};
static const char DIGITS_HEX[2*16+1] = {
- "0123456789abcdef0123456789ABCDEF"
+ "0123456789abcdef"
+ "0123456789ABCDEF"
};
if (format_char == 'X') {
hex_digits += 16;
format_char = 'x';
- };
+ }
// surprise: even trivial sprintf() calls don't get optimised in gcc (4.8)
remaining = value; /* not using abs(value) to avoid overflow problems */
last_one_off = 0;
dpos = end;
- while (remaining != 0) {
+ do {
int digit_pos;
switch (format_char) {
case 'o':
digit_pos = abs((int)(remaining % (8*8)));
- remaining = remaining / (8*8);
+ remaining = ({{TYPE}}) (remaining / (8*8));
dpos -= 2;
*(uint16_t*)dpos = ((uint16_t*)DIGIT_PAIRS_8)[digit_pos]; /* copy 2 digits at a time */
last_one_off = (digit_pos < 8);
break;
case 'd':
digit_pos = abs((int)(remaining % (10*10)));
- remaining = remaining / (10*10);
+ remaining = ({{TYPE}}) (remaining / (10*10));
dpos -= 2;
*(uint16_t*)dpos = ((uint16_t*)DIGIT_PAIRS_10)[digit_pos]; /* copy 2 digits at a time */
last_one_off = (digit_pos < 10);
break;
case 'x':
*(--dpos) = hex_digits[abs((int)(remaining % 16))];
- remaining = remaining / 16;
+ remaining = ({{TYPE}}) (remaining / 16);
break;
default:
assert(0);
break;
}
- }
+ } while (unlikely(remaining != 0));
+
if (last_one_off) {
assert(*dpos == '0');
dpos++;
- } else if (unlikely(dpos == end)) {
- *(--dpos) = '0';
}
length = end - dpos;
ulength = length;
with one or more Cython modules built in. This allows one to create a single
executable from Cython code, without having to have separate shared objects
for each Cython module. A major advantage of this approach is that it allows
-debuging with gprof(1), which does not work with shared objects.
+debugging with gprof(1), which does not work with shared objects.
Unless ``-p`` is given, the first module's ``__name__`` is set to
``"__main__"`` and is imported on startup; if ``-p`` is given, a normal Python
;;; cython-mode.el --- Major mode for editing Cython files
+;; License: Apache-2.0
+
;;; Commentary:
;; This should work with python-mode.el as well as either the new
["-s '%s=%s'" % x for x in cython_options])
# TODO(robertwb): It might be better to only generate the C files,
# letting cc_library (or similar) handle the rest, but there isn't yet
- # suport compiling Python C extensions from bazel.
+ # support compiling Python C extensions from bazel.
native.genrule(
name = name + "_cythonize",
srcs = pyx_srcs,
return pyext_cccom, pyext_cxxcom, pyext_linkcom
def set_basic_vars(env):
- # Set construction variables which are independant on whether we are using
+    # Set construction variables which are independent of whether we are using
# distutils or not.
env['PYEXTCPPPATH'] = SCons.Util.CLVar('$PYEXTINCPATH')
global:
# SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the
- # /E:ON and /V:ON options are not enabled in the batch script intepreter
+ # /E:ON and /V:ON options are not enabled in the batch script interpreter
# See: http://stackoverflow.com/a/13751649/163740
WITH_ENV: "cmd /E:ON /V:ON /C .\\appveyor\\run_with_env.cmd"
test: off
test_script:
+ - "%PYTHON%\\Scripts\\pip.exe install -r test-requirements.txt"
- "set CFLAGS=/Od"
- "%WITH_ENV% %PYTHON%\\python.exe runtests.py -vv --no-cpp -j7"
=================
Many scientific Python distributions, such as Anaconda [Anaconda]_,
-Enthought Canopy [Canopy]_, Python(x,y) [Pythonxy]_, and Sage [Sage]_,
+Enthought Canopy [Canopy]_, and Sage [Sage]_,
bundle Cython and no setup is needed. Note however that if your
distribution ships a version of Cython which is too old you can still
use the instructions below to update Cython. Everything in this
.. [Anaconda] http://docs.continuum.io/anaconda/
.. [Canopy] https://enthought.com/products/canopy/
-.. [Pythonxy] http://www.pythonxy.com/
.. [Sage] W. Stein et al., Sage Mathematics Software, http://sagemath.org
The following sub-sections describe several ways to build your
extension modules, and how to pass directives to the Cython compiler.
+
Compiling from the command line
===============================
-Run the Cython compiler command with your options and list of ``.pyx``
-files to generate. For example::
+Run the ``cythonize`` compiler command with your options and list of
+``.pyx`` files to generate. For example::
+
+ $ cythonize -a -i yourmod.pyx
- $ cython -a yourmod.pyx
+This creates a ``yourmod.c`` file (or ``yourmod.cpp`` in C++ mode), compiles it,
+and puts the resulting extension module (``.so`` or ``.pyd``, depending on your
+platform) next to the source file for direct import (``-i`` builds "in place").
+The ``-a`` switch additionally produces an annotated html file of the source code.
-This creates a ``yourmod.c`` file, and the ``-a`` switch produces an
-annotated html file of the source code. Pass the ``-h`` flag for a
-complete list of supported flags.
+The ``cythonize`` command accepts multiple source files and glob patterns like
+``**/*.pyx`` as argument and also understands the common ``-j`` option for
+running multiple parallel build jobs. When called without further options, it
+will only translate the source files to ``.c`` or ``.cpp`` files. Pass the
+``-h`` flag for a complete list of supported options.
-Compiling your ``.c`` files will vary depending on your operating
-system. Python documentation for writing extension modules should
-have some details for your system. Here we give an example on a Linux
-system::
+There is also a simpler command line tool named ``cython`` which only invokes
+the source code translator.
+
+In the case of manual compilation, how to compile your ``.c`` files will vary
+depending on your operating system and compiler. The Python documentation for
+writing extension modules should have some details for your system. On a Linux
+system, for example, it might look similar to this::
$ gcc -shared -pthread -fPIC -fwrapv -O2 -Wall -fno-strict-aliasing \
- -I/usr/include/python2.7 -o yourmod.so yourmod.c
+ -I/usr/include/python3.5 -o yourmod.so yourmod.c
-[``gcc`` will need to have paths to your included header files and
-paths to libraries you need to link with]
+(``gcc`` will need to have paths to your included header files and paths
+to libraries you want to link with.)
-A ``yourmod.so`` file is now in the same directory and your module,
-``yourmod``, is available for you to import as you normally would.
+After compilation, a ``yourmod.so`` file is written into the target directory
+and your module, ``yourmod``, is available for you to import as with any other
+Python module. Note that if you are not relying on ``cythonize`` or distutils,
+you will not automatically benefit from the platform specific file extension
+that CPython generates for disambiguation, such as
+``yourmod.cpython-35m-x86_64-linux-gnu.so`` on a regular 64bit Linux installation
+of CPython 3.5.
Compiling with ``distutils``
These ``.pxd`` files need not have corresponding ``.pyx``
modules if they contain purely declarations of external libraries.
+
+Integrating multiple modules
+============================
+
+In some scenarios, it can be useful to link multiple Cython modules
+(or other extension modules) into a single binary, e.g. when embedding
+Python in another application. This can be done through the inittab
+import mechanism of CPython.
+
+Create a new C file to integrate the extension modules and add this
+macro to it::
+
+ #if PY_MAJOR_VERSION < 3
+ # define MODINIT(name) init ## name
+ #else
+ # define MODINIT(name) PyInit_ ## name
+ #endif
+
+If you are only targeting Python 3.x, just use ``PyInit_`` as prefix.
+
+Then, for each of the modules, declare its module init function
+as follows, replacing ``...`` by the name of the module::
+
+ PyMODINIT_FUNC MODINIT(...) (void);
+
+In C++, declare them as ``extern "C"``.
+
+If you are not sure of the name of the module init function, refer
+to your generated module source file and look for a function name
+starting with ``PyInit_``.
+
+Next, before you start the Python runtime from your application code
+with ``Py_Initialize()``, you need to initialise the modules at runtime
+using the ``PyImport_AppendInittab()`` C-API function, again inserting
+the name of each of the modules::
+
+ PyImport_AppendInittab("...", MODINIT(...));
+
+This enables normal imports for the embedded extension modules.
+
+In order to prevent the joined binary from exporting all of the module
+init functions as public symbols, Cython 0.28 and later can hide these
+symbols if the macro ``CYTHON_NO_PYINIT_EXPORT`` is defined while
+C-compiling the module C files.
+
+Also take a look at the `cython_freeze
+<https://github.com/cython/cython/blob/master/bin/cython_freeze>`_ tool.
+
+
Compiling with :mod:`pyximport`
===============================
([]-operator) in the code will not cause any IndexErrors to be
raised. Lists, tuples, and strings are affected only if the index
can be determined to be non-negative (or if ``wraparound`` is False).
- Conditions
- which would normally trigger an IndexError may instead cause
+ Conditions which would normally trigger an IndexError may instead cause
segfaults or data corruption if this is set to False.
Default is True.
``wraparound`` (True / False)
- In Python arrays can be indexed relative to the end. For example
- A[-1] indexes the last value of a list. In C negative indexing is
- not supported. If set to False, Cython will neither check for nor
- correctly handle negative indices, possibly causing segfaults or
- data corruption.
+ In Python, arrays and sequences can be indexed relative to the end.
+ For example, A[-1] indexes the last value of a list.
+ In C, negative indexing is not supported.
+ If set to False, Cython is allowed to neither check for nor correctly
+ handle negative indices, possibly causing segfaults or data corruption.
+ If bounds checks are enabled (the default, see ``boundschecks`` above),
+ negative indexing will usually raise an ``IndexError`` for indices that
+ Cython evaluates itself.
+ However, these cases can be difficult to recognise in user code to
+ distinguish them from indexing or slicing that is evaluated by the
+ underlying Python array or sequence object and thus continues to support
+ wrap-around indices.
+ It is therefore safest to apply this option only to code that does not
+ process negative indices at all.
Default is True.
``initializedcheck`` (True / False)
``unraisable_tracebacks`` (True / False)
Whether to print tracebacks when suppressing unraisable exceptions.
+``iterable_coroutine`` (True / False)
+ `PEP 492 <https://www.python.org/dev/peps/pep-0492/>`_ specifies that async-def
+ coroutines must not be iterable, in order to prevent accidental misuse in
+ non-async contexts. However, this makes it difficult and inefficient to write
+ backwards compatible code that uses async-def coroutines in Cython but needs to
+ interact with async Python code that uses the older yield-from syntax, such as
+ asyncio before Python 3.5. This directive can be applied in modules or
+ selectively as decorator on an async-def coroutine to make the affected
+ coroutine(s) iterable and thus directly interoperable with yield-from.
+
Configurable optimisations
--------------------------
completely wrong.
Disabling this option can also reduce the code size. Default is True.
+Warnings
+--------
+
+All warning directives take True / False as options
+to turn the warning on / off.
+
+``warn.undeclared`` (default False)
+ Warns about any variables that are implicitly declared without a ``cdef`` declaration
+
+``warn.unreachable`` (default True)
+ Warns about code paths that are statically determined to be unreachable, e.g.
+ returning twice unconditionally.
+
+``warn.maybe_uninitialized`` (default False)
+ Warns about use of variables that are conditionally uninitialized.
+
+``warn.unused`` (default False)
+ Warns about unused variables and declarations
+
+``warn.unused_arg`` (default False)
+ Warns about unused function arguments
+
+``warn.unused_result`` (default False)
+ Warns about unused assignment to the same name, such as
+ ``r = 2; r = 1 + 2``
+
+``warn.multiple_declarators`` (default True)
+ Warns about multiple variables declared on the same line with at least one pointer type.
+ For example ``cdef double* a, b`` - which, as in C, declares ``a`` as a pointer, ``b`` as
+    a value type, but could be misinterpreted as declaring two pointers.
+
How to set directives
---------------------
Rich Comparisons
================
-* Starting with Cython 0.27, the Python
+There are two ways to implement comparison methods.
+Depending on the application, one way or the other may be better:
+
+* The first way uses the 6 Python
`special methods <https://docs.python.org/3/reference/datamodel.html#basic-customization>`_
- ``__eq__``, ``__lt__``, etc. can be implemented. In previous versions, ``__richcmp__`` was
- the only way to implement rich comparisons.
-* A single special method called ``__richcmp__()`` can be used to implement all the individual
- rich compare, special method types.
-* ``__richcmp__()`` takes an integer argument, indicating which operation is to be performed
- as shown in the table below.
+ ``__eq__``, ``__lt__``, etc.
+ This is new since Cython 0.27 and works exactly as in plain Python classes.
+* The second way uses a single special method ``__richcmp__``.
+ This implements all rich comparison operations in one method.
+ The signature is ``def __richcmp__(self, other, int op)`` matching the
+ `PyObject_RichCompare() <https://docs.python.org/3/c-api/object.html#c.PyObject_RichCompare>`_
+ Python/C API function.
+ The integer argument ``op`` indicates which operation is to be performed
+ as shown in the table below:
+-----+-----+-------+
| < | 0 | Py_LT |
+-----+-----+-------+
The named constants can be cimported from the ``cpython.object`` module.
- They should generally be preferred over plain integers to improve readabilty.
+ They should generally be preferred over plain integers to improve readability.
The ``__next__()`` Method
* Be very aware of exposing Python functions that take extension types as arguments::
- def widen_shrubbery(Shrubbery sh, extra_width): # This is
- sh.width = sh.width + extra_width
+ def widen_shrubbery(Shrubbery sh, extra_width): # This is dangerous
+ sh.width = sh.width + extra_width
* Users could **crash** the program by passing ``None`` for the ``sh`` parameter.
* This could be avoided by::
.. note:: Typing is not a necessity
Providing static typing to parameters and variables is convenience to speed up your code, but it is not a necessity. Optimize where and when needed.
+ In fact, typing can *slow down* your code in the case where the
+ typing does not allow optimizations but where Cython still needs to
+ check that the type of some object matches the declared type.
The cdef Statement
int age
float volume
-..note Structs can be declared as ``cdef packed struct``, which has
-the same effect as the C directive ``#pragma pack(1)``.
+.. note::
+
+ Structs can be declared as ``cdef packed struct``, which has
+ the same effect as the C directive ``#pragma pack(1)``.
:Unions:
ctypedef int *IntPtr
+.. _typing_types:
+
+C types and Python classes
+==========================
+
+There are three kinds of types that you can declare:
+
+1. C types, like ``cdef double x = 1.0``.
+ In the C code that Cython generates, this will create a C variable
+ of type ``double``. So working with this variable is exactly as fast
+ as working with a C variable of that type.
+
+2. Builtin Python classes like ``cdef list L = []``.
+ This requires an *exact* match of the class, it does not allow
+ subclasses. This allows Cython to optimize code by accessing
+ internals of the builtin class.
+ Cython uses a C variable of type ``PyObject*``.
+
+3. Extension types (declared with ``cdef class``).
+ This does allow subclasses. This typing is mostly used to access
+ ``cdef`` methods and attributes of the extension type.
+ The C code uses a variable which is a pointer to a structure of the
+ specific type, something like ``struct MyExtensionTypeObject*``.
+
Parameters
==========
-* Both C and Python **function** types can be declared to have parameters C data types.
+* Both C and Python **function** types can be declared to have parameters with a given C data type.
* Use normal C declaration syntax::
def spam(int i, char *s):
cdef int eggs(unsigned long l, float f):
...
-* As these parameters are passed into a Python declared function, they are magically **converted** to the specified C type value.
-
- * This holds true for only numeric and string types
+* As these parameters are passed into a Python declared function,
+ they are automatically **converted** to the specified C type value,
+ if a conversion is possible and safe. This applies to numeric and
+ string types, as well as some C++ container types.
-* If no type is specified for a parameter or a return value, it is assumed to be a Python object
+* If no type is specified for a parameter or a return value, it is assumed to be a Python object.
* The following takes two Python objects as parameters and returns a Python object::
cdef spamobjs(x, y):
...
- .. note:: --
+ .. note::
- This is different then C language behavior, where it is an int by default.
+ This is different from the C language behavior, where missing types are assumed as ``int`` by default.
-
-* Python object types have reference counting performed according to the standard Python C-API rules:
+* Python object types have reference counting performed according to the standard Python/C-API rules:
* Borrowed references are taken as parameters
* New references are returned
-.. todo::
- link or label here the one ref count caveat for NumPy.
+ .. warning::
+
+ This only applies to Cython code. Other Python packages which
+ are implemented in C like NumPy may not follow these conventions.
* The name ``object`` can be used to explicitly declare something as a Python Object.
cdef object foo(object int):
...
-.. todo::
- Do a see also here ..??
-
-Optional Arguments
-------------------
-
-* Are supported for ``cdef`` and ``cpdef`` functions
-* There are differences though whether you declare them in a ``.pyx`` file or a ``.pxd`` file:
-
- * When in a ``.pyx`` file, the signature is the same as it is in Python itself::
-
- cdef class A:
- cdef foo(self):
- print "A"
- cdef class B(A)
- cdef foo(self, x=None)
- print "B", x
- cdef class C(B):
- cpdef foo(self, x=True, int k=3)
- print "C", x, k
-
-
- * When in a ``.pxd`` file, the signature is different like this example: ``cdef foo(x=*)``::
-
- cdef class A:
- cdef foo(self)
- cdef class B(A)
- cdef foo(self, x=*)
- cdef class C(B):
- cpdef foo(self, x=*, int k=*)
-
-
- * The number of arguments may increase when subclassing, but the arg types and order must be the same.
-
-* There may be a slight performance penalty when the optional arg is overridden with one that does not have default values.
-
-Keyword-only Arguments
-=======================
-
-* As in Python 3, ``def`` functions can have keyword-only arguments listed after a ``"*"`` parameter and before a ``"**"`` parameter if any::
-
- def f(a, b, *args, c, d = 42, e, **kwds):
- ...
-
- * Shown above, the ``c``, ``d`` and ``e`` arguments can not be passed as positional arguments and must be passed as keyword arguments.
- * Furthermore, ``c`` and ``e`` are required keyword arguments since they do not have a default value.
-
-* If the parameter name after the ``"*"`` is omitted, the function will not accept any extra positional arguments::
-
- def g(a, b, *, c, d):
- ...
-
- * Shown above, the signature takes exactly two positional parameters and has two required keyword parameters
-
-
Automatic Type Conversion
=========================
cdef char *s
s = pystring1 + pystring2
- * The reason is that concatenating to strings in Python produces a temporary variable.
+ * The reason is that concatenating two strings in Python produces a temporary variable.
* The variable is decrefed, and the Python string deallocated as soon as the statement has finished,
Type Casting
-=============
-
-* The syntax used in type casting are ``"<"`` and ``">"``
-
- .. note::
- The syntax is different from C convention
+============
- ::
+* The syntax used in type casting uses ``"<"`` and ``">"``, for example::
- cdef char *p, float *q
- p = <char*>q
+ cdef char *p
+ cdef float *q
+ p = <char*>q
-* If one of the types is a python object for ``<type>x``, Cython will try and do a coercion.
+* If one of the types is a Python object for ``<type>x``, Cython will try to do a coercion.
.. note:: Cython will not stop a casting where there is no conversion, but it will emit a warning.
-* If the address is what is wanted, cast to a ``void*`` first.
-
-
-Type Checking
--------------
+* To get the address of some Python object, use a cast to a pointer type
+ like ``<void*>`` or ``<PyObject*>``.
-* A cast like ``<MyExtensionType>x`` will cast x to type ``MyExtensionType`` without type checking at all.
+* The precedence of ``<...>`` is such that ``<type>a.b.c`` is interpreted as ``<type>(a.b.c)``.
-* To have a cast type checked, use the syntax like: ``<MyExtensionType?>x``.
-
- * In this case, Cython will throw an error if ``"x"`` is not a (subclass) of ``MyExtensionType``
+Checked Type Casts
+------------------
-* Automatic type checking for extension types can be obtained whenever ``isinstance()`` is used as the second parameter
+* A cast like ``<MyExtensionType>x`` will cast x to the class
+ ``MyExtensionType`` without any checking at all.
+* To have a cast checked, use the syntax like: ``<MyExtensionType?>x``.
+ In this case, Cython will apply a runtime check that raises a ``TypeError``
+ if ``x`` is not an instance of ``MyExtensionType``.
+ As explained in :ref:`typing_types`, this tests for the exact class
+ for builtin types, but allows subclasses for extension types.
-Python Objects
-==============
==========================
Statements and Expressions
Cython still supports this function, but the usage is deprecated in favour of
the normal builtin, which Cython can optimise in both forms.
+Optional Arguments
+==================
+
+* Are supported for ``cdef`` and ``cpdef`` functions
+* There are differences though whether you declare them in a ``.pyx`` file or a ``.pxd`` file:
+
+ * When in a ``.pyx`` file, the signature is the same as it is in Python itself::
+
+ cdef class A:
+ cdef foo(self):
+ print "A"
+ cdef class B(A)
+ cdef foo(self, x=None)
+ print "B", x
+ cdef class C(B):
+ cpdef foo(self, x=True, int k=3)
+ print "C", x, k
+
+
+ * When in a ``.pxd`` file, the signature is different like this example: ``cdef foo(x=*)``::
+
+ cdef class A:
+ cdef foo(self)
+ cdef class B(A)
+ cdef foo(self, x=*)
+ cdef class C(B):
+ cpdef foo(self, x=*, int k=*)
+
+
+ * The number of arguments may increase when subclassing, but the arg types and order must be the same.
+
+* There may be a slight performance penalty when the optional arg is overridden with one that does not have default values.
+
+Keyword-only Arguments
+=======================
+
+* As in Python 3, ``def`` functions can have keyword-only arguments listed after a ``"*"`` parameter and before a ``"**"`` parameter if any::
+
+ def f(a, b, *args, c, d = 42, e, **kwds):
+ ...
+
+ * Shown above, the ``c``, ``d`` and ``e`` arguments can not be passed as positional arguments and must be passed as keyword arguments.
+ * Furthermore, ``c`` and ``e`` are required keyword arguments since they do not have a default value.
+
+* If the parameter name after the ``"*"`` is omitted, the function will not accept any extra positional arguments::
+
+ def g(a, b, *, c, d):
+ ...
+
+ * Shown above, the signature takes exactly two positional parameters and has two required keyword parameters
+
============================
Error and Exception Handling
return sin(x**2)
This does slightly more than providing a python wrapper for a cdef
-method: unlike a cdef method, a cpdef method is fully overrideable by
+method: unlike a cdef method, a cpdef method is fully overridable by
methods and instance attributes in Python subclasses. It adds a
little calling overhead compared to a cdef method.
Working with NumPy
=======================
+.. NOTE:: Cython 0.16 introduced typed memoryviews as a successor to the NumPy
+ integration described here. They are easier to use than the buffer syntax
+ below, have less overhead, and can be passed around without requiring the GIL.
+ They should be preferred to the syntax presented in this page.
+ See :ref:`Typed Memoryviews <memoryviews>`.
+
You can use NumPy from Cython exactly the same as in regular Python, but by
doing so you are losing potentially high speedups because Cython has support
for fast access to NumPy arrays. Let's see how this works with a simple
py_bytes_object = <unicode>c_string
The other direction, i.e. automatic encoding to C strings, is only
-supported for the ASCII codec (and the "default encoding", which is
-runtime specific and may or may not be ASCII). This is because
-CPython handles the memory management in this case by keeping an
-encoded copy of the string alive together with the original unicode
-string. Otherwise, there would be no way to limit the lifetime of
-the encoded string in any sensible way, thus rendering any attempt to
-extract a C string pointer from it a dangerous endeavour. As long
-as you stick to the ASCII encoding for the ``c_string_encoding``
-directive, though, the following will work::
+supported for ASCII and the "default encoding", which is usually UTF-8
+in Python 3 and usually ASCII in Python 2. CPython handles the memory
+management in this case by keeping an encoded copy of the string alive
+together with the original unicode string. Otherwise, there would be no
+way to limit the lifetime of the encoded string in any sensible way,
+thus rendering any attempt to extract a C string pointer from it a
+dangerous endeavour. The following safely converts a Unicode string to
+ASCII (change ``c_string_encoding`` to ``default`` to use the default
+encoding instead)::
# cython: c_string_type=unicode, c_string_encoding=ascii
with an interpreter that is compiled with debugging symbols (i.e. configured
with ``--with-pydebug`` or compiled with the ``-g`` CFLAG). If your Python is
installed and managed by your package manager you probably need to install debug
-support separately, e.g. for ubuntu::
+support separately. If using NumPy then you also need to install numpy debugging, or you'll
+see an `import error for multiarray <https://bugzilla.redhat.com/show_bug.cgi?id=1030830>`_.
+E.g. for ubuntu::
- $ sudo apt-get install python-dbg
+ $ sudo apt-get install python-dbg python-numpy-dbg
$ python-dbg setup.py build_ext --inplace
Then you need to run your script with ``python-dbg`` also. Ensure that when
cdef class OwnedPointer:
cdef void* ptr
- cdef __dealloc__(self):
- if ptr != NULL:
- free(ptr)
+ def __dealloc__(self):
+ if self.ptr != NULL:
+ free(self.ptr)
@staticmethod
cdef create(void* ptr):
cdef extern from *:
...
+* If a ``cdef extern from "inc.h"`` block is not empty and contains only
+ function or variable declarations (and no type declarations of any kind),
+ Cython will put the ``#include "inc.h"`` statement after all
+ declarations generated by Cython. This means that the included file
+ has access to the variables, functions, structures, ... which are
+ declared by Cython.
+
Implementing functions in C
---------------------------
the C file for it, you can do this using a C name declaration. Consider this
an advanced feature, only for the rare cases where everything else fails.
+Including verbatim C code
+-------------------------
+
+For advanced use cases, Cython allows you to directly write C code
+as "docstring" of a ``cdef extern from`` block::
+
+ cdef extern from *:
+ """
+ /* This is C code which will be put
+ * in the .c file output by Cython */
+ static long square(long x) {return x * x;}
+ #define assign(x, y) ((x) = (y))
+ """
+ long square(long x)
+ void assign(long& x, long y)
+
+The above is essentially equivalent to having the C code in a file
+``header.h`` and writing ::
+
+ cdef extern from "header.h":
+ long square(long x)
+ void assign(long& x, long y)
+
+It is also possible to combine a header file and verbatim C code::
+
+ cdef extern from "badheader.h":
+ """
+ /* This macro breaks stuff */
+ #undef int
+ """
+ # Stuff from badheader.h
+
+In this case, the C code ``#undef int`` is put right after
+``#include "badheader.h"`` in the C code generated by Cython.
+
Using Cython Declarations from C
================================
include "spamstuff.pxi"
-The contents of the named file are textually included at that point. The
+The contents of the named file are textually included at that point. The
included file can contain any complete statements or declarations that are
valid in the context where the include statement appears, including other
-include statements. The contents of the included file should begin at an
+include statements. The contents of the included file should begin at an
indentation level of zero, and will be treated as though they were indented to
-the level of the include statement that is including the file.
+the level of the include statement that is including the file. The include
+statement cannot, however, be used outside of the module scope, such as inside
+of functions or class bodies.
.. note::
One may mix new axis indexing with all other forms of indexing and slicing.
See also an example_.
+Read-only views
+---------------
+
+Since Cython 0.28, the memoryview item type can be declared as ``const`` to
+support read-only buffers as input::
+
+ cdef const double[:] myslice # const item type => read-only view
+
+ a = np.linspace(0, 10, num=50)
+ a.setflags(write=False)
+ myslice = a
+
+Note that this does not *require* the input buffer to be read-only::
+
+ a = np.linspace(0, 10, num=50)
+ myslice = a # read-only view of a writable buffer
+
+Writable buffers are still accepted by ``const`` views, but read-only
+buffers are not accepted for non-const, writable views::
+
+ cdef double[:] myslice # a normal read/write memory view
+
+ a = np.linspace(0, 10, num=50)
+ a.setflags(write=False)
+ myslice = a # ERROR: requesting writable memory view from read-only buffer!
+
+
Comparison to the old buffer support
====================================
Out[6]: (12, 4, 1)
A Fortran contiguous array has the opposite memory ordering, with the elements
-on the first axis closest togther in memory::
+on the first axis closest together in memory::
In [7]: f_contig = np.array(c_contig, order='F')
In [8]: np.all(f_contig == c_contig)
----------------------------
You first need to install Pythran. See its `documentation
-<https://pythonhosted.org/pythran/MANUAL.html>`_ for more information.
+<https://pythran.readthedocs.io/en/latest/>`_ for more information.
Then, simply add a ``cython: np_pythran=True`` directive at the top of the
Python files that needs to be compiled using Pythran numpy support.
Please note that Pythran can further be tweaked by adding settings in the
``$HOME/.pythranrc`` file. For instance, this can be used to enable `Boost.SIMD`_ support.
See the `Pythran user manual
-<https://pythonhosted.org/pythran/MANUAL.html#customizing-your-pythranrc>`_ for
+<https://pythran.readthedocs.io/en/latest/MANUAL.html#customizing-your-pythranrc>`_ for
more information.
.. _Pythran: https://github.com/serge-sans-paille/pythran
* Basic Cython documentation (see `Cython front page <http://cython.org>`_).
* ``[:enhancements/buffer:Spec for the efficient indexing]``
-.. Note::
- The fast array access documented below is a completely new feature, and
- there may be bugs waiting to be discovered. It might be a good idea to do
- a manual sanity check on the C code Cython generates before using this for
- serious purposes, at least until some months have passed.
-
Cython at a glance
====================
Rich comparisons
-----------------
-Starting with Cython 0.27, the Python
-`special methods <https://docs.python.org/3/reference/datamodel.html#basic-customization>`_
-:meth:``__eq__``, :meth:``__lt__``, etc. can be implemented. In previous versions,
-:meth:``__richcmp__`` was the only way to implement rich comparisons. It takes an integer
-indicating which operation is to be performed, as follows:
-
-+-----+-----+-------+
-| < | 0 | Py_LT |
-+-----+-----+-------+
-| == | 2 | Py_EQ |
-+-----+-----+-------+
-| > | 4 | Py_GT |
-+-----+-----+-------+
-| <= | 1 | Py_LE |
-+-----+-----+-------+
-| != | 3 | Py_NE |
-+-----+-----+-------+
-| >= | 5 | Py_GE |
-+-----+-----+-------+
-
-The named constants can be cimported from the ``cpython.object`` module.
-They should generally be preferred over plain integers to improve readabilty.
+There are two ways to implement comparison methods.
+Depending on the application, one way or the other may be better:
+
+* The first way uses the 6 Python
+ `special methods <https://docs.python.org/3/reference/datamodel.html#basic-customization>`_
+ :meth:`__eq__`, :meth:`__lt__`, etc.
+ This is new since Cython 0.27 and works exactly as in plain Python classes.
+* The second way uses a single special method :meth:`__richcmp__`.
+ This implements all rich comparison operations in one method.
+ The signature is ``def __richcmp__(self, other, int op)``.
+ The integer argument ``op`` indicates which operation is to be performed
+ as shown in the table below:
+
+ +-----+-------+
+ | < | Py_LT |
+ +-----+-------+
+ | == | Py_EQ |
+ +-----+-------+
+ | > | Py_GT |
+ +-----+-------+
+ | <= | Py_LE |
+ +-----+-------+
+ | != | Py_NE |
+ +-----+-------+
+ | >= | Py_GE |
+ +-----+-------+
+
+ These constants can be cimported from the ``cpython.object`` module.
The :meth:`__next__` method
----------------------------
https://docs.python.org/3/reference/datamodel.html#basic-customization
-+-----------------------+---------------------------------------+-------------+--------------------------------------------------------+
-| __richcmp__ |x, y, int op | object | Rich comparison (no direct Python equivalent) |
+You can choose to either implement the standard Python special methods
+like :meth:`__eq__` or the single special method :meth:`__richcmp__`.
+Depending on the application, one way or the other may be better.
+
+-----------------------+---------------------------------------+-------------+--------------------------------------------------------+
| __eq__ |self, y | object | self == y |
+-----------------------+---------------------------------------+-------------+--------------------------------------------------------+
+-----------------------+---------------------------------------+-------------+--------------------------------------------------------+
| __ge__ |self, y | object | self >= y |
+-----------------------+---------------------------------------+-------------+--------------------------------------------------------+
+| __richcmp__ |self, y, int op | object | Joined rich comparison method for all of the above |
+| | | | (no direct Python equivalent) |
++-----------------------+---------------------------------------+-------------+--------------------------------------------------------+
Arithmetic operators
^^^^^^^^^^^^^^^^^^^^
Cython initializes C++ class attributes of a cdef class using the nullary constructor.
If the class you're wrapping does not have a nullary constructor, you must store a pointer
to the wrapped class and manually allocate and deallocate it.
-A convienient and safe place to do so is in the `__cinit__` and `__dealloc__` methods
+A convenient and safe place to do so is in the `__cinit__` and `__dealloc__` methods
which are guaranteed to be called exactly once upon creation and deletion of the Python
instance.
cdef vector[int] v = ...
it = v.begin()
-(Though of course the ``for .. in`` syntax is prefered for objects supporting
+(Though of course the ``for .. in`` syntax is preferred for objects supporting
the iteration protocol.)
RTTI and typeid()
# already built. It might be more efficient to only do it when the
# mod time of the .pyx is newer than the mod time of the .so but
# the question is how to get distutils to tell me the name of the .so
-# before it builds it. Maybe it is easy...but maybe the peformance
+# before it builds it. Maybe it is easy...but maybe the performance
# issue isn't real.
def _load_pyrex(name, filename):
"Load a pyrex file given a name and filename."
``build_in_temp=False`` will produce the C files locally. Working
with complex dependencies and debugging becomes more easy. This
can principally interfere with existing files of the same name.
- build_in_temp can be overriden by <modulename>.pyxbld/make_setup_args()
+ build_in_temp can be overridden by <modulename>.pyxbld/make_setup_args()
by a dict item of 'build_in_temp'
``setup_args``: dict of arguments for Distribution - see
- distutils.core.setup() . They are extended/overriden by those of
+ distutils.core.setup() . They are extended/overridden by those of
<modulename>.pyxbld/make_setup_args()
``reload_support``: Enables support for dynamic
def get_distutils_distro(_cache=[]):
if _cache:
return _cache[0]
- # late import to accomodate for setuptools override
+ # late import to accommodate for setuptools override
from distutils.dist import Distribution
distutils_distro = Distribution()
EXT_DEP_MODULES = {
'tag:numpy': 'numpy',
'tag:pythran': 'pythran',
+ 'tag:setuptools': 'setuptools.sandbox',
'tag:asyncio': 'asyncio',
'tag:pstats': 'pstats',
'tag:posix': 'posix',
(3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
]),
(3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
+ 'run.py35_asyncio_async_def',
'run.mod__spec__',
'run.pep526_variable_annotations', # typing module
]),
def _match_output(self, expected_output, actual_output, write):
try:
for expected, actual in zip(expected_output, actual_output):
- self.assertEquals(expected, actual)
+ self.assertEqual(expected, actual)
if len(actual_output) < len(expected_output):
expected = expected_output[len(actual_output)]
- self.assertEquals(expected, None)
+ self.assertEqual(expected, None)
elif len(actual_output) > len(expected_output):
unexpected = actual_output[len(expected_output)]
- self.assertEquals(None, unexpected)
+ self.assertEqual(None, unexpected)
except AssertionError:
write("\n=== Expected: ===\n")
write('\n'.join(expected_output))
help="do not delete the generated C files (allows passing --no-cython on next run)")
parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
action="store_false", default=True,
- help="do not delete the generated shared libary files (allows manual module experimentation)")
+ help="do not delete the generated shared library files (allows manual module experimentation)")
parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
action="store_false", default=True,
help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
import platform
is_cpython = platform.python_implementation() == 'CPython'
+# this specifies which versions of python we support, pip >= 9 knows to skip
+# versions of packages which are not compatible with the running python
+PYTHON_REQUIRES = '>=2.6, !=3.0.*, !=3.1.*, !=3.2.*'
+
if sys.platform == "darwin":
# Don't create resource files on OS X tar.
os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
setuptools_extra_args = {}
if 'setuptools' in sys.modules:
+ setuptools_extra_args['python_requires'] = PYTHON_REQUIRES
setuptools_extra_args['zip_safe'] = False
setuptools_extra_args['entry_points'] = {
'console_scripts': [
'cython = Cython.Compiler.Main:setuptools_main',
- 'cythonize = Cython.Build.Cythonize:main'
+ 'cythonize = Cython.Build.Cythonize:main',
+ 'cygdb = Cython.Debugger.Cygdb:main',
]
}
scripts = []
else:
if os.name == "posix":
- scripts = ["bin/cython", 'bin/cythonize']
- else:
- scripts = ["cython.py", "cythonize.py"]
-
-if 'setuptools' in sys.modules:
- setuptools_extra_args['entry_points']['console_scripts'].append(
- 'cygdb = Cython.Debugger.Cygdb:main')
-else:
- if os.name == "posix":
- scripts.append('bin/cygdb')
+ scripts = ["bin/cython", "bin/cythonize", "bin/cygdb"]
else:
- scripts.append('cygdb.py')
+ scripts = ["cython.py", "cythonize.py", "cygdb.py"]
def compile_cython_modules(profile=False, compile_more=False, cython_with_refnanny=False):
"Cython.Compiler.FlowControl",
"Cython.Compiler.Code",
"Cython.Runtime.refnanny",
- # "Cython.Compiler.FusedNode",
+ "Cython.Compiler.FusedNode",
"Cython.Tempita._tempita",
+ "Cython.StringIOTree",
]
if compile_more:
compiled_modules.extend([
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: C",
acquired R
25
released R
- >>> [str(x) for x in R.recieved_flags] # Works in both py2 and py3
+ >>> [str(x) for x in R.received_flags] # Works in both py2 and py3
['FORMAT', 'INDIRECT', 'ND', 'STRIDES']
"""
cdef object[unsigned short int, ndim=3] buf = obj
>>> writable(R)
acquired R
released R
- >>> [str(x) for x in R.recieved_flags] # Py2/3
+ >>> [str(x) for x in R.received_flags] # Py2/3
['FORMAT', 'INDIRECT', 'ND', 'STRIDES', 'WRITABLE']
"""
cdef object[unsigned short int, ndim=3] buf = obj
acquired A
released A
2
- >>> [str(x) for x in A.recieved_flags] # Py2/3
+ >>> [str(x) for x in A.received_flags] # Py2/3
['FORMAT', 'ND', 'STRIDES']
Check that the suboffsets were patched back prior to release.
>>> A = IntMockBuffer(None, range(4))
>>> c_contig(A)
2
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'ND', 'STRIDES', 'C_CONTIGUOUS']
"""
return buf[2]
@testcase
def c_contig_2d(object[int, ndim=2, mode='c'] buf):
"""
- Multi-dim has seperate implementation
+ Multi-dim has separate implementation
>>> A = IntMockBuffer(None, range(12), shape=(3,4))
>>> c_contig_2d(A)
7
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'ND', 'STRIDES', 'C_CONTIGUOUS']
"""
return buf[1, 3]
>>> A = IntMockBuffer(None, range(4))
>>> f_contig(A)
2
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'ND', 'STRIDES', 'F_CONTIGUOUS']
"""
return buf[2]
>>> A = IntMockBuffer(None, range(12), shape=(4,3), strides=(1, 4))
>>> f_contig_2d(A)
7
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'ND', 'STRIDES', 'F_CONTIGUOUS']
"""
return buf[3, 1]
>>> bufdefaults1(A)
acquired A
released A
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'ND', 'STRIDES']
"""
pass
cdef object format, offset
cdef void* buffer
cdef Py_ssize_t len, itemsize
- cdef int ndim
cdef Py_ssize_t* strides
cdef Py_ssize_t* shape
cdef Py_ssize_t* suboffsets
cdef object label, log
+ cdef int ndim
+ cdef bint writable
- cdef readonly object recieved_flags, release_ok
+ cdef readonly object received_flags, release_ok
cdef public object fail
- def __init__(self, label, data, shape=None, strides=None, format=None, offset=0):
+ def __init__(self, label, data, shape=None, strides=None, format=None, writable=True, offset=0):
# It is important not to store references to data after the constructor
# as refcounting is checked on object buffers.
self.label = label
self.log = ""
self.offset = offset
self.itemsize = self.get_itemsize()
+ self.writable = writable
if format is None: format = self.get_default_format()
if shape is None: shape = (len(data),)
if strides is None:
if self.fail:
raise ValueError("Failing on purpose")
- self.recieved_flags = []
+ self.received_flags = []
cdef int value
for name, value in available_flags:
if (value & flags) == value:
- self.recieved_flags.append(name)
+ self.received_flags.append(name)
+
+ if flags & cpython.buffer.PyBUF_WRITABLE and not self.writable:
+ raise BufferError("Writable buffer requested from read-only mock: %s" % ' | '.join(self.received_flags))
buffer.buf = <void*>(<char*>self.buffer + (<int>self.offset * self.itemsize))
buffer.obj = self
buffer.len = self.len
- buffer.readonly = 0
+ buffer.readonly = not self.writable
buffer.format = <char*>self.format
buffer.ndim = self.ndim
buffer.shape = self.shape
--- /dev/null
+import sys
+
+__doc__ = u""
+
+if sys.version_info[:2] == (2, 6):
+ __doc__ += u"""
+>>> memoryview = _memoryview
+"""
+
+__doc__ += u"""
+>>> b1 = UserBuffer1()
+>>> m1 = memoryview(b1)
+>>> m1.tolist()
+[0, 1, 2, 3, 4]
+>>> del m1, b1
+"""
+
+__doc__ += u"""
+>>> b2 = UserBuffer2()
+>>> m2 = memoryview(b2)
+UserBuffer2: getbuffer
+>>> m2.tolist()
+[5, 6, 7, 8, 9]
+>>> del m2, b2
+UserBuffer2: release
+"""
+
+cdef extern from *:
+ ctypedef struct Py_buffer # redeclared
+ enum: PyBUF_SIMPLE
+ int PyBuffer_FillInfo(Py_buffer *, object, void *, Py_ssize_t, bint, int) except -1
+ int PyObject_GetBuffer(object, Py_buffer *, int) except -1
+ void PyBuffer_Release(Py_buffer *)
+
+cdef char global_buf[5]
+global_buf[0:5] = [0, 1, 2, 3, 4]
+
+cdef class UserBuffer1:
+
+ def __getbuffer__(self, Py_buffer* view, int flags):
+ PyBuffer_FillInfo(view, None, global_buf, 5, 1, flags)
+
+cdef class UserBuffer2:
+ cdef char buf[5]
+
+ def __cinit__(self):
+ self.buf[0:5] = [5, 6, 7, 8, 9]
+
+ def __getbuffer__(self, Py_buffer* view, int flags):
+ print('UserBuffer2: getbuffer')
+ PyBuffer_FillInfo(view, self, self.buf, 5, 0, flags)
+
+ def __releasebuffer__(self, Py_buffer* view):
+ print('UserBuffer2: release')
+
+
+cdef extern from *:
+ ctypedef struct PyBuffer"Py_buffer":
+ void *buf
+ Py_ssize_t len
+ bint readonly
+
+cdef class _memoryview:
+
+ """
+ Memory
+ """
+
+ cdef PyBuffer view
+
+ def __cinit__(self, obj):
+ cdef Py_buffer *view = <Py_buffer*>&self.view
+ PyObject_GetBuffer(obj, view, PyBUF_SIMPLE)
+
+ def __dealloc__(self):
+ cdef Py_buffer *view = <Py_buffer*>&self.view
+        PyBuffer_Release(view)
+
+ def __getbuffer__(self, Py_buffer *view, int flags):
+ PyBuffer_FillInfo(view, self,
+ self.view.buf, self.view.len,
+ self.view.readonly, flags)
+ def tolist(self):
+ cdef char *b = <char *> self.view.buf
+ return [b[i] for i in range(self.view.len)]
# This file contains tests corresponding to unresolved bugs,
# which will be skipped in the normal testing run.
+setuptools_reimport
class_attribute_init_values_T18
unsignedbehaviour_T184
missing_baseclass_in_predecl_T262
--- /dev/null
+PYTHON setup.py build_ext --inplace
+PYTHON -c "from pkg import b; assert b.test() == 43"
+
+######## setup.py ########
+
+from Cython.Build import cythonize
+
+from distutils.core import setup, Extension
+
+extensions = [
+ Extension('pkg.b', sources=['pkg/a.pyx', 'pkg/alib.c'],
+ include_dirs=['pkg'])
+]
+
+setup(
+ ext_modules = cythonize(extensions)
+)
+
+######## pkg/__init__.py ########
+
+######## pkg/a.pyx ########
+
+cdef extern from "alib.h":
+ int c_function(int x)
+
+def test():
+ return c_function(42)
+
+
+######## pkg/alib.c ########
+
+int c_function(int x) {
+ return x + 1;
+}
+
+######## pkg/alib.h ########
+
+int c_function(int x);
--- /dev/null
+# mode: run
+# tag: setuptools
+
+PYTHON setup.py
+
+
+######## module.pyx ########
+
+cimport cython
+
+
+######## import_cython.py ########
+
+import Cython.Compiler.Main
+
+
+######## setup.py ########
+
+from setuptools.sandbox import run_setup
+run_setup('import_cython.py', ['egg_info'])
+
+from Cython.Build import cythonize
+cythonize('module.pyx')
+++ /dev/null
-int c_a, c_b;
# mode: compile
-cdef extern from "cnamespec.h":
+cdef extern from *:
+ """
+ int c_a, c_b;
+ """
int a "c_a", b "c_b"
cdef struct foo "c_foo":
-# tag: cpp
# mode: compile
+# tag: cpp, warnings
cdef extern from "templates.h":
cdef cppclass TemplateTest1[T]:
--- /dev/null
+# mode: compile
+# tag: pep492, await
+
+# Need to include all utility code !
+
+async def sleep(x):
+ pass
+
+
+async def call():
+ await sleep(1)
+ yield
--- /dev/null
+PYTHON setup.py build_ext --inplace
+
+######## setup.py ########
+
+from Cython.Build import cythonize
+from distutils.core import setup
+
+setup(
+ ext_modules = cythonize("*.pyx"),
+)
+
+######## test.pyx ########
+
+from moda cimport DEFINE_A
+from modb cimport DEFINE_B
+
+######## moda.pxd ########
+
+from verbatim cimport DEFINE_ONCE as DEFINE_A
+
+######## modb.pxd ########
+
+from verbatim cimport DEFINE_ONCE as DEFINE_B
+
+######## verbatim.pxd ########
+
+# Check that we include this only once
+cdef extern from *:
+ """
+ #ifdef DEFINE_ONCE
+ #error "DEFINE_ONCE already defined"
+ #endif
+
+ #define DEFINE_ONCE 1
+ """
+ int DEFINE_ONCE
pass
_ERRORS = """
-5:5: inheritance from PyVarObject types like 'tuple' is not currently supported
-8:5: inheritance from PyVarObject types like 'bytes' is not currently supported
-11:5: inheritance from PyVarObject types like 'str' is not currently supported
+5:19: inheritance from PyVarObject types like 'tuple' is not currently supported
+8:19: inheritance from PyVarObject types like 'bytes' is not currently supported
+11:17: inheritance from PyVarObject types like 'str' is not currently supported
"""
--- /dev/null
+# tag: cpp
+# mode: error
+
+
+cdef cppclass Base:
+ __init__() nogil:
+ pass
+
+cdef cppclass Sub1(Base):
+ __init__(): # implicit requires GIL
+ pass
+
+cdef cppclass Sub2(Sub1):
+ __init__() nogil:
+ pass
+
+_ERRORS = u"""
+10:4: Base constructor defined here.
+14:4: Constructor cannot be called without GIL unless all base constructors can also be called without GIL
+"""
_ERRORS = u"""
-18:40: Cannot assign type 'long' to 'wrapped_int'
+18:40: Cannot assign type 'long' to 'const wrapped_int'
"""
# mode: error
+# cython: auto_pickle=False
ctypedef int[1] int_array
ctypedef int[2] int_array2
_ERRORS = u"""
-20:0: Assignment to slice of wrong length, expected 2, got 1
-21:0: Assignment to slice of wrong length, expected 1, got 2
+21:0: Assignment to slice of wrong length, expected 2, got 1
+22:0: Assignment to slice of wrong length, expected 1, got 2
"""
with cython.parallel.parallel():
pass
+
_ERRORS = u"""
3:8: cython.parallel.parallel is not a module
4:0: No such directive: cython.parallel.something
# mode: error
-# cython: nonexistant = True
+# cython: nonexistent = True
# cython: boundscheck = true
# cython: boundscheck = 9
pass
# invalid
+with nogil, parallel(num_threads=None, num_threads=None):
+ pass
+
with nogil, parallel(num_threads=0):
pass
with nogil, parallel(num_threads=i):
pass
+with nogil, parallel(num_threads=2, num_threads=2):
+ pass
+
with nogil, parallel(num_threads=2):
for i in prange(10, num_threads=2):
pass
pass
_ERRORS = u"""
-e_invalid_num_threads.pyx:12:20: argument to num_threads must be greater than 0
-e_invalid_num_threads.pyx:19:19: num_threads already declared in outer section
-e_invalid_num_threads.pyx:23:19: num_threads must be declared in the parent parallel section
+12:20: Duplicate keyword argument found: num_threads
+15:20: argument to num_threads must be greater than 0
+21:20: Duplicate keyword argument found: num_threads
+25:19: num_threads already declared in outer section
+29:19: num_threads must be declared in the parent parallel section
"""
_ERRORS = u"""
4:4: no binding for nonlocal 'no_such_name' found
+10:8: Previous declaration is here
11:8: 'x' redeclared as nonlocal
16:4: no binding for nonlocal 'global_name' found
+27:8: Previous declaration is here
28:8: 'x' redeclared as nonlocal
"""
pass
_ERRORS = """
-9:5: Base class 'FinalClass' of type 'SubType' is final
+9:19: Base class 'FinalClass' of type 'SubType' is final
"""
>>> writable(R)
acquired R
released R
- >>> [str(x) for x in R.recieved_flags] # Py2/3
+ >>> [str(x) for x in R.received_flags] # Py2/3
['FORMAT', 'ND', 'STRIDES', 'WRITABLE']
"""
buf = mslice
def c_contig_2d(int[:, ::1] mslice):
"""
- Multi-dim has seperate implementation
+ Multi-dim has separate implementation
>>> A = IntMockBuffer(None, range(12), shape=(3,4))
>>> c_contig_2d(A)
cdef char[:] aview = a
return max(<char>1, aview[0]), min(<char>5, aview[2])
+
+
+@cython.test_fail_if_path_exists(
+ '//MemoryViewSliceNode',
+)
+@cython.test_assert_path_exists(
+ '//MemoryViewIndexNode',
+)
+#@cython.boundscheck(False) # reduce C code clutter
+def optimised_index_of_slice(int[:,:,:] arr, int x, int y, int z):
+ """
+ >>> arr = IntMockBuffer("A", list(range(10*10*10)), shape=(10,10,10))
+ >>> optimised_index_of_slice(arr, 2, 3, 4)
+ acquired A
+ (123, 123)
+ (223, 223)
+ (133, 133)
+ (124, 124)
+ (234, 234)
+ (123, 123)
+ (123, 123)
+ (123, 123)
+ (134, 134)
+ (134, 134)
+ (234, 234)
+ (234, 234)
+ (234, 234)
+ released A
+ """
+ print(arr[1, 2, 3], arr[1][2][3])
+ print(arr[x, 2, 3], arr[x][2][3])
+ print(arr[1, y, 3], arr[1][y][3])
+ print(arr[1, 2, z], arr[1][2][z])
+ print(arr[x, y, z], arr[x][y][z])
+
+ print(arr[1, 2, 3], arr[:, 2][1][3])
+ print(arr[1, 2, 3], arr[:, 2, :][1, 3])
+ print(arr[1, 2, 3], arr[:, 2, 3][1])
+ print(arr[1, y, z], arr[1, :][y][z])
+ print(arr[1, y, z], arr[1, :][y, z])
+
+ print(arr[x, y, z], arr[x][:][:][y][:][:][z])
+ print(arr[x, y, z], arr[:][x][:][y][:][:][z])
+ print(arr[x, y, z], arr[:, :][x][:, :][y][:][z])
@testcase
def tuple_buffer_assignment1(a, b):
"""
- >>> A = IntMockBuffer("A", range(6))
- >>> B = IntMockBuffer("B", range(6))
+ >>> A = IntMockBuffer("A", range(6)) # , writable=False)
+ >>> B = IntMockBuffer("B", range(6)) # , writable=False)
>>> tuple_buffer_assignment1(A, B)
acquired A
acquired B
@testcase
def tuple_buffer_assignment2(tup):
"""
- >>> A = IntMockBuffer("A", range(6))
- >>> B = IntMockBuffer("B", range(6))
+ >>> A = IntMockBuffer("A", range(6)) # , writable=False)
+ >>> B = IntMockBuffer("B", range(6)) # , writable=False)
>>> tuple_buffer_assignment2((A, B))
acquired A
acquired B
released A
After release
"""
- cdef int[:] x = IntMockBuffer("A", range(10))
+ cdef int[:] x = IntMockBuffer("A", range(10)) # , writable=False)
del x
print "After release"
def get_int_2d_uintindex(int[:, :] buf, unsigned int i, unsigned int j):
"""
Unsigned indexing:
- >>> C = IntMockBuffer("C", range(6), (2,3))
+ >>> C = IntMockBuffer("C", range(6), (2,3)) # , writable=False)
>>> get_int_2d_uintindex(C, 0, 0)
acquired C
released C
...
IndexError: Out of bounds on buffer access (axis 1)
+ >>> C = IntMockBuffer("C", range(6), (2,3), writable=False)
+ >>> set_int_2d(C, -2, -3, 9)
+ Traceback (most recent call last):
+ BufferError: Writable buffer requested from read-only mock: FORMAT | ND | STRIDES | WRITABLE
"""
buf[i, j] = value
@testcase
def list_comprehension(int[:] buf, len):
"""
- >>> list_comprehension(IntMockBuffer(None, [1,2,3]), 3)
+ >>> list_comprehension(IntMockBuffer(None, [1,2,3]), 3) # , writable=False), 3)
1|2|3
"""
cdef int i
- print u"|".join([unicode(buf[i]) for i in range(len)])
+ print "|".join([str(buf[i]) for i in range(len)])
@testcase
@cython.wraparound(False)
"""
Again, the most interesting thing here is to inspect the C source.
- >>> A = IntMockBuffer(None, range(4))
+ >>> A = IntMockBuffer(None, range(4)) # , writable=False)
>>> wraparound_directive(A, 2, -1)
5
>>> wraparound_directive(A, -1, 2)
>>> writable(R)
acquired R
released R
- >>> [str(x) for x in R.recieved_flags] # Py2/3
+ >>> [str(x) for x in R.received_flags] # Py2/3
['FORMAT', 'ND', 'STRIDES', 'WRITABLE']
"""
cdef unsigned short int[:, :, :] buf = obj
buf[2, 2, 1] = 23
@testcase
-def strided(int[:] buf):
+def strided(const int[:] buf):
"""
- >>> A = IntMockBuffer("A", range(4))
+ >>> A = IntMockBuffer("A", range(4), writable=False)
>>> strided(A)
acquired A
released A
2
- >>> [str(x) for x in A.recieved_flags] # Py2/3
- ['FORMAT', 'ND', 'STRIDES', 'WRITABLE']
+ >>> [str(x) for x in A.received_flags] # Py2/3
+ ['FORMAT', 'ND', 'STRIDES']
Check that the suboffsets were patched back prior to release.
>>> A.release_ok
return buf[2]
@testcase
-def c_contig(int[::1] buf):
+def c_contig(const int[::1] buf):
"""
- >>> A = IntMockBuffer(None, range(4))
+ >>> A = IntMockBuffer(None, range(4), writable=False)
>>> c_contig(A)
2
- >>> [str(x) for x in A.recieved_flags]
- ['FORMAT', 'ND', 'STRIDES', 'C_CONTIGUOUS', 'WRITABLE']
+ >>> [str(x) for x in A.received_flags]
+ ['FORMAT', 'ND', 'STRIDES', 'C_CONTIGUOUS']
"""
return buf[2]
@testcase
def c_contig_2d(int[:, ::1] buf):
"""
- Multi-dim has seperate implementation
+ Multi-dim has separate implementation
- >>> A = IntMockBuffer(None, range(12), shape=(3,4))
+ >>> A = IntMockBuffer(None, range(12), shape=(3,4)) # , writable=False)
>>> c_contig_2d(A)
7
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'ND', 'STRIDES', 'C_CONTIGUOUS', 'WRITABLE']
"""
return buf[1, 3]
@testcase
def f_contig(int[::1, :] buf):
"""
- >>> A = IntMockBuffer(None, range(4), shape=(2, 2), strides=(1, 2))
+ >>> A = IntMockBuffer(None, range(4), shape=(2, 2), strides=(1, 2)) # , writable=False)
>>> f_contig(A)
2
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'ND', 'STRIDES', 'F_CONTIGUOUS', 'WRITABLE']
"""
return buf[0, 1]
"""
Must set up strides manually to ensure Fortran ordering.
- >>> A = IntMockBuffer(None, range(12), shape=(4,3), strides=(1, 4))
+ >>> A = IntMockBuffer(None, range(12), shape=(4,3), strides=(1, 4)) # , writable=False)
>>> f_contig_2d(A)
7
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'ND', 'STRIDES', 'F_CONTIGUOUS', 'WRITABLE']
"""
return buf[3, 1]
11
released A
released B
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'INDIRECT', 'ND', 'STRIDES', 'WRITABLE']
- >>> [str(x) for x in B.recieved_flags]
+ >>> [str(x) for x in B.received_flags]
['FORMAT', 'INDIRECT', 'ND', 'STRIDES', 'WRITABLE']
"""
print buf1[1, 1]
# 11
# released A
# released B
-# >>> [str(x) for x in A.recieved_flags]
+# >>> [str(x) for x in A.received_flags]
# ['FORMAT', 'INDIRECT', 'ND', 'STRIDES', 'WRITABLE']
-# >>> [str(x) for x in B.recieved_flags]
+# >>> [str(x) for x in B.received_flags]
# ['FORMAT', 'INDIRECT', 'ND', 'STRIDES', 'WRITABLE']
# """
# print buf1[1, 1]
11
released A
released B
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'INDIRECT', 'ND', 'STRIDES', 'WRITABLE']
- >>> [str(x) for x in B.recieved_flags]
+ >>> [str(x) for x in B.received_flags]
['FORMAT', 'INDIRECT', 'ND', 'STRIDES', 'WRITABLE']
"""
print buf1[1, 1]
11
released A
released B
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'INDIRECT', 'ND', 'STRIDES', 'WRITABLE']
- >>> [str(x) for x in B.recieved_flags]
+ >>> [str(x) for x in B.received_flags]
['FORMAT', 'INDIRECT', 'ND', 'STRIDES', 'WRITABLE']
"""
print buf1[1, 1]
@testcase
def safe_get(int[:] buf, int idx):
"""
- >>> A = IntMockBuffer(None, range(10), shape=(3,), offset=5)
+ >>> A = IntMockBuffer(None, range(10), shape=(3,), offset=5) # , writable=False)
Validate our testing buffer...
>>> safe_get(A, 0)
def unsafe_get(int[:] buf, int idx):
"""
Access outside of the area the buffer publishes.
- >>> A = IntMockBuffer(None, range(10), shape=(3,), offset=5)
+ >>> A = IntMockBuffer(None, range(10), shape=(3,), offset=5) # , writable=False)
>>> unsafe_get(A, -4)
4
>>> unsafe_get(A, -5)
@testcase
def mixed_get(int[:] buf, int unsafe_idx, int safe_idx):
"""
- >>> A = IntMockBuffer(None, range(10), shape=(3,), offset=5)
+ >>> A = IntMockBuffer(None, range(10), shape=(3,), offset=5) # , writable=False)
>>> mixed_get(A, -4, 0)
(4, 5)
>>> mixed_get(A, 0, -4)
"""
Strided:
- >>> printbuf_int_2d(IntMockBuffer("A", range(6), (2,3)), (2,3))
+ >>> printbuf_int_2d(IntMockBuffer("A", range(6), (2,3), writable=False), (2,3))
acquired A
0 1 2 END
3 4 5 END
released A
- >>> printbuf_int_2d(IntMockBuffer("A", range(100), (3,3), strides=(20,5)), (3,3))
+ >>> printbuf_int_2d(IntMockBuffer("A", range(100), (3,3), strides=(20,5), writable=False), (3,3))
acquired A
0 5 10 END
20 25 30 END
released A
Indirect:
- >>> printbuf_int_2d(IntMockBuffer("A", [[1,2],[3,4]]), (2,2))
+ >>> printbuf_int_2d(IntMockBuffer("A", [[1,2],[3,4]], writable=False), (2,2))
acquired A
1 2 END
3 4 END
released A
"""
# should make shape builtin
- cdef int[::view.generic, ::view.generic] buf
+ cdef const int[::view.generic, ::view.generic] buf
buf = o
cdef int i, j
for i in range(shape[0]):
@testcase
def printbuf_float(o, shape):
"""
- >>> printbuf_float(FloatMockBuffer("F", [1.0, 1.25, 0.75, 1.0]), (4,))
+ >>> printbuf_float(FloatMockBuffer("F", [1.0, 1.25, 0.75, 1.0], writable=False), (4,))
acquired F
1.0 1.25 0.75 1.0 END
released F
"""
# should make shape builtin
- cdef float[:] buf
+ cdef const float[:] buf
buf = o
cdef int i, j
for i in range(shape[0]):
@testcase
def printbuf_td_cy_int(td_cy_int[:] buf, shape):
"""
- >>> printbuf_td_cy_int(IntMockBuffer(None, range(3)), (3,))
+ >>> printbuf_td_cy_int(IntMockBuffer(None, range(3)), (3,)) # , writable=False), (3,))
0 1 2 END
- >>> printbuf_td_cy_int(ShortMockBuffer(None, range(3)), (3,))
+ >>> printbuf_td_cy_int(ShortMockBuffer(None, range(3)), (3,)) # , writable=False), (3,))
Traceback (most recent call last):
...
ValueError: Buffer dtype mismatch, expected 'td_cy_int' but got 'short'
@testcase
def printbuf_td_h_short(td_h_short[:] buf, shape):
"""
- >>> printbuf_td_h_short(ShortMockBuffer(None, range(3)), (3,))
+ >>> printbuf_td_h_short(ShortMockBuffer(None, range(3)), (3,)) # , writable=False), (3,))
0 1 2 END
- >>> printbuf_td_h_short(IntMockBuffer(None, range(3)), (3,))
+ >>> printbuf_td_h_short(IntMockBuffer(None, range(3)), (3,)) # , writable=False), (3,))
Traceback (most recent call last):
...
ValueError: Buffer dtype mismatch, expected 'td_h_short' but got 'int'
print 'END'
@testcase
-def printbuf_td_h_cy_short(td_h_cy_short[:] buf, shape):
+def printbuf_td_h_cy_short(const td_h_cy_short[:] buf, shape):
"""
- >>> printbuf_td_h_cy_short(ShortMockBuffer(None, range(3)), (3,))
+ >>> printbuf_td_h_cy_short(ShortMockBuffer(None, range(3), writable=False), (3,))
0 1 2 END
- >>> printbuf_td_h_cy_short(IntMockBuffer(None, range(3)), (3,))
+ >>> printbuf_td_h_cy_short(IntMockBuffer(None, range(3), writable=False), (3,))
Traceback (most recent call last):
...
- ValueError: Buffer dtype mismatch, expected 'td_h_cy_short' but got 'int'
+ ValueError: Buffer dtype mismatch, expected 'const td_h_cy_short' but got 'int'
"""
cdef int i
for i in range(shape[0]):
print 'END'
@testcase
-def printbuf_td_h_ushort(td_h_ushort[:] buf, shape):
+def printbuf_td_h_ushort(const td_h_ushort[:] buf, shape):
"""
- >>> printbuf_td_h_ushort(UnsignedShortMockBuffer(None, range(3)), (3,))
+ >>> printbuf_td_h_ushort(UnsignedShortMockBuffer(None, range(3), writable=False), (3,))
0 1 2 END
- >>> printbuf_td_h_ushort(ShortMockBuffer(None, range(3)), (3,))
+ >>> printbuf_td_h_ushort(ShortMockBuffer(None, range(3), writable=False), (3,))
Traceback (most recent call last):
...
- ValueError: Buffer dtype mismatch, expected 'td_h_ushort' but got 'short'
+ ValueError: Buffer dtype mismatch, expected 'const td_h_ushort' but got 'short'
"""
cdef int i
for i in range(shape[0]):
print 'END'
@testcase
-def printbuf_td_h_double(td_h_double[:] buf, shape):
+def printbuf_td_h_double(const td_h_double[:] buf, shape):
"""
- >>> printbuf_td_h_double(DoubleMockBuffer(None, [0.25, 1, 3.125]), (3,))
+ >>> printbuf_td_h_double(DoubleMockBuffer(None, [0.25, 1, 3.125], writable=False), (3,))
0.25 1.0 3.125 END
- >>> printbuf_td_h_double(FloatMockBuffer(None, [0.25, 1, 3.125]), (3,))
+ >>> printbuf_td_h_double(FloatMockBuffer(None, [0.25, 1, 3.125], writable=False), (3,))
Traceback (most recent call last):
...
- ValueError: Buffer dtype mismatch, expected 'td_h_double' but got 'float'
+ ValueError: Buffer dtype mismatch, expected 'const td_h_double' but got 'float'
"""
cdef int i
for i in range(shape[0]):
>>> a, b, c = "globally_unique_string_23234123", {4:23}, [34,3]
>>> get_refcount(a), get_refcount(b), get_refcount(c)
(2, 2, 2)
- >>> A = ObjectMockBuffer(None, [a, b, c])
+ >>> A = ObjectMockBuffer(None, [a, b, c]) # , writable=False)
>>> printbuf_object(A, (3,))
'globally_unique_string_23234123' 2
{4: 23} 2
"strided" by defaults which should show
up in the flags.
- >>> A = IntStridedMockBuffer("A", range(10))
+ >>> A = IntStridedMockBuffer("A", range(10)) # , writable=False)
>>> bufdefaults1(A)
acquired A
released A
- >>> [str(x) for x in A.recieved_flags]
+ >>> [str(x) for x in A.received_flags]
['FORMAT', 'ND', 'STRIDES', 'WRITABLE']
"""
pass
"""
See also buffmt.pyx
- >>> basic_struct(MyStructMockBuffer(None, [(1, 2, 3, 4, 5)]))
+ >>> basic_struct(MyStructMockBuffer(None, [(1, 2, 3, 4, 5)])) # , writable=False))
1 2 3 4 5
- >>> basic_struct(MyStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="ccqii"))
+ >>> basic_struct(MyStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="ccqii")) # , writable=False))
1 2 3 4 5
"""
print buf[0].a, buf[0].b, buf[0].c, buf[0].d, buf[0].e
"""
See also buffmt.pyx
- >>> nested_struct(NestedStructMockBuffer(None, [(1, 2, 3, 4, 5)]))
+ >>> nested_struct(NestedStructMockBuffer(None, [(1, 2, 3, 4, 5)])) # , writable=False))
1 2 3 4 5
- >>> nested_struct(NestedStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="T{ii}T{2i}i"))
+ >>> nested_struct(NestedStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="T{ii}T{2i}i")) # , writable=False))
1 2 3 4 5
"""
print buf[0].x.a, buf[0].x.b, buf[0].y.a, buf[0].y.b, buf[0].z
"""
See also buffmt.pyx
- >>> packed_struct(PackedStructMockBuffer(None, [(1, 2)]))
+ >>> packed_struct(PackedStructMockBuffer(None, [(1, 2)])) # , writable=False))
1 2
- >>> packed_struct(PackedStructMockBuffer(None, [(1, 2)], format="T{c^i}"))
+ >>> packed_struct(PackedStructMockBuffer(None, [(1, 2)], format="T{c^i}")) # , writable=False))
1 2
- >>> packed_struct(PackedStructMockBuffer(None, [(1, 2)], format="T{c=i}"))
+ >>> packed_struct(PackedStructMockBuffer(None, [(1, 2)], format="T{c=i}")) # , writable=False))
1 2
"""
"""
See also buffmt.pyx
- >>> nested_packed_struct(NestedPackedStructMockBuffer(None, [(1, 2, 3, 4, 5)]))
+ >>> nested_packed_struct(NestedPackedStructMockBuffer(None, [(1, 2, 3, 4, 5)])) # , writable=False))
1 2 3 4 5
- >>> nested_packed_struct(NestedPackedStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="ci^ci@i"))
+ >>> nested_packed_struct(NestedPackedStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="ci^ci@i")) # , writable=False))
1 2 3 4 5
- >>> nested_packed_struct(NestedPackedStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="^c@i^ci@i"))
+ >>> nested_packed_struct(NestedPackedStructMockBuffer(None, [(1, 2, 3, 4, 5)], format="^c@i^ci@i")) # , writable=False))
1 2 3 4 5
"""
print buf[0].a, buf[0].b, buf[0].sub.a, buf[0].sub.b, buf[0].c
@testcase
def complex_dtype(long double complex[:] buf):
"""
- >>> complex_dtype(LongComplexMockBuffer(None, [(0, -1)]))
+ >>> complex_dtype(LongComplexMockBuffer(None, [(0, -1)])) # , writable=False))
-1j
"""
print buf[0]
"""
Note that the format string is "Zg" rather than "2g", yet a struct
is accessed.
- >>> complex_struct_dtype(LongComplexMockBuffer(None, [(0, -1)]))
+ >>> complex_struct_dtype(LongComplexMockBuffer(None, [(0, -1)])) # , writable=False))
0.0 -1.0
"""
print buf[0].real, buf[0].imag
def test_generic_slicing(arg, indirect=False):
"""
Test simple slicing
- >>> test_generic_slicing(IntMockBuffer("A", range(8 * 14 * 11), shape=(8, 14, 11)))
+ >>> test_generic_slicing(IntMockBuffer("A", range(8 * 14 * 11), shape=(8, 14, 11))) # , writable=False))
acquired A
3 9 2
308 -11 1
released A
Test direct slicing, negative slice oob in dim 2
- >>> test_generic_slicing(IntMockBuffer("A", range(1 * 2 * 3), shape=(1, 2, 3)))
+ >>> test_generic_slicing(IntMockBuffer("A", range(1 * 2 * 3), shape=(1, 2, 3))) # , writable=False))
acquired A
0 0 2
12 -3 1
released A
Test indirect slicing
- >>> test_generic_slicing(IntMockBuffer("A", shape_5_3_4_list, shape=(5, 3, 4)), indirect=True)
+ >>> test_generic_slicing(IntMockBuffer("A", shape_5_3_4_list, shape=(5, 3, 4)), indirect=True) # , writable=False), indirect=True)
acquired A
2 0 2
0 1 -1
released A
- >>> test_generic_slicing(IntMockBuffer("A", shape_9_14_21_list, shape=(9, 14, 21)), indirect=True)
+ >>> test_generic_slicing(IntMockBuffer("A", shape_9_14_21_list, shape=(9, 14, 21)), indirect=True) # , writable=False), indirect=True)
acquired A
3 9 2
10 1 -1
def test_indirect_slicing(arg):
"""
Test indirect slicing
- >>> test_indirect_slicing(IntMockBuffer("A", shape_5_3_4_list, shape=(5, 3, 4)))
+ >>> test_indirect_slicing(IntMockBuffer("A", shape_5_3_4_list, shape=(5, 3, 4))) # , writable=False))
acquired A
5 3 2
0 0 -1
58
released A
- >>> test_indirect_slicing(IntMockBuffer("A", shape_9_14_21_list, shape=(9, 14, 21)))
+ >>> test_indirect_slicing(IntMockBuffer("A", shape_9_14_21_list, shape=(9, 14, 21))) # , writable=False))
acquired A
5 14 3
0 16 -1
Fused types would be convenient to test this stuff!
Test simple slicing
- >>> test_direct_slicing(IntMockBuffer("A", range(8 * 14 * 11), shape=(8, 14, 11)))
+ >>> test_direct_slicing(IntMockBuffer("A", range(8 * 14 * 11), shape=(8, 14, 11))) # , writable=False))
acquired A
3 9 2
308 -11 1
released A
Test direct slicing, negative slice oob in dim 2
- >>> test_direct_slicing(IntMockBuffer("A", range(1 * 2 * 3), shape=(1, 2, 3)))
+ >>> test_direct_slicing(IntMockBuffer("A", range(1 * 2 * 3), shape=(1, 2, 3))) # , writable=False))
acquired A
0 0 2
12 -3 1
@testcase
def test_slicing_and_indexing(arg):
"""
- >>> a = IntStridedMockBuffer("A", range(10 * 3 * 5), shape=(10, 3, 5))
+ >>> a = IntStridedMockBuffer("A", range(10 * 3 * 5), shape=(10, 3, 5)) # , writable=False)
>>> test_slicing_and_indexing(a)
acquired A
5 2
...
IndexError: Index out of bounds (axis 1)
"""
- cdef int[:, :] a = IntMockBuffer("A", range(4 * 9), shape=(4, 9))
+ cdef int[:, :] a = IntMockBuffer("A", range(4 * 9), shape=(4, 9)) # , writable=False)
print a[:, 20]
...
IndexError: Index out of bounds (axis 0)
"""
- cdef int[:, :] a = IntMockBuffer("A", range(4 * 9), shape=(4, 9))
+ cdef int[:, :] a = IntMockBuffer("A", range(4 * 9), shape=(4, 9)) # , writable=False)
with nogil:
a[100, 9:]
2
released A
"""
- cdef int[:] a = IntMockBuffer("A", range(10), shape=(10,))
+ cdef int[:] a = IntMockBuffer("A", range(10), shape=(10,)) # , writable=False)
with nogil:
a = a[2:4]
print a[0]
@cython.wraparound(False)
def test_memslice_prange(arg):
"""
- >>> test_memslice_prange(IntMockBuffer("A", range(400), shape=(20, 4, 5)))
+ >>> test_memslice_prange(IntMockBuffer("A", range(400), shape=(20, 4, 5))) # FIXME: , writable=False))
acquired A
released A
- >>> test_memslice_prange(IntMockBuffer("A", range(200), shape=(100, 2, 1)))
+ >>> test_memslice_prange(IntMockBuffer("A", range(200), shape=(100, 2, 1))) # FIXME: , writable=False))
acquired A
released A
"""
@testcase
def test_newaxis(int[:] one_D):
"""
- >>> A = IntMockBuffer("A", range(6))
+ >>> A = IntMockBuffer("A", range(6)) # , writable=False)
>>> test_newaxis(A)
acquired A
3
@testcase
def test_newaxis2(int[:, :] two_D):
"""
- >>> A = IntMockBuffer("A", range(6), shape=(3, 2))
+ >>> A = IntMockBuffer("A", range(6), shape=(3, 2)) # , writable=False)
>>> test_newaxis2(A)
acquired A
shape: 3 1 1
_print_attributes(d)
+@testcase
+def test_const_buffer(const int[:] a):
+ """
+ >>> A = IntMockBuffer("A", range(6), shape=(6,), writable=False)
+ >>> test_const_buffer(A)
+ acquired A
+ 0
+ 5
+ released A
+ """
+ cdef const int[:] c = a
+ print(a[0])
+ print(c[-1])
__test__[f.__name__] = f.__doc__
return f
+
+def gc_collect_if_required():
+ major, minor, *rest = np.__version__.split('.')
+ if (int(major), int(minor)) >= (1, 14):
+ import gc
+ gc.collect()
+
+
#
### Test slicing memoryview slices
#
deallocating...
12.2
deallocating...
- 13.3
+ 13.25
deallocating...
(14.4+15.5j)
deallocating...
- (16.6+17.7j)
+ (16.5+17.7j)
deallocating...
- (18.8+19.9j)
+ (18.8125+19.9375j)
deallocating...
22
deallocating...
floats[idx] = 11.1
doubles[idx] = 12.2
- longdoubles[idx] = 13.3
+ longdoubles[idx] = 13.25
floatcomplex[idx] = 14.4 + 15.5j
- doublecomplex[idx] = 16.6 + 17.7j
- longdoublecomplex[idx] = 18.8 + 19.9j
+ doublecomplex[idx] = 16.5 + 17.7j
+ longdoublecomplex[idx] = 18.8125 + 19.9375j # x/64 to avoid float format rounding issues
h_shorts[idx] = 22
h_doubles[idx] = 33.33
@testcase_numpy_1_5
def test_memslice_getbuffer():
"""
- >>> test_memslice_getbuffer()
+ >>> test_memslice_getbuffer(); gc_collect_if_required()
[[ 0 2 4]
[10 12 14]]
callback called
"""
cdef int[:, :] array = create_array((4, 5), mode="c", use_callback=True)
- print np.asarray(array)[::2, ::2]
+ print(np.asarray(array)[::2, ::2])
cdef class DeallocateMe(object):
def __dealloc__(self):
--- /dev/null
+# mode: run
+# tag: readonly, const, numpy
+
+import numpy as np
+
+def new_array():
+ return np.arange(10).astype('float')
+
+ARRAY = new_array()
+
+
+cdef getmax(const double[:] x):
+ """Example code, should work with both ro and rw memoryviews"""
+ cdef double max_val = -float('inf')
+ for val in x:
+ if val > max_val:
+ max_val = val
+ return max_val
+
+
+cdef update_array(double [:] x):
+ """Modifying a ro memoryview should raise an error"""
+ x[0] = 23.
+
+
+cdef getconst(const double [:] x):
+ """Should accept ro memoryviews"""
+ return x[0]
+
+
+def test_mmview_rw(x):
+ """
+ >>> test_mmview_rw(ARRAY)
+ 9.0
+ """
+ return getmax(x)
+
+
+def test_mmview_ro(x):
+ """
+ >>> test_mmview_ro(new_array())
+ 9.0
+ """
+ x.setflags(write=False)
+ assert x.flags.writeable is False
+ return getmax(x)
+
+
+def test_update_mmview_rw(x):
+ """
+ >>> test_update_mmview_rw(new_array())
+ 23.0
+ """
+ update_array(x)
+ return x[0]
+
+
+def test_update_mmview_ro(x):
+ """
+ >>> test_update_mmview_ro(new_array())
+ 0.0
+ """
+ x.setflags(write=False)
+ assert x.flags.writeable is False
+ try:
+ update_array(x)
+ except ValueError: pass
+ else:
+ assert False, "RO error not raised!"
+ return getconst(x)
+
+
+def test_rw_call_getmax(double[:] x):
+ """
+ >>> test_rw_call_getmax(new_array())
+ 23.0
+ """
+ update_array(x)
+ assert getconst(x) == 23
+ return getmax(x)
+
+
+def test_const_mmview_ro(x):
+ """
+ >>> test_const_mmview_ro(new_array())
+ 0.0
+ """
+ x.setflags(write=False)
+ assert x.flags.writeable is False
+ return getconst(x)
+
+
+def test_two_views(x):
+ """
+ >>> test_two_views(new_array())
+ 23.0
+ """
+ cdef double[:] rw = x
+ cdef const double[:] ro = rw
+ rw[0] = 23
+ return ro[0]
+
+
+def test_assign_ro_to_rw(x):
+ """
+ >>> test_assign_ro_to_rw(new_array())
+ 2.0
+ """
+ cdef const double[:] ro = x
+ cdef double[:] rw = np.empty_like(ro)
+ rw[:] = ro
+ return rw[2]
def ctypes_def(a: list, b: cython.int = 2, c: cython.long = 3, d: cython.float = 4) -> list:
"""
- >>> pytypes_def([1])
- ('list object', 'Python object', 'Python object', 'double')
+ >>> ctypes_def([1])
+ ('list object', 'int', 'long', 'float')
[1, 2, 3, 4.0]
- >>> pytypes_def([1], 3)
- ('list object', 'Python object', 'Python object', 'double')
+ >>> ctypes_def([1], 3)
+ ('list object', 'int', 'long', 'float')
[1, 3, 3, 4.0]
- >>> pytypes_def(123)
+ >>> ctypes_def(123)
Traceback (most recent call last):
TypeError: Argument 'a' has incorrect type (expected list, got int)
"""
pass
+def py_float_default(price : float=None, ndigits=4):
+ """
+ Python default arguments should prevent C type inference.
+
+ >>> py_float_default()
+ (None, 4)
+ >>> py_float_default(2)
+ (2, 4)
+ >>> py_float_default(2.0)
+ (2.0, 4)
+ >>> py_float_default(2, 3)
+ (2, 3)
+ """
+ return price, ndigits
+
+
_WARNINGS = """
8:32: Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.
8:47: Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.
8:85: Python type declaration in signature annotation does not refer to a Python type
8:85: Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.
211:44: Unknown type declaration in annotation, ignoring
+218:29: Ambiguous types in annotation, ignoring
# BUG:
46:6: 'pytypes_cpdef' redeclared
121:0: 'struct_io' redeclared
-/* A set of mutually incompatable return types. */
+/* A set of mutually incompatible return types. */
struct short_return { char *msg; };
struct int_return { char *msg; };
result = loop.run_until_complete(task())
assert 3 == result, result
-runloop(from_asyncio_import.wait3)
+import sys
+if sys.version_info < (3, 7):
+ runloop(from_asyncio_import.wait3)
######## test_import.py ########
result = loop.run_until_complete(task())
assert 3 == result, result
-runloop(import_asyncio.wait3)
+import sys
+if sys.version_info < (3, 7):
+ runloop(import_asyncio.wait3)
######## test_async_def.py ########
import asyncio
ASYNCIO_SUPPORTS_COROUTINE = sys.version_info[:2] >= (3, 5)
+ASYNCIO_SUPPORTS_YIELD_FROM = sys.version_info[:2] < (3, 7)
def runloop(task):
loop = asyncio.get_event_loop()
assert 3 == result, result
import import_asyncio
-runloop(import_asyncio.wait3) # 1a)
+if ASYNCIO_SUPPORTS_YIELD_FROM:
+ runloop(import_asyncio.wait3) # 1a)
import from_asyncio_import
-runloop(from_asyncio_import.wait3) # 1b)
+if ASYNCIO_SUPPORTS_YIELD_FROM:
+ runloop(from_asyncio_import.wait3) # 1b)
import async_def
if ASYNCIO_SUPPORTS_COROUTINE:
- runloop(async_def.wait3) # 1c)
+ runloop(async_def.wait3) # 1c)
-runloop(from_asyncio_import.wait3) # 2a)
-runloop(import_asyncio.wait3) # 2b)
+if ASYNCIO_SUPPORTS_YIELD_FROM:
+ runloop(from_asyncio_import.wait3) # 2a)
+ runloop(import_asyncio.wait3) # 2b)
if ASYNCIO_SUPPORTS_COROUTINE:
- runloop(async_def.wait3) # 2c)
+ runloop(async_def.wait3) # 2c)
-runloop(from_asyncio_import.wait3) # 3a)
-runloop(import_asyncio.wait3) # 3b)
+import sys
+if ASYNCIO_SUPPORTS_YIELD_FROM:
+ runloop(from_asyncio_import.wait3) # 3a)
+ runloop(import_asyncio.wait3) # 3b)
if ASYNCIO_SUPPORTS_COROUTINE:
- runloop(async_def.wait3) # 3c)
+ runloop(async_def.wait3) # 3c)
try:
from collections.abc import Generator
"""
return b[:2]
+
+def infer_concatenation_types(bytearray b):
+ """
+ >>> b = bytearray(b'a\\xFEc')
+ >>> b2, c, d, e, tb, tc, td, te = infer_concatenation_types(b)
+ >>> tb, tc, td, te
+ ('bytearray object', 'bytearray object', 'bytearray object', 'bytearray object')
+ >>> b2, c, d, e
+ (bytearray(b'a\\xfec'), bytearray(b'a\\xfeca\\xfec'), bytearray(b'a\\xfeca\\xfec'), bytearray(b'a\\xfeca\\xfec'))
+ """
+ c = b[:]
+ c += b[:]
+
+ d = b[:]
+ d *= 2
+
+ e = b + b
+
+ return b, c, d, e, cython.typeof(b), cython.typeof(c), cython.typeof(d), cython.typeof(e)
+
+
def infer_index_types(bytearray b):
"""
>>> b = bytearray(b'a\\xFEc')
e = b[1]
return c, d, e, cython.typeof(c), cython.typeof(d), cython.typeof(e), cython.typeof(b[1])
+
def infer_slice_types(bytearray b):
"""
>>> b = bytearray(b'abc')
>>> print(infer_slice_types(b))
- (bytearray(b'bc'), bytearray(b'bc'), bytearray(b'bc'), 'Python object', 'Python object', 'Python object', 'bytearray object')
+ (bytearray(b'bc'), bytearray(b'bc'), bytearray(b'bc'), 'bytearray object', 'bytearray object', 'bytearray object', 'bytearray object')
"""
c = b[1:]
with cython.boundscheck(False):
e = b[1:]
return c, d, e, cython.typeof(c), cython.typeof(d), cython.typeof(e), cython.typeof(b[1:])
+
def assign_to_index(bytearray b, value):
"""
>>> b = bytearray(b'0abcdefg')
b.append(i)
b.append(o)
return b
+
+
+cdef class BytearraySubtype(bytearray):
+ """
+ >>> b = BytearraySubtype(b'abc')
+ >>> b._append(ord('x'))
+ >>> b.append(ord('y'))
+ >>> print(b.decode('ascii'))
+ abcxy
+ """
+ def _append(self, x):
+ self.append(x)
--- /dev/null
+cdef class CBase(object):
+ cdef int a
+ cdef c_method(self):
+ return "CBase"
+ cpdef cpdef_method(self):
+ return "CBase"
+
+class PyBase(object):
+ def py_method(self):
+ return "PyBase"
+
+cdef class Both(CBase, PyBase):
+ cdef dict __dict__
+ """
+ >>> b = Both()
+ >>> b.py_method()
+ 'PyBase'
+ >>> b.cp_method()
+ 'Both'
+ >>> b.call_c_method()
+ 'Both'
+
+ >>> isinstance(b, CBase)
+ True
+ >>> isinstance(b, PyBase)
+ True
+ """
+ cdef c_method(self):
+ return "Both"
+ cpdef cp_method(self):
+ return "Both"
+ def call_c_method(self):
+ return self.c_method()
+
+cdef class BothSub(Both):
+ """
+ >>> b = BothSub()
+ >>> b.py_method()
+ 'PyBase'
+ >>> b.cp_method()
+ 'Both'
+ >>> b.call_c_method()
+ 'Both'
+ """
+ pass
--- /dev/null
+PYTHON setup.py build_ext --inplace
+PYTHON -c "import runner"
+
+######## setup.py ########
+
+from Cython.Build.Dependencies import cythonize
+from distutils.core import setup
+
+setup(ext_modules=cythonize("*.pyx"))
+
+######## notheaptype.pyx ########
+
+cdef class Base:
+ pass
+
+Obj = type(object())
+
+cdef class Foo(Base, Obj):
+ pass
+
+######## wrongbase.pyx ########
+
+cdef class Base:
+ pass
+
+Str = type("")
+
+cdef class X(Base, Str):
+ pass
+
+######## badmro.pyx ########
+
+class Py(object):
+ pass
+
+cdef class X(object, Py):
+ pass
+
+######## nodict.pyx ########
+
+cdef class Base:
+ pass
+
+class Py(object):
+ pass
+
+cdef class X(Base, Py):
+ pass
+
+######## oldstyle.pyx ########
+
+cdef class Base:
+ cdef dict __dict__
+
+class OldStyle:
+ pass
+
+cdef class Foo(Base, OldStyle):
+ pass
+
+######## runner.py ########
+
+import sys
+
+try:
+ import notheaptype
+ assert False
+except TypeError as msg:
+ assert str(msg) == "base class 'object' is not a heap type"
+
+try:
+ import wrongbase
+ assert False
+except TypeError as msg:
+ assert str(msg) == "best base 'str' must be equal to first base 'wrongbase.Base'"
+
+try:
+ import badmro
+ assert False
+except TypeError as msg:
+ assert str(msg).startswith("Cannot create a consistent method resolution")
+
+try:
+ import nodict
+ assert False
+except TypeError as msg:
+ assert str(msg) == "extension type 'nodict.X' has no __dict__ slot, but base type 'Py' has: either add 'cdef dict __dict__' to the extension type or add '__slots__ = [...]' to the base type"
+
+try:
+ # This should work on Python 3 but fail on Python 2
+ import oldstyle
+ assert sys.version_info[0] >= 3
+except TypeError as msg:
+ assert str(msg) == "base class 'OldStyle' is an old-style class"
--- /dev/null
+# Copied from cdef_multiple_inheritance.pyx
+# but with __slots__ and without __dict__
+
+cdef class CBase(object):
+ cdef int a
+ cdef c_method(self):
+ return "CBase"
+ cpdef cpdef_method(self):
+ return "CBase"
+
+class PyBase(object):
+ __slots__ = []
+ def py_method(self):
+ return "PyBase"
+
+cdef class Both(CBase, PyBase):
+ """
+ >>> b = Both()
+ >>> b.py_method()
+ 'PyBase'
+ >>> b.cp_method()
+ 'Both'
+ >>> b.call_c_method()
+ 'Both'
+
+ >>> isinstance(b, CBase)
+ True
+ >>> isinstance(b, PyBase)
+ True
+ """
+ cdef c_method(self):
+ return "Both"
+ cpdef cp_method(self):
+ return "Both"
+ def call_c_method(self):
+ return self.c_method()
+
+cdef class BothSub(Both):
+ """
+ >>> b = BothSub()
+ >>> b.py_method()
+ 'PyBase'
+ >>> b.cp_method()
+ 'Both'
+ >>> b.call_c_method()
+ 'Both'
+ """
+ pass
--- /dev/null
+# mode: run
+# tag: pickle
+
+PYTHON main.py build_ext -i
+
+######################### lib/__init__.py #########################
+
+######################### lib/cy.pyx #########################
+# cython: binding=True
+
+cdef class WithoutC:
+ def hello(self):
+ return "Hello, World"
+
+cdef class WithCPDef:
+ cpdef str hello(self):
+ return "Hello, World"
+
+cdef class WithCDefWrapper:
+ def hello(self):
+ return _helloC(self)
+
+cpdef _helloC(object caller):
+ return "Hello, World"
+
+
+######################### lib/cy.pxd #########################
+# cython:language_level=3
+
+cdef class WithoutCPDef:
+ pass
+
+cdef class WithCPDef:
+ cpdef str hello(self)
+
+cdef class WithCDefWrapper:
+ pass
+
+cpdef _helloC(object caller)
+
+
+######################### main.py #########################
+#!/usr/bin/env python3
+
+from Cython.Build import cythonize
+from distutils.core import setup
+
+setup(
+ ext_modules = cythonize(["lib/*.pyx"]),
+)
+
+import pickle as pkl
+import os
+from lib.cy import WithoutC, WithCPDef, WithCDefWrapper
+
+def tryThis(obj):
+ print("Pickling %s ..." % obj.__class__.__name__)
+ try:
+ pkl.dump(obj, open("test.pkl", "wb"))
+ print("\t... OK")
+ except Exception as e:
+ print("\t... KO: %s" % str(e))
+
+try:
+ for t in WithoutC(), WithCPDef(), WithCDefWrapper():
+ tryThis(t)
+finally:
+ if os.path.exists("test.pkl"):
+ os.remove("test.pkl")
-# tag: cpp
+# mode: run
+# tag: cpp, warnings
-# This gives a warning, but should not give an error.
+# This gives a warning about the previous .pxd definition, but should not give an error.
cdef cppclass Foo:
int _foo
int get_foo():
return foo.get_foo()
finally:
del foo
+
+
+_WARNINGS = """
+5:5: 'Foo' already defined (ignoring second definition)
+"""
# mode: run
# tag: cpp, werror
+from libcpp.deque cimport deque
from libcpp.vector cimport vector
from cython.operator cimport dereference as deref
>>> test_vector([1, 2, 3])
[1, 2, 3]
"""
- cdef vector[int] v = py_v
+ cdef vector[int] vint = py_v
cdef vector[int] result
with nogil:
- for item in v:
+ for item in vint:
result.push_back(item)
return result
+def test_deque_iterator_subtraction(py_v):
+ """
+ >>> test_deque_iterator_subtraction([1, 2, 3])
+ 3
+ """
+ cdef deque[int] dint
+ for i in py_v:
+ dint.push_back(i)
+ cdef deque[int].iterator first = dint.begin()
+ cdef deque[int].iterator last = dint.end()
+
+ return last - first
+
+def test_vector_iterator_subtraction(py_v):
+ """
+ >>> test_vector_iterator_subtraction([1, 2, 3])
+ 3
+ """
+ cdef vector[int] vint = py_v
+ cdef vector[int].iterator first = vint.begin()
+ cdef vector[int].iterator last = vint.end()
+
+ return last - first
+
+def test_deque_iterator_addition(py_v):
+ """
+ >>> test_deque_iterator_addition([2, 4, 6])
+ 6
+ """
+ cdef deque[int] dint
+ for i in py_v:
+ dint.push_back(i)
+ cdef deque[int].iterator first = dint.begin()
+
+ return deref(first+2)
+
+def test_vector_iterator_addition(py_v):
+ """
+ >>> test_vector_iterator_addition([2, 4, 6])
+ 6
+ """
+ cdef vector[int] vint = py_v
+ cdef vector[int].iterator first = vint.begin()
+
+ return deref(first+2)
+
def test_ptrs():
"""
>>> test_ptrs()
cdef out(s, result_type=None):
print '%s [%s]' % (s.decode('ascii'), result_type)
-cdef extern from "cpp_operators_helper.h":
+cdef extern from "cpp_operators_helper.h" nogil:
cdef cppclass TestOps:
const_char* operator+()
"""
return add_strings(a, b)
+def test_c_string_convert(char *c_string):
+ """
+ >>> normalize(test_c_string_convert("abc".encode('ascii')))
+ 'abc'
+ """
+ cdef string s
+ with nogil:
+ s = c_string
+ return s
+
def test_int_vector(o):
"""
>>> test_int_vector([1, 2, 3])
# mode: run
-# tag: cpp, werror
+# tag: cpp, warnings
cimport cython
b_asdg = b'asdg'
b_s = b's'
+
+cdef int compare_to_asdf_ref(string& s) except -999:
+ return s.compare(b"asdf")
+
+def test_coerced_literal_ref():
+ """
+ >>> test_coerced_literal_ref()
+ 0
+ """
+ return compare_to_asdf_ref("asdf")
+
+
+cdef int compare_to_asdf_const_ref(const string& s) except -999:
+ return s.compare(b"asdf")
+
+def test_coerced_literal_const_ref():
+ """
+ >>> test_coerced_literal_const_ref()
+ 0
+ """
+ return compare_to_asdf_const_ref("asdf")
+
+
+cdef int compare_to_asdf_const(const string s) except -999:
+ return s.compare(b"asdf")
+
+def test_coerced_literal_const():
+ """
+ >>> test_coerced_literal_const()
+ 0
+ """
+ return compare_to_asdf_const("asdf")
+
+
def test_conversion(py_obj):
"""
>>> test_conversion(b_asdf) == b_asdf or test_conversion(b_asdf)
[]
"""
return [c for c in s]
+
+
+_WARNINGS = """
+21:31: Cannot pass Python object as C++ data structure reference (string &), will pass by copy.
+"""
-# tag: cpp
+# mode: run
+# tag: cpp, warnings
cimport cython
from libcpp.pair cimport pair
# tag: cpp
+cimport cython
from cython.operator import dereference as deref
cdef extern from "cpp_templates_helper.h":
finally:
del w
+def test_typeof(double x):
+ """
+    >>> test_typeof(3)
+    9.0
+    >>> test_typeof(-1.5)
+    2.25
+ """
+ try:
+ w = new Wrap[cython.typeof(&f)](&f)
+ return w.get()(x)
+ finally:
+ del w
+
def test_cast_template_pointer():
"""
>>> test_cast_template_pointer()
(1, 1.5)
"""
return Div[int].half(x), Div[double].half(x)
+
+def test_pure_syntax(int i):
+ """
+    >>> test_pure_syntax(3)
+    3
+    >>> test_pure_syntax(5)
+    5
+ """
+ try:
+ w = new Wrap[cython.pointer(int)](&i)
+ return deref(w.get())
+ finally:
+ del w
__doc__ = u"""
- >>> print(spam)
- eggseggseggseggs
- >>> print(grail)
- tomatotomatotomatotomatotomatotomatotomato
+>>> print(spam)
+eggseggseggseggs
+>>> print(uspam)
+eggseggseggseggs
+>>> print(bspam.decode('ascii'))
+eggseggseggseggs
+
+>>> print(grail)
+tomatotomatotomatotomatotomatotomatotomato
+>>> len(grail_long)
+4200
+>>> print(ugrail)
+tomatotomatotomatotomatotomatotomatotomato
+>>> len(ugrail_long)
+4200
+>>> print(bgrail.decode('ascii'))
+tomatotomatotomatotomatotomatotomatotomato
+>>> len(bgrail_long)
+4200
"""
-spam = u"eggs" * 4
-grail = 7 * u"tomato"
+bspam = b"eggs" * 4
+bgrail = 7 * b"tomato"
+bgrail_long = 700 * b"tomato"
+
+spam = "eggs" * 4
+grail = 7 * "tomato"
+grail_long = 700 * "tomato"
+
+uspam = u"eggs" * 4
+ugrail = 7 * u"tomato"
+ugrail_long = 700 * u"tomato"
# tag: cyfunction
import sys
+IS_PY2 = sys.version_info[0] < 3
IS_PY3 = sys.version_info[0] >= 3
IS_PY34 = sys.version_info > (3, 4, 0, 'beta', 3)
2
"""
return x
+
+
+cdef class TestUnboundMethodCdef:
+ """
+ >>> C = TestUnboundMethodCdef
+ >>> IS_PY2 or (C.meth is C.__dict__["meth"])
+ True
+ """
+ def meth(self): pass
+
+
+class TestUnboundMethod:
+ """
+ >>> C = TestUnboundMethod
+ >>> IS_PY2 or (C.meth is C.__dict__["meth"])
+ True
+ """
+ def meth(self): pass
print "i", i, "func result", f(1.0), "defaults", get_defaults(f)
+def test_memoryview_none(const unsigned char[:] b=None):
+ """
+ >>> test_memoryview_none()
+ >>> test_memoryview_none(None)
+ >>> test_memoryview_none(b'abc')
+ 97
+ """
+ if b is None:
+ return None
+ return b[0]
+
+
+def test_memoryview_bytes(const unsigned char[:] b=b'xyz'):
+ """
+ >>> test_memoryview_bytes()
+ 120
+ >>> test_memoryview_bytes(None)
+ >>> test_memoryview_bytes(b'abc')
+ 97
+ """
+ if b is None:
+ return None
+ return b[0]
+
+
@cython.test_fail_if_path_exists(
'//NameNode[@entry.in_closure = True]',
'//NameNode[@entry.from_closure = True]')
"""
return d[index]
+
+def getitem_str(dict d, obj, str index):
+ """
+ >>> d = {'abc': 1, 'xyz': 2, None: 3}
+ >>> getitem_str(d, d, 'abc')
+ (1, 1)
+ >>> getitem_str(d, d, 'xyz')
+ (2, 2)
+ >>> getitem_str(d, d, None)
+ (3, 3)
+
+ >>> class GetItem(object):
+ ... def __getitem__(self, name): return d[name]
+ >>> getitem_str(d, GetItem(), 'abc')
+ (1, 1)
+ >>> getitem_str(d, GetItem(), 'xyz')
+ (2, 2)
+ >>> getitem_str(d, GetItem(), None)
+ (3, 3)
+ >>> getitem_str(d, GetItem(), 'no')
+ Traceback (most recent call last):
+ KeyError: 'no'
+
+ >>> class GetItemFail(object):
+ ... def __getitem__(self, name): raise ValueError("failed")
+ >>> getitem_str(d, GetItemFail(), 'abc')
+ Traceback (most recent call last):
+ ValueError: failed
+ >>> getitem_str(d, GetItemFail(), None)
+ Traceback (most recent call last):
+ ValueError: failed
+ """
+ return d[index], obj[index]
+
+
+def getitem_unicode(dict d, obj, unicode index):
+ """
+ >>> d = {'abc': 1, 'xyz': 2, None: 3}
+ >>> getitem_unicode(d, d, u'abc')
+ (1, 1)
+ >>> getitem_unicode(d, d, u'xyz')
+ (2, 2)
+ >>> getitem_unicode(d, d, None)
+ (3, 3)
+
+ >>> class GetItem(object):
+ ... def __getitem__(self, name): return d[name]
+ >>> getitem_unicode(d, GetItem(), u'abc')
+ (1, 1)
+ >>> getitem_unicode(d, GetItem(), u'xyz')
+ (2, 2)
+ >>> getitem_unicode(d, GetItem(), None)
+ (3, 3)
+ >>> try: getitem_unicode(d, GetItem(), u'no')
+ ... except KeyError as exc: assert exc.args[0] == u'no', str(exc)
+ ... else: assert False, "KeyError not raised"
+
+ >>> class GetItemFail(object):
+ ... def __getitem__(self, name): raise ValueError("failed")
+ >>> getitem_unicode(d, GetItemFail(), u'abc')
+ Traceback (most recent call last):
+ ValueError: failed
+ """
+ return d[index], obj[index]
+
+
def getitem_tuple(dict d, index):
"""
>>> d = {1: 1, (1,): 2}
"""
return d[index], d[index,]
+
def getitem_in_condition(dict d, key, expected_result):
"""
>>> d = dict(a=1, b=2)
"""
return d[key] is expected_result or d[key] == expected_result
+
@cython.test_fail_if_path_exists('//NoneCheckNode')
def getitem_not_none(dict d not None, key):
"""
--- /dev/null
+
+cimport cython
+
+# The tree-path assertions check that d.pop(key) on a typed dict is
+# compiled to a direct C-API helper call (PythonCapiCallNode) with the
+# attribute lookup optimised away (no AttributeNode remains).
+@cython.test_assert_path_exists("//PythonCapiCallNode")
+@cython.test_fail_if_path_exists("//AttributeNode")
+def dict_pop(dict d, key):
+    """
+    >>> d = { 1: 10, 2: 20 }
+    >>> dict_pop(d, 1)
+    (10, {2: 20})
+    >>> dict_pop(d, 3)
+    Traceback (most recent call last):
+    KeyError: 3
+    >>> dict_pop(d, 2)
+    (20, {})
+    """
+    # No-default form: a missing key must raise KeyError (see doctest).
+    return d.pop(key), d
+
+
+# Two-argument pop: same optimisation requirement as dict_pop (direct
+# C-API call, no AttributeNode), but a missing key yields the default
+# instead of raising KeyError.
+@cython.test_assert_path_exists("//PythonCapiCallNode")
+@cython.test_fail_if_path_exists("//AttributeNode")
+def dict_pop_default(dict d, key, default):
+    """
+    >>> d = { 1: 10, 2: 20 }
+    >>> dict_pop_default(d, 1, "default")
+    (10, {2: 20})
+    >>> dict_pop_default(d, 3, None)
+    (None, {2: 20})
+    >>> dict_pop_default(d, 3, "default")
+    ('default', {2: 20})
+    >>> dict_pop_default(d, 2, "default")
+    (20, {})
+    """
+    return d.pop(key, default), d
... else: a >= b
Traceback (most recent call last):
TypeError...
+
+ >>> print(a.__eq__.__doc__)
+ EQ
"""
def __eq__(self, other):
+ """EQ"""
assert 1 <= self.x <= 2
assert isinstance(self, ClassEq), type(self)
if isinstance(other, X):
... else: a >= b
Traceback (most recent call last):
TypeError...
+
+ #>>> print(a.__eq__.__doc__)
+ #EQ
+ >>> print(a.__ne__.__doc__)
+ NE
"""
def __ne__(self, other):
+ """NE"""
assert 1 <= self.x <= 2
assert isinstance(self, ClassEqNe), type(self)
if isinstance(other, X):
... else: a >= 'x'
Traceback (most recent call last):
TypeError...
+
+ #>>> print(a.__eq__.__doc__)
+ #EQ
+ #>>> print(a.__ne__.__doc__)
+ #NE
+ >>> print(a.__ge__.__doc__)
+ GE
"""
def __ge__(self, other):
+ """GE"""
assert 1 <= self.x <= 2
assert isinstance(self, ClassEqNeGe), type(self)
if isinstance(other, X):
--- /dev/null
+PYTHON setup.py build_ext --inplace
+PYTHON -c "import a"
+PYTHON -c "import b"
+
+######## setup.py ########
+
+from Cython.Build import cythonize
+from distutils.core import setup
+
+setup(
+ ext_modules = cythonize("*.pyx"),
+)
+
+######## a.pxd ########
+# cython: preliminary_late_includes_cy28=True
+
+cdef extern from "a_early.h":
+ ctypedef int my_int
+
+cdef extern from "a_late.h":
+ my_int square_value_plus_one()
+
+cdef my_int my_value "my_value"
+
+cdef my_int square "square"(my_int x)
+
+######## a.pyx ########
+
+my_value = 10
+
+cdef my_int square "square"(my_int x):
+ return x * x
+
+assert square_value_plus_one() == 101
+
+# Square must be explicitly used for its proto to be generated.
+cdef my_int use_square(x):
+ return square(x)
+
+######## a_early.h ########
+
+typedef int my_int;
+
+######## a_late.h ########
+
+static my_int square_value_plus_one() {
+ return square(my_value) + 1;
+}
+
+######## b.pyx ########
+
+cimport a
+
+# Likewise, a.square must be explicitly used.
+assert a.square(a.my_value) + 1 == 101
+assert a.square_value_plus_one() == 101
pack('i', v),
local_pack('f', v),
]
+
+
+cdef class SelfCast:
+    """
+    >>> f = SelfCast()
+    >>> f.index_of_self([f])
+    0
+    >>> f.index_of_self([]) # doctest: +ELLIPSIS
+    Traceback (most recent call last):
+    ValueError...
+    """
+    def index_of_self(self, list orbit not None):
+        # list.index() must accept 'self' (an extension-type instance)
+        # without an explicit cast; raises ValueError when absent (doctest).
+        return orbit.index(self)
print j
-cdef extern from "for_from_pyvar_loop_T601_extern_def.h":
+cdef extern from *:
+ """typedef unsigned long Ulong;"""
ctypedef unsigned long Ulong
cdef Ulong size():
+++ /dev/null
-
-typedef unsigned long Ulong;
+# mode: run
# ticket: 372
cimport cython
print
return i,n
+
@cython.test_assert_path_exists("//ForFromStatNode")
@cython.test_fail_if_path_exists("//ForInStatNode")
def test_negindex():
n = 0
return i,n
+
@cython.test_assert_path_exists("//ForFromStatNode",
"//ForFromStatNode//PrintStatNode//CoerceToPyTypeNode")
@cython.test_fail_if_path_exists("//ForInStatNode")
n = 0
return i,n
+
@cython.test_assert_path_exists("//ForFromStatNode")
@cython.test_fail_if_path_exists("//ForInStatNode")
def test_fix():
print
return i
+
@cython.test_assert_path_exists("//ForFromStatNode")
@cython.test_fail_if_path_exists("//ForInStatNode")
def test_break():
print
return i,n
+
@cython.test_assert_path_exists("//ForFromStatNode")
@cython.test_fail_if_path_exists("//ForInStatNode")
def test_return():
"""
cdef RangeEnum n = EnumValue3
for i in range(n):
+ assert 0 <= <int>i < <int>n
assert cython.typeof(i) == "RangeEnum", cython.typeof(i)
return cython.typeof(i)
# mode: run
-# tag: f_strings, pep498
+# tag: f_strings, pep498, werror
####
# Cython specific PEP 498 tests in addition to test_fstring.pyx from CPython
import sys
IS_PYPY = hasattr(sys, 'pypy_version_info')
-cdef extern from *:
- int INT_MAX
- long LONG_MAX
- long LONG_MIN
+from libc.limits cimport INT_MAX, LONG_MAX, LONG_MIN
max_int = INT_MAX
max_long = LONG_MAX
return a, b, c
+ctypedef enum TestValues:
+ enum_ABC = 1
+ enum_XYZ = 2
+
+
+@cython.test_fail_if_path_exists(
+ "//CoerceToPyTypeNode",
+)
+def format_c_enum():
+ """
+ >>> s = format_c_enum()
+ >>> s == '1-2' or s
+ True
+ """
+ return f"{enum_ABC}-{enum_XYZ}"
+
+
def format_c_numbers(signed char c, short s, int n, long l, float f, double d):
"""
>>> s1, s2, s3, s4 = format_c_numbers(123, 135, 12, 12312312, 2.3456, 3.1415926)
return s1, s2, s3, s4
+@cython.test_fail_if_path_exists(
+ "//CoerceToPyTypeNode",
+)
def format_c_numbers_max(int n, long l):
"""
>>> n, l = max_int, max_long
return s1, s2
+def format_c_number_const():
+ """
+ >>> s = format_c_number_const()
+ >>> s == '{0}'.format(max_long) or s
+ True
+ """
+ return f"{LONG_MAX}"
+
+
+@cython.test_fail_if_path_exists(
+ "//CoerceToPyTypeNode",
+)
def format_c_number_range(int n):
"""
- >>> for i in range(-1000, 1000):
+ >>> for i in range(-1000, 1001):
... assert format_c_number_range(i) == str(i)
"""
return f'{n}'
+@cython.test_fail_if_path_exists(
+ "//CoerceToPyTypeNode",
+)
def format_c_number_range_width(int n):
"""
- >>> for i in range(-1000, 1000):
- ... assert format_c_number_range_width(i) == '%04d' % i, format_c_number_range_width(i)
+ >>> for i in range(-1000, 1001):
+ ... formatted = format_c_number_range_width(i)
+ ... expected = '{n:04d}'.format(n=i)
+ ... assert formatted == expected, "%r != %r" % (formatted, expected)
"""
return f'{n:04}'
+def format_c_number_range_width0(int n):
+ """
+ >>> for i in range(-100, 101):
+ ... formatted = format_c_number_range_width0(i)
+ ... expected = '{n:00d}'.format(n=i)
+ ... assert formatted == expected, "%r != %r" % (formatted, expected)
+ """
+ return f'{n:00}'
+
+
+@cython.test_fail_if_path_exists(
+ "//CoerceToPyTypeNode",
+)
+def format_c_number_range_width1(int n):
+ """
+ >>> for i in range(-100, 101):
+ ... formatted = format_c_number_range_width1(i)
+ ... expected = '{n:01d}'.format(n=i)
+ ... assert formatted == expected, "%r != %r" % (formatted, expected)
+ """
+ return f'{n:01}'
+
+
+@cython.test_fail_if_path_exists(
+ "//CoerceToPyTypeNode",
+)
+def format_c_number_range_width_m4(int n):
+ """
+ >>> for i in range(-100, 101):
+ ... formatted = format_c_number_range_width_m4(i)
+ ... expected = '{n:-4d}'.format(n=i)
+ ... assert formatted == expected, "%r != %r" % (formatted, expected)
+ """
+ return f'{n:-4}'
+
+
def format_c_number_range_dyn_width(int n, int width):
"""
- >>> for i in range(-1000, 1000):
+ >>> for i in range(-1000, 1001):
... assert format_c_number_range_dyn_width(i, 0) == str(i), format_c_number_range_dyn_width(i, 0)
... assert format_c_number_range_dyn_width(i, 1) == '%01d' % i, format_c_number_range_dyn_width(i, 1)
... assert format_c_number_range_dyn_width(i, 4) == '%04d' % i, format_c_number_range_dyn_width(i, 4)
return f'{n:0{width}}'
+@cython.test_fail_if_path_exists(
+ "//CoerceToPyTypeNode",
+)
def format_bool(bint x):
"""
>>> a, b, c, d = format_bool(1)
b = f'x{value!s:6}x'
assert isinstance(b, unicode), type(b)
return a, b
+
+
+@cython.test_fail_if_path_exists(
+ "//FormattedValueNode", # bytes.decode() returns unicode => formatting is useless
+ "//JoinedStrNode", # replaced by call to PyUnicode_Concat()
+ "//PythonCapiCallNode//PythonCapiCallNode",
+)
+def format_decoded_bytes(bytes value):
+ """
+ >>> print(format_decoded_bytes(b'xyz'))
+ U-xyz
+ """
+ return f"U-{value.decode('utf-8')}"
+
+
+@cython.test_fail_if_path_exists(
+ "//AddNode",
+ "//ModNode",
+)
+@cython.test_assert_path_exists(
+ "//FormattedValueNode",
+ "//JoinedStrNode",
+)
+def generated_fstring(int i, unicode u not None, o):
+ """
+ >>> i, u, o = 11, u'xyz', [1]
+ >>> print(((
+ ... u"(i) %s-%.3s-%r-%.3r-%d-%3d-%o-%04o-%x-%4x-%X-%03X-%.1f-%04.2f %% "
+ ... u"(u) %s-%.2s-%r-%.7r %% "
+ ... u"(o) %s-%.2s-%r-%.2r"
+ ... ) % (
+ ... i, i, i, i, i, i, i, i, i, i, i, i, i, i,
+ ... u, u, u, u,
+ ... o, o, o, o,
+ ... )).replace("-u'xyz'", "-'xyz'"))
+ (i) 11-11-11-11-11- 11-13-0013-b- b-B-00B-11.0-11.00 % (u) xyz-xy-'xyz'-'xyz' % (o) [1]-[1-[1]-[1
+
+ >>> print(generated_fstring(i, u, o).replace("-u'xyz'", "-'xyz'"))
+ (i) 11-11-11-11-11- 11-13-0013-b- b-B-00B-11.0-11.00 % (u) xyz-xy-'xyz'-'xyz' % (o) [1]-[1-[1]-[1
+ """
+ return (
+ u"(i) %s-%.3s-%r-%.3r-%d-%3d-%o-%04o-%x-%4x-%X-%03X-%.1f-%04.2f %% "
+ u"(u) %s-%.2s-%r-%.7r %% "
+ u"(o) %s-%.2s-%r-%.2r"
+ ) % (
+ i, i, i, i, i, i, i, i, i, i, i, i, i, i,
+ u, u, u, u,
+ o, o, o, o,
+ )
profile.runcall(func, 19)
assert_stats(profile, func.__name__)
+from collatz import run_generator, cy_generator
+func = cy_generator
+profile = line_profiler.LineProfiler(func)
+profile.runcall(run_generator, 19)
+assert_stats(profile, func.__name__)
+
+from collatz import run_coro, cy_coro
+func = cy_coro
+profile = line_profiler.LineProfiler(func)
+profile.runcall(run_coro, 19)
+assert_stats(profile, func.__name__)
+
from collatz import PyClass
obj = PyClass()
func = obj.py_pymethod
@cython.binding(True)
+def cy_generator(int n):
+ x = 1
+ for i in range(n):
+ yield x + 2
+
+
+@cython.binding(True)
+def run_generator(n):
+ assert len(list(cy_generator(n))) == n
+
+
+@cython.binding(True)
+async def cy_coro(int n):
+ while n > 1:
+ if n % 2 == 0:
+ n //= 2
+ else:
+ n = 3*n+1
+
+
+@cython.binding(True)
+def run_coro(n):
+ coro = cy_coro(n)
+ try:
+ coro.send(None)
+ except StopIteration:
+ assert True
+ else:
+ assert False, "Coroutine did not raise"
+
+
+@cython.binding(True)
class PyClass(object):
def py_pymethod(self):
x = 1
l1.reverse()
return l1
+
+@cython.test_assert_path_exists(
+ '//SimpleCallNode//AttributeNode[@entry.cname = "__Pyx_PyList_Append"]',
+)
def test_list_append():
"""
>>> test_list_append()
l1.append(4)
return l1
+
+@cython.test_assert_path_exists(
+ '//SimpleCallNode//NameNode[@entry.cname = "__Pyx_PyList_Append"]',
+)
+def test_list_append_unbound():
+ """
+ >>> test_list_append_unbound()
+ [1, 2, 3, 4]
+ """
+ cdef list l1 = [1,2]
+ list.append(l1, 3)
+ list.append(l1, 4)
+ return l1
+
+
+@cython.test_assert_path_exists(
+ '//SimpleCallNode//NameNode[@entry.cname = "__Pyx_PyList_Append"]',
+)
+def test_list_append_unbound_assigned():
+ """
+ >>> test_list_append_unbound_assigned()
+ [1, 2, 3, 4]
+ """
+ append = list.append
+ cdef list l1 = [1,2]
+ append(l1, 3)
+ append(l1, 4)
+ return l1
+
+
def test_list_append_insert():
"""
>>> test_list_append_insert()
return i == 2
return False
-def test_list_extend():
+
+@cython.test_assert_path_exists(
+ '//PythonCapiCallNode//PythonCapiFunctionNode[@cname = "__Pyx_ListComp_Append"]',
+ '//PythonCapiCallNode//PythonCapiFunctionNode[@cname = "__Pyx_PyList_Append"]',
+)
+def test_list_extend(seq=None, x=4):
"""
>>> test_list_extend()
- [1, 2, 3, 4, 5, 6]
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
+ >>> test_list_extend([])
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
+ >>> test_list_extend([1])
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1]
+ >>> test_list_extend([1, 2])
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2]
"""
cdef list l = [1,2,3]
l.extend([])
l.extend(())
- l.extend(set())
+ l.extend(set()) # not currently optimised (not worth the trouble)
assert l == [1,2,3]
assert len(l) == 3
- l.extend([4,5,6])
+ l.extend([4,x+1,6])
+ l.extend([7,8,9,10,11,12,13,14,15,16])
+ if seq is not None:
+ l.extend(seq)
return l
+
+@cython.test_assert_path_exists(
+ '//PythonCapiCallNode//PythonCapiFunctionNode[@cname = "__Pyx_ListComp_Append"]',
+ '//PythonCapiCallNode//PythonCapiFunctionNode[@cname = "__Pyx_PyList_Append"]',
+)
+def test_list_extend_unbound(seq=None, x=4):
+ """
+ >>> test_list_extend_unbound()
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
+ >>> test_list_extend_unbound([])
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
+ >>> test_list_extend_unbound([1])
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1]
+ >>> test_list_extend_unbound([1, 2])
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2]
+ """
+ cdef list l = [1,2,3]
+ list.extend(l, [])
+ list.extend(l, ())
+ try:
+ list.extend((), ())
+ except TypeError:
+ pass
+ else:
+ assert False, "TypeError not raised!"
+ list.extend(l, set()) # not currently optimised (not worth the trouble)
+ assert l == [1,2,3]
+ assert len(l) == 3
+ list.extend(l, [4,x+1,6])
+ list.extend(l, [7,8,9,10,11,12,13,14,15,16])
+ if seq is not None:
+ list.extend(l, seq)
+ return l
+
+@cython.test_assert_path_exists(
+ '//PythonCapiCallNode//PythonCapiFunctionNode[@cname = "__Pyx_ListComp_Append"]',
+ '//PythonCapiCallNode//PythonCapiFunctionNode[@cname = "__Pyx_PyList_Append"]',
+)
+def test_list_extend_sideeffect(seq=None, exc=False):
+ """
+ >>> test_list_extend_sideeffect()
+ ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [4, 6, 7, 8])
+ >>> test_list_extend_sideeffect([])
+ ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [4, 6, 7, 8])
+ >>> test_list_extend_sideeffect([], exc=True)
+ ([1, 2, 3, 10, 11, 12, 13, 14, 15, 16], [4, 7, 8])
+ >>> test_list_extend_sideeffect([1])
+ ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1], [4, 6, 7, 8])
+ >>> test_list_extend_sideeffect([1], exc=True)
+ ([1, 2, 3, 10, 11, 12, 13, 14, 15, 16, 1], [4, 7, 8])
+ >>> test_list_extend_sideeffect([1, 2])
+ ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2], [4, 6, 7, 8])
+ """
+ calls = []
+ def sideeffect(value):
+ calls.append(value)
+ return value
+ def fail(value):
+ if exc:
+ raise TypeError("HUHU")
+ return value
+
+ cdef list l = [1,2,3]
+ l.extend([])
+ l.extend(())
+ l.extend(set()) # not currently optimised (not worth the trouble)
+ assert l == [1,2,3]
+ assert len(l) == 3
+
+ # Must first build all items, then append them in order.
+ # If building one value fails, none of them must be appended.
+ try:
+ l.extend([sideeffect(4), fail(5), sideeffect(6)])
+ except TypeError as e:
+ assert exc
+ assert "HUHU" in str(e)
+ else:
+ assert not exc
+
+ try:
+ l.extend([sideeffect(7), sideeffect(8), fail(9)])
+ except TypeError as e:
+ assert exc
+ assert "HUHU" in str(e)
+ else:
+ assert not exc
+
+ l.extend([10,11,12,13,14,15,16])
+ if seq is not None:
+ l.extend(seq)
+ return l, calls
+
+
def test_none_list_extend(list l):
"""
>>> test_none_list_extend([])
# Make sure the dtype looks like we expect
assert descr.fields == {'a': (py_numpy.dtype('int32'), 0),
- 'b': (py_numpy.dtype(('<f8', (3, 3))), 4)}
+ 'b': (py_numpy.dtype(('<f8', (3, 3))), 4)}, descr.fields
# Make sure that HASSUBARRAY is working
assert not np.PyDataType_HASSUBARRAY(descr)
assert np.PyDataType_HASSUBARRAY(b_descr)
# Make sure the direct field access works
- assert <tuple>b_descr.subarray.shape == (3, 3)
+ assert <tuple>b_descr.subarray.shape == (3, 3), <tuple>b_descr.subarray.shape
# Make sure the safe high-level helper function works
- assert np.PyDataType_SHAPE(descr) == ()
- assert np.PyDataType_SHAPE(a_descr) == ()
- assert np.PyDataType_SHAPE(b_descr) == (3, 3)
+ assert np.PyDataType_SHAPE(descr) == (), np.PyDataType_SHAPE(descr)
+ assert np.PyDataType_SHAPE(a_descr) == (), np.PyDataType_SHAPE(a_descr)
+ assert np.PyDataType_SHAPE(b_descr) == (3, 3), np.PyDataType_SHAPE(b_descr)
[5 6 7 8 9]]
2 0 9 5
- >>> three_dim()
- [[[ 0. 1. 2. 3.]
- [ 4. 5. 6. 7.]]
- <_BLANKLINE_>
- [[ 8. 9. 10. 11.]
- [ 12. 13. 14. 15.]]
- <_BLANKLINE_>
- [[ 16. 17. 18. 19.]
- [ 20. 21. 22. 23.]]]
+ >>> three_dim() # doctest: +NORMALIZE_WHITESPACE
+ [[[0. 1. 2. 3.]
+ [4. 5. 6. 7.]]
+ <BLANKLINE>
+ [[8. 9. 10. 11.]
+ [12. 13. 14. 15.]]
+ <BLANKLINE>
+ [[16. 17. 18. 19.]
+ [20. 21. 22. 23.]]]
6.0 0.0 13.0 8.0
>>> obj_array()
def ndarray_str(arr):
u"""
- Since Py2.3 doctest don't support <BLANKLINE>, manually replace blank lines
- with <_BLANKLINE_>
+ Work around display differences in NumPy 1.14.
"""
- return unicode(arr).replace(u'\n\n', u'\n<_BLANKLINE_>\n')
+ return re.sub(ur'\[ +', '[', unicode(arr))
def basic():
cdef object[int, ndim=2] buf = np.arange(10, dtype='i').reshape((2, 5))
5
"""
return False or 5
+
+cdef class A(object):
+ def __repr__(self):
+ return "A"
+
+def test_GH2059_missing_cast():
+ """
+ >>> test_GH2059_missing_cast()
+ (A, A)
+ """
+ cdef A a = A()
+ cdef object o = None
+ cdef A a_first = a or o
+ cdef A a_second = o or a
+ return a_first, a_second
op_res = op(a, b)
except OverflowError:
assign_overflow = True
- assert func_overflow == assign_overflow, "Inconsistant overflow: %s(%s, %s)" % (func, a, b)
+ assert func_overflow == assign_overflow, "Inconsistent overflow: %s(%s, %s)" % (func, a, b)
if not func_overflow:
- assert res == op_res, "Inconsistant values: %s(%s, %s) == %s != %s" % (func, a, b, res, op_res)
+ assert res == op_res, "Inconsistent values: %s(%s, %s) == %s != %s" % (func, a, b, res, op_res)
medium_values = (max_value_ / 2, max_value_ / 3, min_value_ / 2, <INT>sqrt(<long double>max_value_) - <INT>1, <INT>sqrt(<long double>max_value_) + 1)
def run_test(func, op):
T = TypeVar('T')
-class Box(Generic[T]):
- def __init__(self, content):
- self.content: T = content
-
-
-box = Box(content=5)
+# FIXME: this fails in Py3.7 now
+#class Box(Generic[T]):
+# def __init__(self, content):
+# self.content: T = content
+#
+#box = Box(content=5)
class Cls(object):
(y): int = 0 # Same situation here.
+@cython.test_assert_path_exists(
+ "//WhileStatNode",
+ "//WhileStatNode//DictIterationNextNode",
+)
+def iter_declared_dict(d):
+ """
+ >>> d = {1.1: 2.5, 3.3: 4.5}
+ >>> iter_declared_dict(d)
+ 7.0
+
+ >>> class D(object):
+ ... def __getitem__(self, x): return 2
+ ... def __iter__(self): return iter([1, 2, 3])
+ >>> iter_declared_dict(D())
+ 6.0
+ """
+ typed_dict : Dict[float, float] = d
+ s = 0.0
+ for key in typed_dict:
+ s += d[key]
+ return s
+
+
+@cython.test_assert_path_exists(
+ "//WhileStatNode",
+ "//WhileStatNode//DictIterationNextNode",
+)
+def iter_declared_dict_arg(d : Dict[float, float]):
+ """
+ >>> d = {1.1: 2.5, 3.3: 4.5}
+ >>> iter_declared_dict_arg(d)
+ 7.0
+
+ >>> class D(object):
+ ... def __getitem__(self, x): return 2
+ ... def __iter__(self): return iter([1, 2, 3])
+ >>> iter_declared_dict_arg(D())
+ 6.0
+ """
+ s = 0.0
+ for key in d:
+ s += d[key]
+ return s
+
_WARNINGS = """
37:19: Unknown type declaration in annotation, ignoring
38:12: Unknown type declaration in annotation, ignoring
39:18: Unknown type declaration in annotation, ignoring
73:19: Unknown type declaration in annotation, ignoring
+# FIXME: these are sort-of evaluated now, so the warning is misleading
+126:21: Unknown type declaration in annotation, ignoring
+137:35: Unknown type declaration in annotation, ignoring
"""
+import sys
+IS_PY2 = sys.version_info[0] < 3
+
import cython
is_compiled = cython.compiled
if x == 0:
raise ValueError
return x+1
+
+
+# Pure-Python (shadow) syntax for a final cdef class with a C-typed attribute.
+@cython.final
+@cython.cclass
+class CClass(object):
+    """
+    >>> c = CClass(2)
+    >>> c.get_attr()
+    int
+    2
+    """
+    # Declares 'attr' as a C int attribute via the shadow API.
+    cython.declare(attr=cython.int)
+
+    def __init__(self, attr):
+        self.attr = attr
+
+    def get_attr(self):
+        # typeof() must report the declared C type ("int" — see doctest).
+        print(cython.typeof(self.attr))
+        return self.attr
+
+
+class TestUnboundMethod:
+ """
+ >>> C = TestUnboundMethod
+ >>> IS_PY2 or (C.meth is C.__dict__["meth"])
+ True
+ """
+ def meth(self): pass
--- /dev/null
+# mode: run
+# tag: asyncio, gh1685
+
+PYTHON setup.py build_ext -i
+PYTHON main.py
+
+
+######## setup.py ########
+
+from Cython.Build import cythonize
+from distutils.core import setup
+
+setup(
+ ext_modules = cythonize("*.pyx"),
+)
+
+
+######## main.py ########
+
+import asyncio
+import cy_test
+from contextlib import closing
+
+async def main():
+ await cy_test.say()
+
+with closing(asyncio.get_event_loop()) as loop:
+ print("Running Python coroutine ...")
+ loop.run_until_complete(main())
+
+ print("Running Cython coroutine ...")
+ loop.run_until_complete(cy_test.say())
+
+
+######## cy_test.pyx ########
+
+import asyncio
+from py_test import py_async
+
+async def cy_async():
+ print("- this one is from Cython")
+
+async def say():
+ await cb()
+
+async def cb():
+ print("awaiting:")
+ await cy_async()
+ await py_async()
+ print("sleeping:")
+ await asyncio.sleep(0.5)
+ print("done!")
+
+
+######## py_test.py ########
+
+async def py_async():
+ print("- and this one is from Python")
# tag: pep492, asyncfor, await
-def run_async(coro):
- #assert coro.__class__ is types.GeneratorType
- assert coro.__class__.__name__ in ('coroutine', 'GeneratorWrapper'), coro.__class__.__name__
+def run_async(coro, ignore_type=False):
+ if not ignore_type:
+ #assert coro.__class__ is types.GeneratorType
+ assert coro.__class__.__name__ in ('coroutine', 'GeneratorWrapper'), coro.__class__.__name__
buffer = []
result = None
return await awaitable
return simple, awaiting
+
+
+cimport cython
+
+def yield_from_cyobject():
+ """
+ >>> async def py_simple_nonit():
+ ... return 10
+
+ >>> async def run_await(awaitable):
+ ... return await awaitable
+
+ >>> def run_yield_from(it):
+ ... return (yield from it)
+
+ >>> simple_nonit, simple_it, awaiting, yield_from = yield_from_cyobject()
+
+ >>> buffer, result = run_async(run_await(simple_it()))
+ >>> result
+ 10
+ >>> buffer, result = run_async(run_await(awaiting(simple_it())))
+ >>> result
+ 10
+ >>> buffer, result = run_async(awaiting(run_await(simple_it())), ignore_type=True)
+ >>> result
+ 10
+ >>> buffer, result = run_async(run_await(py_simple_nonit()))
+ >>> result
+ 10
+
+ >>> buffer, result = run_async(run_yield_from(awaiting(run_await(simple_it()))), ignore_type=True)
+ >>> result
+ 10
+
+ >>> buffer, result = run_async(run_yield_from(simple_it()), ignore_type=True)
+ >>> result
+ 10
+ >>> buffer, result = run_async(yield_from(simple_it()), ignore_type=True)
+ >>> result
+ 10
+
+ >>> next(run_yield_from(simple_nonit())) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ TypeError: ...
+ >>> next(run_yield_from(py_simple_nonit())) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ TypeError: ...
+ >>> next(yield_from(py_simple_nonit()))
+ Traceback (most recent call last):
+ TypeError: 'coroutine' object is not iterable
+ """
+ async def simple_nonit():
+ return 10
+
+ @cython.iterable_coroutine
+ async def simple_it():
+ return 10
+
+ @cython.iterable_coroutine
+ async def awaiting(awaitable):
+ return await awaitable
+
+ def yield_from(it):
+ return (yield from it)
+
+ return simple_nonit, simple_it, awaiting, yield_from
+# mode: run
+# tag: pickle
+
import cython
import sys
return s1
+def test_set_contains(v):
+    """
+    >>> test_set_contains(1)
+    True
+    >>> test_set_contains(2)
+    False
+    >>> test_set_contains(frozenset([1, 2, 3]))
+    True
+    >>> test_set_contains(frozenset([1, 2]))
+    False
+    >>> test_set_contains(set([1, 2, 3]))
+    True
+    >>> test_set_contains(set([1, 2]))
+    False
+    >>> try: test_set_contains([1, 2])
+    ... except TypeError: pass
+    ... else: print("NOT RAISED!")
+    """
+    cdef set s1
+    s1 = set()
+    s1.add(1)
+    s1.add('a')
+    s1.add(frozenset([1, 2, 3]))
+    # 'in' on a typed set: as in CPython, an unhashable set argument is
+    # accepted for the membership test (matching the stored frozenset),
+    # while an unhashable list raises TypeError (see doctests).
+    return v in s1
+
+
def test_set_update(v=None):
"""
>>> type(test_set_update()) is set
--- /dev/null
+# mode: run
+# tag: set
+
+cimport cython
+
+
+@cython.test_assert_path_exists(
+ "//SetIterationNextNode",
+)
+def set_iter_comp(set s):
+ """
+ >>> s = set([1, 2, 3])
+ >>> sorted(set_iter_comp(s))
+ [1, 2, 3]
+ """
+ return [x for x in s]
+
+
+@cython.test_assert_path_exists(
+    "//SetIterationNextNode",
+)
+def set_iter_comp_typed(set s):
+    """
+    >>> s = set([1, 2, 3])
+    >>> sorted(set_iter_comp_typed(s))
+    [1, 2, 3]
+    """
+    # 'x' is a C int, so iteration additionally exercises int coercion;
+    # the optimised set iteration (SetIterationNextNode) must still be used.
+    cdef int x
+    return [x for x in s]
+
+
+@cython.test_assert_path_exists(
+ "//SetIterationNextNode",
+)
+def frozenset_iter_comp(frozenset s):
+ """
+ >>> s = frozenset([1, 2, 3])
+ >>> sorted(frozenset_iter_comp(s))
+ [1, 2, 3]
+ """
+ return [x for x in s]
+
+
+@cython.test_assert_path_exists(
+    "//SetIterationNextNode",
+)
+def set_iter_comp_frozenset(set s):
+    """
+    >>> s = set([1, 2, 3])
+    >>> sorted(set_iter_comp_frozenset(s))
+    [1, 2, 3]
+    """
+    # Iterating over a frozenset built from 's' must also take the
+    # optimised set-iteration path.
+    return [x for x in frozenset(s)]
+
+
+@cython.test_assert_path_exists(
+    "//SetIterationNextNode",
+)
+def set_iter_modify(set s, int value):
+    """
+    >>> s = set([1, 2, 3])
+    >>> sorted(set_iter_modify(s, 1))
+    [1, 2, 3]
+    >>> sorted(set_iter_modify(s, 2))
+    [1, 2, 3]
+    >>> sorted(set_iter_modify(s, 3))
+    [1, 2, 3]
+    >>> sorted(set_iter_modify(s, 4))
+    Traceback (most recent call last):
+    RuntimeError: set changed size during iteration
+    """
+    # Adding an element that is already present leaves the size unchanged
+    # and must not trip the mutation check; adding a genuinely new element
+    # must raise RuntimeError, matching plain Python iteration semantics.
+    for x in s:
+        s.add(value)
+    return s
+
+
+@cython.test_fail_if_path_exists(
+ "//SimpleCallNode//NameNode[@name = 'enumerate']",
+)
+@cython.test_assert_path_exists(
+ "//AddNode",
+ "//SetIterationNextNode",
+)
+def set_iter_enumerate(set s):
+ """
+ >>> s = set(['a', 'b', 'c'])
+ >>> numbers, values = set_iter_enumerate(s)
+ >>> sorted(numbers)
+ [0, 1, 2]
+ >>> sorted(values)
+ ['a', 'b', 'c']
+ """
+ cdef int i
+ numbers = []
+ values = []
+ for i, x in enumerate(s):
+ numbers.append(i)
+ values.append(x)
+ return numbers, values
"""
return args + tuple(sorted(kwargs.items()))
+ @staticmethod
+ def no_args():
+ """
+ >>> ArgsKwargs().no_args()
+ OK!
+ """
+ print("OK!")
+
class StaticmethodSubclass(staticmethod):
"""
return s.endswith(sub, start, stop)
+def object_as_name(object):
+    """
+    >>> object_as_name('abx')
+    True
+    >>> object_as_name('abc')
+    False
+    """
+    # 'object' deliberately shadows the builtin type name as a parameter;
+    # the .endswith() call must dispatch on the argument, not the builtin.
+    return object.endswith("x")
+
+
+def str_as_name(str):
+    """
+    >>> str_as_name('abx')
+    True
+    >>> str_as_name('abc')
+    False
+    """
+    # 'str' deliberately shadows the builtin type name as a parameter;
+    # the optimised str.endswith() handling must not misfire here.
+    return str.endswith("x")
+
+
@cython.test_assert_path_exists(
"//SimpleCallNode",
"//SimpleCallNode//NoneCheckNode",
self.assertEqual(buffer, [1, 2, 'MyException'])
+ def test_asyncio_cython_crash_gh1999(self):
+ async def await_future(loop):
+ fut = loop.create_future()
+ loop.call_later(1, lambda: fut.set_result(1))
+ await fut
+
+ async def delegate_to_await_future(loop):
+ await await_future(loop)
+
+ ns = {}
+ __builtins__.exec("""
+ async def call(loop, await_func): # requires Py3.5+
+ await await_func(loop)
+ """.strip(), ns, ns)
+ call = ns['call']
+
+ import asyncio
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ loop.run_until_complete(call(loop, delegate_to_await_future))
+ finally:
+ loop.close()
+ asyncio.set_event_loop(None)
+
class SysSetCoroWrapperTest(unittest.TestCase):
--- /dev/null
+# mode: run
+
+from cpython.pythread cimport *
+
+
+cdef Py_tss_t *pass_py_tss_t_ptr(Py_tss_t *value):
+ return value
+
+
+def tss_create_delete():
+ """
+ >>> tss_create_delete()
+ (True, False)
+ """
+ cdef Py_tss_t tss_key
+ cdef bint after_create, after_delete
+ if PyThread_tss_create(&tss_key) != 0:
+ raise MemoryError()
+ after_create = PyThread_tss_is_created(&tss_key) != 0
+ assert after_create == PyThread_tss_is_created(pass_py_tss_t_ptr(&tss_key))
+ PyThread_tss_delete(&tss_key)
+ after_delete = PyThread_tss_is_created(&tss_key) != 0
+ return (after_create, after_delete)
+
+
+def tss_alloc_free():
+    """
+    Allocate a TSS key without creating it and free it again.
+
+    A freshly allocated (but not created) key must report "not created".
+
+    >>> tss_alloc_free()
+    False
+    """
+    cdef Py_tss_t *ptr_key
+    cdef bint after_alloc
+    ptr_key = PyThread_tss_alloc()
+    if ptr_key == NULL:
+        raise MemoryError()
+    after_alloc = PyThread_tss_is_created(ptr_key) != 0
+    PyThread_tss_free(ptr_key)
+    return after_alloc
+
+
+def tss_alloc_create_delete_free():
+    """
+    Walk a TSS key through its full life cycle:
+    alloc -> create -> delete -> free, checking the "created" state
+    at each step.
+
+    >>> tss_alloc_create_delete_free()
+    (False, True, False)
+    """
+    cdef Py_tss_t *ptr_key
+    cdef bint after_alloc, after_create, after_delete
+    ptr_key = PyThread_tss_alloc()
+    if ptr_key == NULL:
+        raise MemoryError()
+    # Not yet created right after allocation ...
+    after_alloc = PyThread_tss_is_created(ptr_key) != 0
+    if PyThread_tss_create(ptr_key) != 0:
+        raise MemoryError()
+    # ... created after PyThread_tss_create ...
+    after_create = PyThread_tss_is_created(ptr_key) != 0
+    PyThread_tss_delete(ptr_key)
+    # ... and no longer created after PyThread_tss_delete.
+    after_delete = PyThread_tss_is_created(ptr_key) != 0
+    PyThread_tss_free(ptr_key)
+    return (after_alloc, after_create, after_delete)
+
+
+def tss_set_get():
+ """
+ >>> tss_set_get()
+ 1
+ """
+ cdef Py_tss_t tss_key
+ cdef int the_value = 1
+ cdef int ret_value
+ if PyThread_tss_create(&tss_key) != 0:
+ raise MemoryError()
+ if PyThread_tss_get(&tss_key) == NULL:
+ PyThread_tss_set(&tss_key, <void *>&the_value)
+ ret_value = (<int *>PyThread_tss_get(&tss_key))[0]
+ PyThread_tss_delete(&tss_key)
+ return ret_value
TypeError: ...itera...
"""
return tuple(tuple(tuple(x)))
+
+
+@cython.test_fail_if_path_exists(
+    "//ExprStatNode//TupleNode",
+    "//ExprStatNode",
+)
+def unused_literals():
+    """
+    >>> unused_literals()
+    """
+    # Constant tuple expressions used as bare statements have no effect
+    # and must be discarded at compile time (no ExprStatNode may remain).
+    (1, 2, 3)
+    (1, 2, 3 + 4)
+    ("abc", 'def')
+    #(int(), 2, 3)
+
+
+@cython.test_assert_path_exists(
+    "//ExprStatNode",
+    "//ExprStatNode//TupleNode",
+)
+def unused_non_literal():
+    """
+    >>> unused_non_literal()
+    """
+    # These tuples contain calls (set(), range()), so the statements must
+    # be kept (ExprStatNode remains) even though the results are discarded.
+    (set(), None)
+    (range(10), None)
a = 3
a = 4
a = 5
- assert typeof(a) == "long"
+ assert typeof(a) == "long", typeof(a)
b = a
b = 3.1
b = 3.14159
- assert typeof(b) == "double"
+ assert typeof(b) == "double", typeof(b)
c = a
c = b
c = [1,2,3]
- assert typeof(c) == "Python object"
+ assert typeof(c) == "Python object", typeof(c)
+ d = b'abc'
+ d = bytes()
+ d = bytes(b'xyz')
+ d = None
+ assert typeof(d) == "bytes object", typeof(d)
+
def arithmetic():
"""
+# mode: run
+# tag: unicode
+
__doc__ = u"""
>>> u('test')
u'test'
+ >>> e
+ u''
>>> z
u'test'
>>> c('testing')
# u'testing a C subtype'
"""
+
+cimport cython
+
import sys
if sys.version_info[0] >= 3:
__doc__ = __doc__.replace(u" u'", u" '")
u = unicode
+e = unicode()
z = unicode(u'test')
+
def c(string):
return unicode(string)
+
class subu(unicode):
pass
+
def sub(string):
return subu(string)
+
#cdef class csubu(unicode):
# pass
+
#def csub(string):
# return csubu(string)
+
+@cython.test_fail_if_path_exists("//SimpleCallNode")
+@cython.test_assert_path_exists("//PythonCapiCallNode")
+def typed(unicode s):
+ """
+ >>> print(typed(None))
+ None
+ >>> type(typed(None)) is u or type(typed(None))
+ True
+ >>> print(typed(u'abc'))
+ abc
+ >>> type(typed(u'abc')) is u or type(typed(u'abc'))
+ True
+ """
+ return unicode(s)
+
+
+@cython.test_fail_if_path_exists(
+    "//SimpleCallNode",
+    "//PythonCapiCallNode",
+)
+def typed_not_none(unicode s not None):
+    """
+    >>> print(typed_not_none(u'abc'))
+    abc
+    >>> type(typed_not_none(u'abc')) is u or type(typed_not_none(u'abc'))
+    True
+    """
+    # With 'not None', unicode(s) is known to be a no-op and must be
+    # optimised away entirely (no call node of any kind may remain).
+    return unicode(s)
--- /dev/null
+static long cube(long x)
+{
+ return x * x * x;
+}
+
+#define long broken_long
--- /dev/null
+cdef extern from "verbatiminclude.h":
+ long cube(long)
+
+cdef extern from *:
+ """
+ static long square(long x)
+ {
+ return x * x;
+ }
+ """
+ long square(long)
+
+
+cdef extern from "verbatiminclude.h":
+ "typedef int myint;"
+ ctypedef int myint
+
+cdef extern from "verbatiminclude.h":
+ "#undef long"
+
+
+cdef class C:
+ cdef myint val
+
+
+cdef extern from "Python.h":
+ """
+ #define Py_SET_SIZE(obj, size) Py_SIZE((obj)) = (size)
+ """
+ void Py_SET_SIZE(object, Py_ssize_t)
+
+
+def test_square(x):
+ """
+ >>> test_square(4)
+ 16
+ """
+ return square(x)
+
+
+def test_cube(x):
+ """
+ >>> test_cube(4)
+ 64
+ """
+ return cube(x)
+
+
+def test_class():
+ """
+ >>> test_class()
+ 42
+ """
+ cdef C x = C()
+ x.val = 42
+ return x.val
+
+
+def test_set_size(x, size):
+ # This function manipulates Python objects in a bad way, so we
+ # do not call it. The real test is that it compiles.
+ Py_SET_SIZE(x, size)