PAPEROPT_a4 = -D latex_elements.papersize=a4paper
PAPEROPT_letter = -D latex_elements.papersize=letterpaper
-ALLSPHINXOPTS = -b $(BUILDER) -d build/doctrees $(PAPEROPT_$(PAPER)) \
+ALLSPHINXOPTS = -b $(BUILDER) -d build/doctrees $(PAPEROPT_$(PAPER)) -j auto \
$(SPHINXOPTS) $(SPHINXERRORHANDLING) . build/$(BUILDER) $(SOURCES)
.PHONY: help build html htmlhelp latex text texinfo changes linkcheck \
and writer of much of the content;
* the `Docutils <https://docutils.sourceforge.io/>`_ project for creating
reStructuredText and the Docutils suite;
-* Fredrik Lundh for his `Alternative Python Reference
- <http://effbot.org/zone/pyref.htm>`_ project from which Sphinx got many good
- ideas.
+* Fredrik Lundh for his Alternative Python Reference project from which Sphinx
+ got many good ideas.
Contributors to the Python Documentation
`Helping with Documentation <https://devguide.python.org/docquality/#helping-with-documentation>`_
     Comprehensive guide for individuals who are interested in contributing to Python documentation.
+ `Documentation Translations <https://devguide.python.org/documenting/#translating>`_
+ A list of GitHub pages for documentation translation and their primary contacts.
+
+
.. _using-the-tracker:
Using the Python issue tracker
Convert a Python integer to a C :c:type:`unsigned long long`
without overflow checking.
-``n`` (:class:`int`) [Py_ssize_t]
+``n`` (:class:`int`) [:c:type:`Py_ssize_t`]
Convert a Python integer to a C :c:type:`Py_ssize_t`.
``c`` (:class:`bytes` or :class:`bytearray` of length 1) [char]
``K`` (:class:`int`) [unsigned long long]
Convert a C :c:type:`unsigned long long` to a Python integer object.
- ``n`` (:class:`int`) [Py_ssize_t]
+ ``n`` (:class:`int`) [:c:type:`Py_ssize_t`]
Convert a C :c:type:`Py_ssize_t` to a Python integer.
``c`` (:class:`bytes` of length 1) [char]
Return a new bytearray object from any object, *o*, that implements the
:ref:`buffer protocol <bufferobjects>`.
- .. XXX expand about the buffer protocol, at least somewhere
-
.. c:function:: PyObject* PyByteArray_FromStringAndSize(const char *string, Py_ssize_t len)
Bytes Objects
-------------
-These functions raise :exc:`TypeError` when expecting a bytes parameter and are
+These functions raise :exc:`TypeError` when expecting a bytes parameter and
called with a non-bytes parameter.
.. index:: object: bytes
| :attr:`%lu` | unsigned long | Equivalent to |
| | | ``printf("%lu")``. [1]_ |
+-------------------+---------------+--------------------------------+
- | :attr:`%zd` | Py_ssize_t | Equivalent to |
- | | | ``printf("%zd")``. [1]_ |
+ | :attr:`%zd` | :c:type:`\ | Equivalent to |
+ | | Py_ssize_t` | ``printf("%zd")``. [1]_ |
+-------------------+---------------+--------------------------------+
| :attr:`%zu` | size_t | Equivalent to |
| | | ``printf("%zu")``. [1]_ |
:c:member:`~PyTypeObject.tp_new` and :c:member:`~PyTypeObject.tp_init`
also pass arguments this way.
-To call an object, use :c:func:`PyObject_Call` or other
+To call an object, use :c:func:`PyObject_Call` or another
:ref:`call API <capi-call>`.
.. c:function:: void PyErr_SyntaxLocation(const char *filename, int lineno)
- Like :c:func:`PyErr_SyntaxLocationEx`, but the col_offset parameter is
+ Like :c:func:`PyErr_SyntaxLocationEx`, but the *col_offset* parameter is
omitted.
Issue a warning message with explicit control over all warning attributes. This
is a straightforward wrapper around the Python function
- :func:`warnings.warn_explicit`, see there for more information. The *module*
+ :func:`warnings.warn_explicit`; see there for more information. The *module*
and *registry* arguments may be set to ``NULL`` to get the default effect
described there.
error indicator.
-.. c:function:: void PyErr_NormalizeException(PyObject**exc, PyObject**val, PyObject**tb)
+.. c:function:: void PyErr_NormalizeException(PyObject **exc, PyObject **val, PyObject **tb)
Under certain circumstances, the values returned by :c:func:`PyErr_Fetch` below
can be "unnormalized", meaning that ``*exc`` is a class object but ``*val`` is
+-----------------------------------------+---------------------------------+----------+
| C Name | Python Name | Notes |
+=========================================+=================================+==========+
-| :c:data:`PyExc_BaseException` | :exc:`BaseException` | \(1) |
+| :c:data:`PyExc_BaseException` | :exc:`BaseException` | [1]_ |
+-----------------------------------------+---------------------------------+----------+
-| :c:data:`PyExc_Exception` | :exc:`Exception` | \(1) |
+| :c:data:`PyExc_Exception` | :exc:`Exception` | [1]_ |
+-----------------------------------------+---------------------------------+----------+
-| :c:data:`PyExc_ArithmeticError` | :exc:`ArithmeticError` | \(1) |
+| :c:data:`PyExc_ArithmeticError` | :exc:`ArithmeticError` | [1]_ |
+-----------------------------------------+---------------------------------+----------+
| :c:data:`PyExc_AssertionError` | :exc:`AssertionError` | |
+-----------------------------------------+---------------------------------+----------+
+-----------------------------------------+---------------------------------+----------+
| :c:data:`PyExc_KeyboardInterrupt` | :exc:`KeyboardInterrupt` | |
+-----------------------------------------+---------------------------------+----------+
-| :c:data:`PyExc_LookupError` | :exc:`LookupError` | \(1) |
+| :c:data:`PyExc_LookupError` | :exc:`LookupError` | [1]_ |
+-----------------------------------------+---------------------------------+----------+
| :c:data:`PyExc_MemoryError` | :exc:`MemoryError` | |
+-----------------------------------------+---------------------------------+----------+
+-----------------------------------------+---------------------------------+----------+
| :c:data:`PyExc_NotImplementedError` | :exc:`NotImplementedError` | |
+-----------------------------------------+---------------------------------+----------+
-| :c:data:`PyExc_OSError` | :exc:`OSError` | \(1) |
+| :c:data:`PyExc_OSError` | :exc:`OSError` | [1]_ |
+-----------------------------------------+---------------------------------+----------+
| :c:data:`PyExc_OverflowError` | :exc:`OverflowError` | |
+-----------------------------------------+---------------------------------+----------+
+-----------------------------------------+---------------------------------+----------+
| :c:data:`PyExc_RecursionError` | :exc:`RecursionError` | |
+-----------------------------------------+---------------------------------+----------+
-| :c:data:`PyExc_ReferenceError` | :exc:`ReferenceError` | \(2) |
+| :c:data:`PyExc_ReferenceError` | :exc:`ReferenceError` | |
+-----------------------------------------+---------------------------------+----------+
| :c:data:`PyExc_RuntimeError` | :exc:`RuntimeError` | |
+-----------------------------------------+---------------------------------+----------+
+-------------------------------------+----------+
| :c:data:`PyExc_IOError` | |
+-------------------------------------+----------+
-| :c:data:`PyExc_WindowsError` | \(3) |
+| :c:data:`PyExc_WindowsError` | [2]_ |
+-------------------------------------+----------+
.. versionchanged:: 3.3
Notes:
-(1)
+.. [1]
This is a base class for other standard exceptions.
-(2)
+.. [2]
Only defined on Windows; protect code that uses this by testing that the
preprocessor macro ``MS_WINDOWS`` is defined.
+------------------------------------------+---------------------------------+----------+
| C Name | Python Name | Notes |
+==========================================+=================================+==========+
-| :c:data:`PyExc_Warning` | :exc:`Warning` | \(1) |
+| :c:data:`PyExc_Warning` | :exc:`Warning` | [3]_ |
+------------------------------------------+---------------------------------+----------+
| :c:data:`PyExc_BytesWarning` | :exc:`BytesWarning` | |
+------------------------------------------+---------------------------------+----------+
Notes:
-(1)
+.. [3]
This is a base class for other standard warning categories.
.. index:: single: version (in module sys)
The first word (up to the first space character) is the current Python version;
- the first three characters are the major and minor version separated by a
+ the first characters are the major and minor version separated by a
period. The returned string points into static storage; the caller should not
modify its value. The value is available to Python code as :data:`sys.version`.
   argument is ``NULL``.
.. note::
- A freed key becomes a dangling pointer, you should reset the key to
+ A freed key becomes a dangling pointer. You should reset the key to
      ``NULL``.
* The :ref:`Python Configuration <init-python-config>` can be used to build a
customized Python which behaves as the regular Python. For example,
- environments variables and command line arguments are used to configure
+ environment variables and command line arguments are used to configure
Python.
* The :ref:`Isolated Configuration <init-isolated-conf>` can be used to embed
Python into an application. It isolates Python from the system. For example,
- environments variables are ignored, the LC_CTYPE locale is left unchanged and
+ environment variables are ignored, the LC_CTYPE locale is left unchanged and
no signal handler is registered.
The :c:func:`Py_RunMain` function can be used to write a customized Python
isolate Python from the system. For example, to embed Python into an
application.
-This configuration ignores global configuration variables, environments
+This configuration ignores global configuration variables, environment
variables, command line arguments (:c:member:`PyConfig.argv` is not parsed)
and user site directory. The C standard streams (ex: ``stdout``) and the
LC_CTYPE locale are left unchanged. Signal handlers are not installed.
==================================================
This section is a private provisional API introducing multi-phase
-initialization, the core feature of the :pep:`432`:
+initialization, the core feature of :pep:`432`:
* "Core" initialization phase, "bare minimum Python":
of a complex number. These will be discussed together with the functions that
use them.
+.. c:type:: Py_ssize_t
+
+ A signed integral type such that ``sizeof(Py_ssize_t) == sizeof(size_t)``.
+ C99 doesn't define such a thing directly (size_t is an unsigned integral type).
+ See :pep:`353` for details. ``PY_SSIZE_T_MAX`` is the largest positive value
+ of type :c:type:`Py_ssize_t`.
+
.. _api-exceptions:
.. c:function:: int PyAIter_Check(PyObject *o)
- Returns non-zero if the object 'obj' provides :class:`AsyncIterator`
- protocols, and ``0`` otherwise. This function always succeeds.
+ Return non-zero if the object *o* provides the :class:`AsyncIterator`
+ protocol, and ``0`` otherwise. This function always succeeds.
.. versionadded:: 3.10
Return a new :c:type:`PyLongObject` object from *v*, or ``NULL`` on failure.
The current implementation keeps an array of integer objects for all integers
- between ``-5`` and ``256``, when you create an int in that range you actually
+ between ``-5`` and ``256``. When you create an int in that range you actually
just get back a reference to the existing object.
.. c:function:: int PyMapping_Check(PyObject *o)
- Return ``1`` if the object provides mapping protocol or supports slicing,
+ Return ``1`` if the object provides the mapping protocol or supports slicing,
and ``0`` otherwise. Note that it returns ``1`` for Python classes with
- a :meth:`__getitem__` method since in general case it is impossible to
- determine what type of keys it supports. This function always succeeds.
+ a :meth:`__getitem__` method, since in general it is impossible to
+ determine what type of keys the class supports. This function always succeeds.
.. c:function:: Py_ssize_t PyMapping_Size(PyObject *o)
.. note::
There is no guarantee that the memory returned by these allocators can be
- successfully casted to a Python object when intercepting the allocating
+ successfully cast to a Python object when intercepting the allocating
functions in this domain by the methods described in
the :ref:`Customize Memory Allocators <customize-memory-allocators>` section.
.. c:type:: PyMemAllocatorEx
Structure used to describe a memory block allocator. The structure has
- four fields:
+ the following fields:
+----------------------------------------------------------+---------------------------------------+
| Field | Meaning |
.. c:function:: PyObject* PyInstanceMethod_New(PyObject *func)
- Return a new instance method object, with *func* being any callable object
+ Return a new instance method object, with *func* being any callable object.
*func* is the function that will be called when the instance method is
called.
.. c:function:: PyObject* PyNumber_FloorDivide(PyObject *o1, PyObject *o2)
Return the floor of *o1* divided by *o2*, or ``NULL`` on failure. This is
- equivalent to the "classic" division of integers.
+ the equivalent of the Python expression ``o1 // o2``.
.. c:function:: PyObject* PyNumber_TrueDivide(PyObject *o1, PyObject *o2)
*o2*, or ``NULL`` on failure. The return value is "approximate" because binary
floating point numbers are approximate; it is not possible to represent all real
numbers in base two. This function can return a floating point value when
- passed two integers.
+ passed two integers. This is the equivalent of the Python expression ``o1 / o2``.
.. c:function:: PyObject* PyNumber_Remainder(PyObject *o1, PyObject *o2)
floating point numbers are approximate; it is not possible to represent all real
numbers in base two. This function can return a floating point value when
passed two integers. The operation is done *in-place* when *o1* supports it.
+ This is the equivalent of the Python statement ``o1 /= o2``.
.. c:function:: PyObject* PyNumber_InPlaceRemainder(PyObject *o1, PyObject *o2)
.. c:function:: Py_ssize_t PyNumber_AsSsize_t(PyObject *o, PyObject *exc)
- Returns *o* converted to a Py_ssize_t value if *o* can be interpreted as an
+ Returns *o* converted to a :c:type:`Py_ssize_t` value if *o* can be interpreted as an
integer. If the call fails, an exception is raised and ``-1`` is returned.
If *o* can be converted to a Python int but the attempt to
- convert to a Py_ssize_t value would raise an :exc:`OverflowError`, then the
+ convert to a :c:type:`Py_ssize_t` value would raise an :exc:`OverflowError`, then the
*exc* argument is the type of exception that will be raised (usually
:exc:`IndexError` or :exc:`OverflowError`). If *exc* is ``NULL``, then the
exception is cleared and the value is clipped to ``PY_SSIZE_T_MIN`` for a negative
.. c:function:: int PyIndex_Check(PyObject *o)
- Returns ``1`` if *o* is an index integer (has the nb_index slot of the
- tp_as_number structure filled in), and ``0`` otherwise.
+ Returns ``1`` if *o* is an index integer (has the ``nb_index`` slot of the
+ ``tp_as_number`` structure filled in), and ``0`` otherwise.
This function always succeeds.
return ``0`` on success. This is the equivalent of the Python statement
``o.attr_name = v``.
- If *v* is ``NULL``, the attribute is deleted, however this feature is
+ If *v* is ``NULL``, the attribute is deleted, but this feature is
deprecated in favour of using :c:func:`PyObject_DelAttrString`.
.. versionchanged:: 3.2
The return type is now Py_hash_t. This is a signed integer the same size
- as Py_ssize_t.
+ as :c:type:`Py_ssize_t`.
.. c:function:: Py_hash_t PyObject_HashNotImplemented(PyObject *o)
of object *o*. On failure, raises :exc:`SystemError` and returns ``NULL``. This
is equivalent to the Python expression ``type(o)``. This function increments the
reference count of the return value. There's really no reason to use this
- function instead of the common expression ``o->ob_type``, which returns a
+ function instead of the :c:func:`Py_TYPE()` function, which returns a
pointer of type :c:type:`PyTypeObject*`, except when the incremented reference
count is needed.
It is a good idea to use this macro whenever decrementing the reference
count of an object that might be traversed during garbage collection.
+.. c:function:: void Py_IncRef(PyObject *o)
+
+ Increment the reference count for object *o*. A function version of :c:func:`Py_XINCREF`.
+ It can be used for runtime dynamic embedding of Python.
+
+
+.. c:function:: void Py_DecRef(PyObject *o)
+
+ Decrement the reference count for object *o*. A function version of :c:func:`Py_XDECREF`.
+ It can be used for runtime dynamic embedding of Python.
-The following functions are for runtime dynamic embedding of Python:
-``Py_IncRef(PyObject *o)``, ``Py_DecRef(PyObject *o)``. They are
-simply exported function versions of :c:func:`Py_XINCREF` and
-:c:func:`Py_XDECREF`, respectively.
The following functions or macros are only for use within the interpreter core:
:c:func:`_Py_Dealloc`, :c:func:`_Py_ForgetReference`, :c:func:`_Py_NewReference`,
.. c:function:: int PySequence_Check(PyObject *o)
- Return ``1`` if the object provides sequence protocol, and ``0`` otherwise.
+ Return ``1`` if the object provides the sequence protocol, and ``0`` otherwise.
Note that it returns ``1`` for Python classes with a :meth:`__getitem__`
- method unless they are :class:`dict` subclasses since in general case it
- is impossible to determine what the type of keys it supports. This
+ method, unless they are :class:`dict` subclasses, since in general it
+ is impossible to determine what type of keys the class supports. This
function always succeeds.
is the equivalent of the Python statement ``o[i] = v``. This function *does
not* steal a reference to *v*.
- If *v* is ``NULL``, the element is deleted, however this feature is
+ If *v* is ``NULL``, the element is deleted, but this feature is
deprecated in favour of using :c:func:`PySequence_DelItem`.
Returns the length of *o*, assuming that *o* was returned by
:c:func:`PySequence_Fast` and that *o* is not ``NULL``. The size can also be
- gotten by calling :c:func:`PySequence_Size` on *o*, but
+ retrieved by calling :c:func:`PySequence_Size` on *o*, but
:c:func:`PySequence_Fast_GET_SIZE` is faster because it can assume *o* is a
list or tuple.
object: frozenset
This section details the public API for :class:`set` and :class:`frozenset`
-objects. Any functionality not listed below is best accessed using the either
+objects. Any functionality not listed below is best accessed using either
the abstract object protocol (including :c:func:`PyObject_CallMethod`,
:c:func:`PyObject_RichCompareBool`, :c:func:`PyObject_Hash`,
:c:func:`PyObject_Repr`, :c:func:`PyObject_IsTrue`, :c:func:`PyObject_Print`, and
in that it is a fixed size for small sets (much like tuple storage) and will
point to a separate, variable sized block of memory for medium and large sized
sets (much like list storage). None of the fields of this structure should be
- considered public and are subject to change. All access should be done through
+ considered public and all are subject to change. All access should be done through
the documented API rather than by manipulating the values in the structure.
.. c:function:: int PySet_Add(PyObject *set, PyObject *key)
Add *key* to a :class:`set` instance. Also works with :class:`frozenset`
- instances (like :c:func:`PyTuple_SetItem` it can be used to fill-in the values
+ instances (like :c:func:`PyTuple_SetItem` it can be used to fill in the values
of brand new frozensets before they are exposed to other code). Return ``0`` on
success or ``-1`` on failure. Raise a :exc:`TypeError` if the *key* is
unhashable. Raise a :exc:`MemoryError` if there is no room to grow. Raise a
+=============+==================+===================================+
| name | const char \* | attribute name |
+-------------+------------------+-----------------------------------+
- | get | getter | C Function to get the attribute |
+ | get | getter | C function to get the attribute |
+-------------+------------------+-----------------------------------+
| set | setter | optional C function to set or |
| | | delete the attribute, if omitted |
Return a pointer to a newly allocated byte string, use :c:func:`PyMem_Free`
to free the memory. Return ``NULL`` on encoding error or memory allocation
- error
+ error.
If error_pos is not ``NULL``, ``*error_pos`` is set to ``(size_t)-1`` on
success, or set to the index of the invalid character on encoding error.
.. versionchanged:: 3.8
The function now uses the UTF-8 encoding on Windows if
- :c:data:`Py_LegacyWindowsFSEncodingFlag` is zero;
+ :c:data:`Py_LegacyWindowsFSEncodingFlag` is zero.
.. _systemfunctions:
leaks.)
Note that ``#`` format characters should always be treated as
- ``Py_ssize_t``, regardless of whether ``PY_SSIZE_T_CLEAN`` was defined.
+ :c:type:`Py_ssize_t`, regardless of whether ``PY_SSIZE_T_CLEAN`` was defined.
:func:`sys.audit` performs the same function from Python code.
.. versionchanged:: 3.8.2
- Require ``Py_ssize_t`` for ``#`` format characters. Previously, an
+ Require :c:type:`Py_ssize_t` for ``#`` format characters. Previously, an
unavoidable deprecation warning was raised.
.. c:function:: int PySys_AddAuditHook(Py_AuditHookFunction hook, void *userData)
Append the callable *hook* to the list of active auditing hooks.
- Return zero for success
+ Return zero on success
and non-zero on failure. If the runtime has been initialized, also set an
error on failure. Hooks added through this API are called for all
interpreters created by the runtime.
.. versionchanged:: 3.9
- Slots in :c:type:`PyBufferProcs` in may be set in the unlimited API.
+ Slots in :c:type:`PyBufferProcs` may be set in the unlimited API.
.. c:member:: void *PyType_Slot.pfunc
+================================================+===================================+===================+===+===+===+===+
| <R> :c:member:`~PyTypeObject.tp_name` | const char * | __name__ | X | X | | |
+------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+
- | :c:member:`~PyTypeObject.tp_basicsize` | Py_ssize_t | | X | X | | X |
+ | :c:member:`~PyTypeObject.tp_basicsize` | :c:type:`Py_ssize_t` | | X | X | | X |
+------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+
- | :c:member:`~PyTypeObject.tp_itemsize` | Py_ssize_t | | | X | | X |
+ | :c:member:`~PyTypeObject.tp_itemsize` | :c:type:`Py_ssize_t` | | | X | | X |
+------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+
| :c:member:`~PyTypeObject.tp_dealloc` | :c:type:`destructor` | | X | X | | X |
+------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+
- | :c:member:`~PyTypeObject.tp_vectorcall_offset` | Py_ssize_t | | | X | | X |
+ | :c:member:`~PyTypeObject.tp_vectorcall_offset` | :c:type:`Py_ssize_t` | | | X | | X |
+------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+
| (:c:member:`~PyTypeObject.tp_getattr`) | :c:type:`getattrfunc` | __getattribute__, | | | | G |
| | | __getattr__ | | | | |
| | | __gt__, | | | | |
| | | __ge__ | | | | |
+------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+
- | :c:member:`~PyTypeObject.tp_weaklistoffset` | Py_ssize_t | | | X | | ? |
+ | :c:member:`~PyTypeObject.tp_weaklistoffset` | :c:type:`Py_ssize_t` | | | X | | ? |
+------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+
| :c:member:`~PyTypeObject.tp_iter` | :c:type:`getiterfunc` | __iter__ | | | | X |
+------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+
| :c:member:`~PyTypeObject.tp_descr_set` | :c:type:`descrsetfunc` | __set__, | | | | X |
| | | __delete__ | | | | |
+------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+
- | :c:member:`~PyTypeObject.tp_dictoffset` | Py_ssize_t | | | X | | ? |
+ | :c:member:`~PyTypeObject.tp_dictoffset` | :c:type:`Py_ssize_t` | | | X | | ? |
+------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+
| :c:member:`~PyTypeObject.tp_init` | :c:type:`initproc` | __init__ | X | X | | X |
+------------------------------------------------+-----------------------------------+-------------------+---+---+---+---+
| :c:type:`allocfunc` | .. line-block:: | :c:type:`PyObject` * |
| | | |
| | :c:type:`PyTypeObject` * | |
-| | Py_ssize_t | |
+| | :c:type:`Py_ssize_t` | |
+-----------------------------+-----------------------------+----------------------+
| :c:type:`destructor` | void * | void |
+-----------------------------+-----------------------------+----------------------+
+-----------------------------+-----------------------------+----------------------+
| :c:type:`iternextfunc` | :c:type:`PyObject` * | :c:type:`PyObject` * |
+-----------------------------+-----------------------------+----------------------+
-| :c:type:`lenfunc` | :c:type:`PyObject` * | Py_ssize_t |
+| :c:type:`lenfunc` | :c:type:`PyObject` * | :c:type:`Py_ssize_t` |
+-----------------------------+-----------------------------+----------------------+
| :c:type:`getbufferproc` | .. line-block:: | int |
| | | |
| :c:type:`ssizeargfunc` | .. line-block:: | :c:type:`PyObject` * |
| | | |
| | :c:type:`PyObject` * | |
-| | Py_ssize_t | |
+| | :c:type:`Py_ssize_t` | |
+-----------------------------+-----------------------------+----------------------+
| :c:type:`ssizeobjargproc` | .. line-block:: | int |
| | | |
| | :c:type:`PyObject` * | |
-| | Py_ssize_t | |
+| | :c:type:`Py_ssize_t` | |
+-----------------------------+-----------------------------+----------------------+
| :c:type:`objobjproc` | .. line-block:: | int |
| | | |
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "mymod.MyObject",
.tp_basicsize = sizeof(MyObject),
- .tp_doc = "My objects",
+ .tp_doc = PyDoc_STR("My objects"),
.tp_new = myobj_new,
.tp_dealloc = (destructor)myobj_dealloc,
.tp_repr = (reprfunc)myobj_repr,
0, /* tp_setattro */
0, /* tp_as_buffer */
0, /* tp_flags */
- "My objects", /* tp_doc */
+ PyDoc_STR("My objects"), /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "mymod.MyObject",
.tp_basicsize = sizeof(MyObject),
- .tp_doc = "My objects",
+ .tp_doc = PyDoc_STR("My objects"),
.tp_weaklistoffset = offsetof(MyObject, weakreflist),
.tp_dictoffset = offsetof(MyObject, inst_dict),
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
.tp_name = "mymod.MyStr",
.tp_basicsize = sizeof(MyStr),
.tp_base = NULL, // set to &PyUnicode_Type in module init
- .tp_doc = "my custom str",
+ .tp_doc = PyDoc_STR("my custom str"),
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION,
.tp_repr = (reprfunc)myobj_repr,
};
| :attr:`%llu` | unsigned long long | Equivalent to |
| | | ``printf("%llu")``. [1]_ |
+-------------------+---------------------+----------------------------------+
- | :attr:`%zd` | Py_ssize_t | Equivalent to |
- | | | ``printf("%zd")``. [1]_ |
+ | :attr:`%zd` | :c:type:`\ | Equivalent to |
+ | | Py_ssize_t` | ``printf("%zd")``. [1]_ |
+-------------------+---------------------+----------------------------------+
- | :attr:`%zi` | Py_ssize_t | Equivalent to |
- | | | ``printf("%zi")``. [1]_ |
+ | :attr:`%zi` | :c:type:`\ | Equivalent to |
+ | | Py_ssize_t` | ``printf("%zi")``. [1]_ |
+-------------------+---------------------+----------------------------------+
| :attr:`%zu` | size_t | Equivalent to |
| | | ``printf("%zu")``. [1]_ |
This caches the UTF-8 representation of the string in the Unicode object, and
subsequent calls will return a pointer to the same buffer. The caller is not
- responsible for deallocating the buffer.
+ responsible for deallocating the buffer. The buffer is deallocated and
+ pointers to it become invalid when the Unicode object is garbage collected.
.. versionadded:: 3.3
This is what a Custom object will contain. ``PyObject_HEAD`` is mandatory
at the start of each object struct and defines a field called ``ob_base``
of type :c:type:`PyObject`, containing a pointer to a type object and a
-reference count (these can be accessed using the macros :c:macro:`Py_REFCNT`
-and :c:macro:`Py_TYPE` respectively). The reason for the macro is to
+reference count (these can be accessed using the macros :c:macro:`Py_TYPE`
+and :c:macro:`Py_REFCNT` respectively). The reason for the macro is to
abstract away the layout and to enable additional fields in :ref:`debug builds
<debug-build>`.
static PyTypeObject CustomType = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "custom.Custom",
- .tp_doc = "Custom objects",
+ .tp_doc = PyDoc_STR("Custom objects"),
.tp_basicsize = sizeof(CustomObject),
.tp_itemsize = 0,
.tp_flags = Py_TPFLAGS_DEFAULT,
We provide a doc string for the type in :c:member:`~PyTypeObject.tp_doc`. ::
- .tp_doc = "Custom objects",
+ .tp_doc = PyDoc_STR("Custom objects"),
To enable object creation, we have to provide a :c:member:`~PyTypeObject.tp_new`
handler. This is the equivalent of the Python method :meth:`__new__`, but
Windows Python is built in Microsoft Visual C++; using other compilers may or
-may not work (though Borland seems to). The rest of this section is MSVC++
-specific.
+may not work. The rest of this section is MSVC++ specific.
When creating DLLs in Windows, you must pass :file:`pythonXY.lib` to the linker.
To build two DLLs, spam and ni (which uses C functions found in spam), you could
need, adding about 100K to your executable. To get rid of them, use the Project
Settings dialog, Link tab, to specify *ignore default libraries*. Add the
correct :file:`msvcrtxx.lib` to the list of libraries.
-
`Cython <http://cython.org/>`_ compiles a modified version of Python with
optional annotations into C extensions. `Nuitka <http://www.nuitka.net/>`_ is
an up-and-coming compiler of Python into C++ code, aiming to support the full
-Python language. For compiling to Java you can consider
-`VOC <https://voc.readthedocs.io>`_.
+Python language.
How does Python manage memory?
operating systems that only have BSD curses, but there don't seem to be any
currently maintained OSes that fall into this category.
-For Windows: use `the consolelib module
-<http://effbot.org/zone/console-index.htm>`_.
-
Is there an equivalent to C's onexit() in Python?
-------------------------------------------------
Python binary to produce a single executable.
One is to use the freeze tool, which is included in the Python source tree as
-``Tools/freeze``. It converts Python byte code to C arrays; a C compiler you can
+``Tools/freeze``. It converts Python byte code to C arrays; with a C compiler you can
embed all your modules into a new program, which is then linked with the
standard Python modules.
obvious; otherwise, you might need a little more guidance.
Unless you use some sort of integrated development environment, you will end up
-*typing* Windows commands into what is variously referred to as a "DOS window"
-or "Command prompt window". Usually you can create such a window from your
+*typing* Windows commands into what is referred to as a
+"Command prompt window". Usually you can create such a window from your
search bar by searching for ``cmd``. You should be able to recognize
when you have started such a window because you will see a Windows "command
prompt", which usually looks like this:
by the Windows ``GetProcAddress()`` routine. Macros can make using these
pointers transparent to any C code that calls routines in Python's C API.
- Borland note: convert :file:`python{NN}.lib` to OMF format using Coff2Omf.exe
- first.
-
.. XXX what about static linking?
2. If you use SWIG, it is easy to create a Python "extension module" that will
Use the :mod:`msvcrt` module. This is a standard Windows-specific extension module.
It defines a function ``kbhit()`` which checks whether a keyboard hit is
present, and ``getch()`` which gets one character without echoing it.
-
See also :term:`borrowed reference`.
text encoding
- A codec which encodes Unicode strings to bytes.
+ A string in Python is a sequence of Unicode code points (in range
+ ``U+0000``--``U+10FFFF``). To store or transfer a string, it needs to be
+ serialized as a sequence of bytes.
+
+ Serializing a string into a sequence of bytes is known as "encoding", and
+ recreating the string from the sequence of bytes is known as "decoding".
+
+ There are a variety of different text serialization
+ :ref:`codecs <standard-encodings>`, which are collectively referred to as
+ "text encodings".
text file
A :term:`file object` able to read and write :class:`str` objects.
/*[python end generated code: output=da39a3ee5e6b4b0d input=35521e4e733823c7]*/
This block adds a converter to Argument Clinic named ``ssize_t``. Parameters
-declared as ``ssize_t`` will be declared as type ``Py_ssize_t``, and will
+declared as ``ssize_t`` will be declared as type :c:type:`Py_ssize_t`, and will
be parsed by the ``'O&'`` format unit, which will call the
``ssize_t_converter`` converter function. ``ssize_t`` variables
automatically support default values.
The Windows version of Python doesn't include the :mod:`curses`
module. A ported version called `UniCurses
-<https://pypi.org/project/UniCurses>`_ is available. You could
-also try `the Console module <http://effbot.org/zone/console-index.htm>`_
-written by Fredrik Lundh, which doesn't
-use the same API as curses but provides cursor-addressable text output
-and full support for mouse and keyboard input.
+<https://pypi.org/project/UniCurses>`_ is available.
The Python curses module
In addition to :meth:`~generator.send`, there are two other methods on
generators:
-* :meth:`throw(type, value=None, traceback=None) <generator.throw>` is used to
+* :meth:`throw(value) <generator.throw>` is used to
raise an exception inside the generator; the exception is raised by the
``yield`` expression where the generator's execution is paused.
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(level=numeric_level, ...)
-The call to :func:`basicConfig` should come *before* any calls to :func:`debug`,
-:func:`info` etc. As it's intended as a one-off simple configuration facility,
-only the first call will actually do anything: subsequent calls are effectively
-no-ops.
+The call to :func:`basicConfig` should come *before* any calls to
+:func:`debug`, :func:`info`, etc. Otherwise, those functions will call
+:func:`basicConfig` for you with the default options. As it's intended as a
+one-off simple configuration facility, only the first call will actually do
+anything: subsequent calls are effectively no-ops.
If you run the above script several times, the messages from successive runs
are appended to the file *example.log*. If you want each run to start afresh,
characters. If you wanted to match only lowercase letters, your RE would be
``[a-z]``.
-Metacharacters are not active inside classes. For example, ``[akm$]`` will
+Metacharacters (except ``\``) are not active inside classes. For example, ``[akm$]`` will
match any of the characters ``'a'``, ``'k'``, ``'m'``, or ``'$'``; ``'$'`` is
usually a metacharacter, but inside a character class it's stripped of its
special nature.
>>> standard_way
[('red', 1), ('red', 2), ('blue', 1), ('blue', 2)]
-* The sort routines are guaranteed to use :meth:`__lt__` when making comparisons
+* The sort routines use ``<`` when making comparisons
between two objects. So, it is easy to add a standard sort order to a class by
defining an :meth:`__lt__` method:
>>> sorted(student_objects)
[('dave', 'B', 10), ('jane', 'B', 12), ('john', 'A', 15)]
+ However, note that ``<`` can fall back to using :meth:`__gt__` if
+ :meth:`__lt__` is not implemented (see :func:`object.__lt__`).
+
* Key functions need not depend directly on the objects being sorted. A key
function can also access external resources. For instance, if the student grades
are stored in a dictionary, they can be used to sort a separate list of student
static PyTypeObject CustomType = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "custom.Custom",
- .tp_doc = "Custom objects",
+ .tp_doc = PyDoc_STR("Custom objects"),
.tp_basicsize = sizeof(CustomObject),
.tp_itemsize = 0,
.tp_flags = Py_TPFLAGS_DEFAULT,
static PyTypeObject CustomType = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "custom2.Custom",
- .tp_doc = "Custom objects",
+ .tp_doc = PyDoc_STR("Custom objects"),
.tp_basicsize = sizeof(CustomObject),
.tp_itemsize = 0,
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
static PyTypeObject CustomType = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "custom3.Custom",
- .tp_doc = "Custom objects",
+ .tp_doc = PyDoc_STR("Custom objects"),
.tp_basicsize = sizeof(CustomObject),
.tp_itemsize = 0,
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
static PyTypeObject CustomType = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "custom4.Custom",
- .tp_doc = "Custom objects",
+ .tp_doc = PyDoc_STR("Custom objects"),
.tp_basicsize = sizeof(CustomObject),
.tp_itemsize = 0,
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
static PyTypeObject SubListType = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "sublist.SubList",
- .tp_doc = "SubList objects",
+ .tp_doc = PyDoc_STR("SubList objects"),
.tp_basicsize = sizeof(SubListObject),
.tp_itemsize = 0,
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
| generator_stop | 3.5.0b1 | 3.7 | :pep:`479`: |
| | | | *StopIteration handling inside generators* |
+------------------+-------------+--------------+---------------------------------------------+
-| annotations | 3.7.0b1 | 3.11 | :pep:`563`: |
+| annotations | 3.7.0b1 | TBD [1]_ | :pep:`563`: |
| | | | *Postponed evaluation of annotations* |
+------------------+-------------+--------------+---------------------------------------------+
.. XXX Adding a new entry? Remember to update simple_stmts.rst, too.
+.. [1]
+ ``from __future__ import annotations`` was previously scheduled to
+ become mandatory in Python 3.10, but the Python Steering Council
+ twice decided to delay the change
+ (`announcement for Python 3.10 <https://mail.python.org/archives/list/python-dev@python.org/message/CLVXXPQ2T2LQ5MP2Y53VVQFCXYWQJHKZ/>`__;
+ `announcement for Python 3.11 <https://mail.python.org/archives/list/python-dev@python.org/message/VIZEBX5EYMSYIJNDBF6DMUMZOCWHARSO/>`__).
+ No final decision has been made yet. See also :pep:`563` and :pep:`649`.
+
.. seealso::
.. deprecated:: 3.11
- The :mod:`aifc` module is deprecated (see :pep:`594` for details).
+ The :mod:`aifc` module is deprecated
+ (see :pep:`PEP 594 <594#aifc>` for details).
--------------
fromfile_prefix_chars
^^^^^^^^^^^^^^^^^^^^^
-Sometimes, for example when dealing with a particularly long argument lists, it
+Sometimes, for example when dealing with a particularly long argument list, it
may make sense to keep the list of arguments in a file rather than typing it out
at the command line. If the ``fromfile_prefix_chars=`` argument is given to the
:class:`ArgumentParser` constructor, then arguments that start with any of the
.. method:: ArgumentParser.add_subparsers([title], [description], [prog], \
[parser_class], [action], \
- [option_string], [dest], [required], \
+ [option_strings], [dest], [required], \
[help], [metavar])
Many programs split up their functionality into a number of sub-commands,
**Source code:** :source:`Lib/asynchat.py`
.. deprecated:: 3.6
- :mod:`asynchat` will be removed in Python 3.12 (:pep:`594`).
+ :mod:`asynchat` will be removed in Python 3.12
+ (see :pep:`PEP 594 <594#asynchat>` for details).
Please use :mod:`asyncio` instead.
--------------
=========
.. data:: asyncio.subprocess.PIPE
+ :module:
Can be passed to the *stdin*, *stdout* or *stderr* parameters.
attributes will point to :class:`StreamReader` instances.
.. data:: asyncio.subprocess.STDOUT
+ :module:
Special value that can be used as the *stderr* argument and indicates
that standard error should be redirected into standard output.
.. data:: asyncio.subprocess.DEVNULL
+ :module:
Special value that can be used as the *stdin*, *stdout* or *stderr* argument
to process creation functions. It indicates that the special file
their completion.
.. class:: asyncio.subprocess.Process
+ :module:
An object that wraps OS processes created by the
:func:`create_subprocess_exec` and :func:`create_subprocess_shell`
Use the :meth:`communicate` method rather than
:attr:`process.stdin.write() <stdin>`,
:attr:`await process.stdout.read() <stdout>` or
- :attr:`await process.stderr.read <stderr>`.
+ :attr:`await process.stderr.read() <stderr>`.
This avoids deadlocks due to streams pausing reading or writing
and blocking the child process.
:term:`Coroutines <coroutine>` declared with the async/await syntax are the
preferred way of writing asyncio applications. For example, the following
-snippet of code (requires Python 3.7+) prints "hello", waits 1 second,
+snippet of code prints "hello", waits 1 second,
and then prints "world"::
>>> import asyncio
:exc:`RuntimeError` is raised if there is no running loop in
current thread.
- This function has been **added in Python 3.7**. Prior to
- Python 3.7, the low-level :func:`asyncio.ensure_future` function
- can be used instead::
-
- async def coro():
- ...
-
- # In Python 3.7+
- task = asyncio.create_task(coro())
- ...
-
- # This works in all Python versions but is less readable
- task = asyncio.ensure_future(coro())
- ...
-
.. important::
Save a reference to the result of this function, to avoid
await asyncio.sleep(1)
print('... World!')
- # Python 3.7+
asyncio.run(main())
asyncio is a library to write **concurrent** code using
**Source code:** :source:`Lib/asyncore.py`
.. deprecated:: 3.6
- :mod:`asyncore` will be removed in Python 3.12 (:pep:`594`).
+ :mod:`asyncore` will be removed in Python 3.12
+ (see :pep:`PEP 594 <594#asyncore>` for details).
Please use :mod:`asyncio` instead.
--------------
:deprecated:
.. deprecated:: 3.11
- The :mod:`audioop` module is deprecated (see :pep:`594` for details).
+ The :mod:`audioop` module is deprecated
+ (see :pep:`PEP 594 <594#audioop>` for details).
--------------
``all(val >= x for val in a[i : hi])`` for the right side.
*key* specifies a :term:`key function` of one argument that is used to
- extract a comparison key from each input element. The default value is
- ``None`` (compare the elements directly).
+ extract a comparison key from each element in the array. To support
+ searching complex records, the key function is not applied to the *x* value.
+
+ If *key* is ``None``, the elements are compared directly with no
+ intervening function call.
.. versionchanged:: 3.10
Added the *key* parameter.
``all(val > x for val in a[i : hi])`` for the right side.
*key* specifies a :term:`key function` of one argument that is used to
- extract a comparison key from each input element. The default value is
- ``None`` (compare the elements directly).
+ extract a comparison key from each element in the array. To support
+ searching complex records, the key function is not applied to the *x* value.
+
+ If *key* is ``None``, the elements are compared directly with no
+ intervening function call.
.. versionchanged:: 3.10
Added the *key* parameter.
Insert *x* in *a* in sorted order.
- *key* specifies a :term:`key function` of one argument that is used to
- extract a comparison key from each input element. The default value is
- ``None`` (compare the elements directly).
-
This function first runs :func:`bisect_left` to locate an insertion point.
Next, it runs the :meth:`insert` method on *a* to insert *x* at the
appropriate position to maintain sort order.
+ To support inserting records in a table, the *key* function (if any) is
+ applied to *x* for the search step but not for the insertion step.
+
Keep in mind that the ``O(log n)`` search is dominated by the slow O(n)
insertion step.
Similar to :func:`insort_left`, but inserting *x* in *a* after any existing
entries of *x*.
- *key* specifies a :term:`key function` of one argument that is used to
- extract a comparison key from each input element. The default value is
- ``None`` (compare the elements directly).
-
This function first runs :func:`bisect_right` to locate an insertion point.
Next, it runs the :meth:`insert` method on *a* to insert *x* at the
appropriate position to maintain sort order.
+ To support inserting records in a table, the *key* function (if any) is
+ applied to *x* for the search step but not for the insertion step.
+
Keep in mind that the ``O(log n)`` search is dominated by the slow O(n)
insertion step.
>>> [grade(score) for score in [33, 99, 77, 70, 89, 90, 100]]
['F', 'A', 'C', 'C', 'B', 'A', 'A']
-One technique to avoid repeated calls to a key function is to search a list of
-precomputed keys to find the index of a record::
+The :func:`bisect` and :func:`insort` functions also work with lists of
+tuples. The *key* argument can serve to extract the field used for ordering
+records in a table::
+
+ >>> from collections import namedtuple
+ >>> from operator import attrgetter
+ >>> from bisect import bisect, insort
+ >>> from pprint import pprint
+
+ >>> Movie = namedtuple('Movie', ('name', 'released', 'director'))
+
+ >>> movies = [
+ ... Movie('Jaws', 1975, 'Spielberg'),
+ ... Movie('Titanic', 1997, 'Cameron'),
+ ... Movie('The Birds', 1963, 'Hitchcock'),
+ ... Movie('Aliens', 1986, 'Scott')
+ ... ]
+
+ >>> # Find the first movie released on or after 1960
+ >>> by_year = attrgetter('released')
+ >>> movies.sort(key=by_year)
+ >>> movies[bisect(movies, 1960, key=by_year)]
+ Movie(name='The Birds', released=1963, director='Hitchcock')
+
+ >>> # Insert a movie while maintaining sort order
+ >>> romance = Movie('Love Story', 1970, 'Hiller')
+ >>> insort(movies, romance, key=by_year)
+ >>> pprint(movies)
+ [Movie(name='The Birds', released=1963, director='Hitchcock'),
+ Movie(name='Love Story', released=1970, director='Hiller'),
+ Movie(name='Jaws', released=1975, director='Spielberg'),
+ Movie(name='Aliens', released=1986, director='Scott'),
+ Movie(name='Titanic', released=1997, director='Cameron')]
+
+If the key function is expensive, it is possible to avoid repeated function
+calls by searching a list of precomputed keys to find the index of a record::
>>> data = [('red', 5), ('blue', 1), ('yellow', 8), ('black', 0)]
>>> data.sort(key=lambda r: r[1]) # Or use operator.itemgetter(1).
('red', 5)
>>> data[bisect_left(keys, 8)]
('yellow', 8)
-
single: Common Gateway Interface
.. deprecated:: 3.11
- The :mod:`cgi` module is deprecated (see :pep:`594` for details).
+ The :mod:`cgi` module is deprecated
+ (see :pep:`PEP 594 <594#cgi>` for details and alternatives).
--------------
single: tracebacks; in CGI scripts
.. deprecated:: 3.11
- The :mod:`cgitb` module is deprecated (see :pep:`594` for details).
+ The :mod:`cgitb` module is deprecated
+ (see :pep:`PEP 594 <594#cgitb>` for details).
--------------
single: RMFF
.. deprecated:: 3.11
- The :mod:`chunk` module is deprecated (see :pep:`594` for details).
+ The :mod:`chunk` module is deprecated
+ (see :pep:`PEP 594 <594#chunk>` for details).
--------------
This module defines base classes for standard Python codecs (encoders and
decoders) and provides access to the internal Python codec registry, which
manages the codec and error handling lookup process. Most standard codecs
-are :term:`text encodings <text encoding>`, which encode text to bytes,
-but there are also codecs provided that encode text to text, and bytes to
-bytes. Custom codecs may encode and decode between arbitrary types, but some
-module features are restricted to use specifically with
-:term:`text encodings <text encoding>`, or with codecs that encode to
+are :term:`text encodings <text encoding>`, which encode text to bytes (and
+decode bytes to text), but there are also codecs provided that encode text to
+text, and bytes to bytes. Custom codecs may encode and decode between arbitrary
+types, but some module features are restricted to be used specifically with
+:term:`text encodings <text encoding>` or with codecs that encode to
:class:`bytes`.
The module defines the following functions for encoding and decoding with
Error Handlers
^^^^^^^^^^^^^^
-To simplify and standardize error handling,
-codecs may implement different error handling schemes by
-accepting the *errors* string argument. The following string values are
-defined and implemented by all standard Python codecs:
+To simplify and standardize error handling, codecs may implement different
+error handling schemes by accepting the *errors* string argument:
-.. tabularcolumns:: |l|L|
-
-+-------------------------+-----------------------------------------------+
-| Value | Meaning |
-+=========================+===============================================+
-| ``'strict'`` | Raise :exc:`UnicodeError` (or a subclass); |
-| | this is the default. Implemented in |
-| | :func:`strict_errors`. |
-+-------------------------+-----------------------------------------------+
-| ``'ignore'`` | Ignore the malformed data and continue |
-| | without further notice. Implemented in |
-| | :func:`ignore_errors`. |
-+-------------------------+-----------------------------------------------+
-
-The following error handlers are only applicable to
-:term:`text encodings <text encoding>`:
+ >>> 'German ß, ♬'.encode(encoding='ascii', errors='backslashreplace')
+ b'German \\xdf, \\u266c'
+ >>> 'German ß, ♬'.encode(encoding='ascii', errors='xmlcharrefreplace')
+ b'German &#223;, &#9836;'
.. index::
+ pair: strict; error handler's name
+ pair: ignore; error handler's name
+ pair: replace; error handler's name
+ pair: backslashreplace; error handler's name
+ pair: surrogateescape; error handler's name
single: ? (question mark); replacement character
single: \ (backslash); escape sequence
single: \x; escape sequence
single: \u; escape sequence
single: \U; escape sequence
- single: \N; escape sequence
+
+The following error handlers can be used with all Python
+:ref:`standard-encodings` codecs:
+
+.. tabularcolumns:: |l|L|
+-------------------------+-----------------------------------------------+
| Value | Meaning |
+=========================+===============================================+
-| ``'replace'`` | Replace with a suitable replacement |
-| | marker; Python will use the official |
-| | ``U+FFFD`` REPLACEMENT CHARACTER for the |
-| | built-in codecs on decoding, and '?' on |
-| | encoding. Implemented in |
-| | :func:`replace_errors`. |
+| ``'strict'`` | Raise :exc:`UnicodeError` (or a subclass), |
+| | this is the default. Implemented in |
+| | :func:`strict_errors`. |
+-------------------------+-----------------------------------------------+
-| ``'xmlcharrefreplace'`` | Replace with the appropriate XML character |
-| | reference (only for encoding). Implemented |
-| | in :func:`xmlcharrefreplace_errors`. |
+| ``'ignore'`` | Ignore the malformed data and continue without|
+| | further notice. Implemented in |
+| | :func:`ignore_errors`. |
++-------------------------+-----------------------------------------------+
+| ``'replace'`` | Replace with a replacement marker. On |
+| | encoding, use ``?`` (ASCII character). On |
+| | decoding, use ``�`` (U+FFFD, the official |
+| | REPLACEMENT CHARACTER). Implemented in |
+| | :func:`replace_errors`. |
+-------------------------+-----------------------------------------------+
| ``'backslashreplace'`` | Replace with backslashed escape sequences. |
+| | On encoding, use hexadecimal form of Unicode |
+| | code point with formats ``\xhh`` ``\uxxxx`` |
+| | ``\Uxxxxxxxx``. On decoding, use hexadecimal |
+| | form of byte value with format ``\xhh``. |
| | Implemented in |
| | :func:`backslashreplace_errors`. |
+-------------------------+-----------------------------------------------+
-| ``'namereplace'`` | Replace with ``\N{...}`` escape sequences |
-| | (only for encoding). Implemented in |
-| | :func:`namereplace_errors`. |
-+-------------------------+-----------------------------------------------+
| ``'surrogateescape'`` | On decoding, replace byte with individual |
| | surrogate code ranging from ``U+DC80`` to |
| | ``U+DCFF``. This code will then be turned |
| | more.) |
+-------------------------+-----------------------------------------------+
+.. index::
+ pair: xmlcharrefreplace; error handler's name
+ pair: namereplace; error handler's name
+ single: \N; escape sequence
+
+The following error handlers are only applicable to encoding (within
+:term:`text encodings <text encoding>`):
+
++-------------------------+-----------------------------------------------+
+| Value | Meaning |
++=========================+===============================================+
+| ``'xmlcharrefreplace'`` | Replace with XML/HTML numeric character |
+| | reference, which is a decimal form of Unicode |
+| | code point with format ``&#num;`` Implemented |
+| | in :func:`xmlcharrefreplace_errors`. |
++-------------------------+-----------------------------------------------+
+| ``'namereplace'`` | Replace with ``\N{...}`` escape sequences, |
+| | what appears in the braces is the Name |
+| | property from Unicode Character Database. |
+| | Implemented in :func:`namereplace_errors`. |
++-------------------------+-----------------------------------------------+
+
+.. index::
+ pair: surrogatepass; error handler's name
+
In addition, the following error handler is specific to the given codecs:
+-------------------+------------------------+-------------------------------------------+
| Value | Codecs | Meaning |
+===================+========================+===========================================+
-|``'surrogatepass'``| utf-8, utf-16, utf-32, | Allow encoding and decoding of surrogate |
-| | utf-16-be, utf-16-le, | codes. These codecs normally treat the |
-| | utf-32-be, utf-32-le | presence of surrogates as an error. |
+|``'surrogatepass'``| utf-8, utf-16, utf-32, | Allow encoding and decoding surrogate code|
+| | utf-16-be, utf-16-le, | point (``U+D800`` - ``U+DFFF``) as normal |
+| | utf-32-be, utf-32-le | code point. Otherwise these codecs treat |
+| | | the presence of surrogate code point in |
+| | | :class:`str` as an error. |
+-------------------+------------------------+-------------------------------------------+
.. versionadded:: 3.1
The ``'surrogateescape'`` and ``'surrogatepass'`` error handlers.
.. versionchanged:: 3.4
- The ``'surrogatepass'`` error handlers now works with utf-16\* and utf-32\* codecs.
+ The ``'surrogatepass'`` error handler now works with utf-16\* and utf-32\*
+ codecs.
.. versionadded:: 3.5
The ``'namereplace'`` error handler.
.. versionchanged:: 3.5
- The ``'backslashreplace'`` error handlers now works with decoding and
+ The ``'backslashreplace'`` error handler now works with decoding and
translating.
The set of allowed values can be extended by registering a new named error
.. function:: strict_errors(exception)
- Implements the ``'strict'`` error handling: each encoding or
- decoding error raises a :exc:`UnicodeError`.
+ Implements the ``'strict'`` error handling.
+ Each encoding or decoding error raises a :exc:`UnicodeError`.
-.. function:: replace_errors(exception)
- Implements the ``'replace'`` error handling (for :term:`text encodings
- <text encoding>` only): substitutes ``'?'`` for encoding errors
- (to be encoded by the codec), and ``'\ufffd'`` (the Unicode replacement
- character) for decoding errors.
+.. function:: ignore_errors(exception)
+ Implements the ``'ignore'`` error handling.
-.. function:: ignore_errors(exception)
+ Malformed data is ignored; encoding or decoding is continued without
+ further notice.
- Implements the ``'ignore'`` error handling: malformed data is ignored and
- encoding or decoding is continued without further notice.
+.. function:: replace_errors(exception)
-.. function:: xmlcharrefreplace_errors(exception)
+ Implements the ``'replace'`` error handling.
- Implements the ``'xmlcharrefreplace'`` error handling (for encoding with
- :term:`text encodings <text encoding>` only): the
- unencodable character is replaced by an appropriate XML character reference.
+ Substitutes ``?`` (ASCII character) for encoding errors or ``�`` (U+FFFD,
+ the official REPLACEMENT CHARACTER) for decoding errors.
.. function:: backslashreplace_errors(exception)
- Implements the ``'backslashreplace'`` error handling (for
- :term:`text encodings <text encoding>` only): malformed data is
- replaced by a backslashed escape sequence.
+ Implements the ``'backslashreplace'`` error handling.
+
+ Malformed data is replaced by a backslashed escape sequence.
+ On encoding, use the hexadecimal form of Unicode code point with formats
+ ``\xhh`` ``\uxxxx`` ``\Uxxxxxxxx``. On decoding, use the hexadecimal form of
+ byte value with format ``\xhh``.
+
+ .. versionchanged:: 3.5
+ Works with decoding and translating.
+
+
+.. function:: xmlcharrefreplace_errors(exception)
+
+ Implements the ``'xmlcharrefreplace'`` error handling (for encoding within
+ :term:`text encoding` only).
+
+ The unencodable character is replaced by an appropriate XML/HTML numeric
+ character reference, which is a decimal form of Unicode code point with
+ format ``&#num;``.
+
.. function:: namereplace_errors(exception)
- Implements the ``'namereplace'`` error handling (for encoding with
- :term:`text encodings <text encoding>` only): the
- unencodable character is replaced by a ``\N{...}`` escape sequence.
+ Implements the ``'namereplace'`` error handling (for encoding within
+ :term:`text encoding` only).
+
+ The unencodable character is replaced by a ``\N{...}`` escape sequence. The
+ set of characters that appear in the braces is the Name property from
+ Unicode Character Database. For example, the German lowercase letter ``'ß'``
+ will be converted to byte sequence ``\N{LATIN SMALL LETTER SHARP S}``.
.. versionadded:: 3.5
function interfaces of the stateless encoder and decoder:
-.. method:: Codec.encode(input[, errors])
+.. method:: Codec.encode(input, errors='strict')
Encodes the object *input* and returns a tuple (output object, length consumed).
For instance, :term:`text encoding` converts
of the output object type in this situation.
-.. method:: Codec.decode(input[, errors])
+.. method:: Codec.decode(input, errors='strict')
Decodes the object *input* and returns a tuple (output object, length
consumed). For instance, for a :term:`text encoding`, decoding converts
object.
- .. method:: encode(object[, final])
+ .. method:: encode(object, final=False)
Encodes *object* (taking the current state of the encoder into account)
and returns the resulting encoded object. If this is the last call to
object.
- .. method:: decode(object[, final])
+ .. method:: decode(object, final=False)
Decodes *object* (taking the current state of the decoder into account)
and returns the resulting decoded object. If this is the last call to
:func:`register_error`.
- .. method:: read([size[, chars, [firstline]]])
+ .. method:: read(size=-1, chars=-1, firstline=False)
Decodes data from the stream and returns the resulting object.
available on the stream, these should be read too.
- .. method:: readline([size[, keepends]])
+ .. method:: readline(size=None, keepends=True)
Read one line from the input stream and return the decoded data.
returned.
- .. method:: readlines([sizehint[, keepends]])
+ .. method:: readlines(sizehint=None, keepends=True)
Read all lines available on the input stream and return them as a list of
lines.
---------------------
Strings are stored internally as sequences of code points in
-range ``0x0``--``0x10FFFF``. (See :pep:`393` for
+range ``U+0000``--``U+10FFFF``. (See :pep:`393` for
more details about the implementation.)
Once a string object is used outside of CPU and memory, endianness
and how these arrays are stored as bytes become an issue. As with other
``U+FEFF``. This character can be prepended to every ``UTF-16`` or ``UTF-32``
byte sequence. The byte swapped version of this character (``0xFFFE``) is an
illegal character that may not appear in a Unicode text. So when the
-first character in an ``UTF-16`` or ``UTF-32`` byte sequence
+first character in a ``UTF-16`` or ``UTF-32`` byte sequence
appears to be a ``U+FFFE`` the bytes have to be swapped on decoding.
Unfortunately the character ``U+FEFF`` had a second purpose as
a ``ZERO WIDTH NO-BREAK SPACE``: a character that has no width and doesn't allow
decode any random byte sequence. However that's not possible with UTF-8, as
UTF-8 byte sequences have a structure that doesn't allow arbitrary byte
sequences. To increase the reliability with which a UTF-8 encoding can be
-detected, Microsoft invented a variant of UTF-8 (that Python 2.5 calls
+detected, Microsoft invented a variant of UTF-8 (that Python calls
``"utf-8-sig"``) for its Notepad program: Before any of the Unicode characters
is written to the file, a UTF-8 encoded BOM (which looks like this as a byte
sequence: ``0xef``, ``0xbb``, ``0xbf``) is written. As it's rather improbable
and :mod:`stringprep`.
If you need the IDNA 2008 standard from :rfc:`5891` and :rfc:`5895`, use the
-third-party `idna module <https://pypi.org/project/idna/>_`.
+third-party `idna module <https://pypi.org/project/idna/>`_.
These RFCs together define a protocol to support non-ASCII characters in domain
names. A domain name containing non-ASCII characters (such as
``page.close()`` will be called when the :keyword:`with` block is exited.
-.. class:: aclosing(thing)
+.. function:: aclosing(thing)
Return an async context manager that calls the ``aclose()`` method of *thing*
upon completion of the block. This is basically equivalent to::
# the with statement, even if attempts to open files later
# in the list raise an exception
+ The :meth:`__enter__` method returns the :class:`ExitStack` instance, and
+ performs no additional operations.
+
Each instance maintains a stack of registered callbacks that are called in
reverse order when the instance is closed (either explicitly or implicitly
at the end of a :keyword:`with` statement). Note that callbacks are *not*
pair: cipher; DES
.. deprecated:: 3.11
- The :mod:`crypt` module is deprecated (see :pep:`594` for details).
+ The :mod:`crypt` module is deprecated
+ (see :pep:`PEP 594 <594#crypt>` for details and alternatives).
+ The :mod:`hashlib` module is a potential replacement for certain use cases.
--------------
:func:`mksalt`, one of the ``crypt.METHOD_*`` values (though not all
may be available on all platforms), or a full encrypted password
including salt, as returned by this function. If *salt* is not
- provided, the strongest method will be used (as returned by
- :func:`methods`).
+ provided, the strongest method available in :attr:`methods` will be used.
Checking a password is usually done by passing the plain-text password
as *word* and the full results of a previous :func:`crypt` call,
.. function:: mksalt(method=None, *, rounds=None)
Return a randomly generated salt of the specified method. If no
- *method* is given, the strongest method available as returned by
- :func:`methods` is used.
+ *method* is given, the strongest method available in :attr:`methods` is
+ used.
The return value is a string suitable for passing as the *salt* argument
to :func:`crypt`.
functions in these libraries use the ``stdcall`` calling convention, and are
assumed to return :c:type:`int` by default.
- On Windows CE only the standard calling convention is used, for convenience the
- :class:`WinDLL` and :class:`OleDLL` use the standard calling convention on this
- platform.
-
The Python :term:`global interpreter lock` is released before calling any
function exported by these libraries, and reacquired afterwards.
.. function:: WINFUNCTYPE(restype, *argtypes, use_errno=False, use_last_error=False)
Windows only: The returned function prototype creates functions that use the
- ``stdcall`` calling convention, except on Windows CE where
- :func:`WINFUNCTYPE` is the same as :func:`CFUNCTYPE`. The function will
+ ``stdcall`` calling convention. The function will
release the GIL during the call. *use_errno* and *use_last_error* have the
same meaning as above.
Abstract base class for arrays.
The recommended way to create concrete array types is by multiplying any
- :mod:`ctypes` data type with a positive integer. Alternatively, you can subclass
+ :mod:`ctypes` data type with a non-negative integer. Alternatively, you can subclass
this type and define :attr:`_length_` and :attr:`_type_` class variables.
Array elements can be read and written using standard
subscript and slice accesses; for slice reads, the resulting object is
>>> Decimal(321).exp()
Decimal('2.561702493119680037517373933E+139')
- .. method:: from_float(f)
+ .. classmethod:: from_float(f)
- Classmethod that converts a float to a decimal number, exactly.
+ Alternative constructor that only accepts instances of :class:`float` or
+ :class:`int`.
Note `Decimal.from_float(0.1)` is not the same as `Decimal('0.1')`.
Since 0.1 is not exactly representable in binary floating point, the
Any classes found are recursively searched similarly, to test docstrings in
their contained methods and nested classes.
-.. impl-detail::
- Prior to version 3.4, extension modules written in C were not fully
- searched by doctest.
-
.. _doctest-finding-examples:
.. data:: IGNORE_EXCEPTION_DETAIL
- When specified, an example that expects an exception passes if an exception of
- the expected type is raised, even if the exception detail does not match. For
- example, an example expecting ``ValueError: 42`` will pass if the actual
- exception raised is ``ValueError: 3*14``, but will fail, e.g., if
- :exc:`TypeError` is raised.
+ When specified, doctests expecting exceptions pass so long as an exception
+ of the expected type is raised, even if the details
+ (message and fully-qualified exception name) don't match.
+
+ For example, an example expecting ``ValueError: 42`` will pass if the actual
+ exception raised is ``ValueError: 3*14``, but will fail if, say, a
+ :exc:`TypeError` is raised instead.
+ It will also ignore any fully-qualified name included before the
+ exception class, which can vary between implementations and versions
+ of Python and the code/libraries in use.
+ Hence, all three of these variations will work with the flag specified:
- It will also ignore the module name used in Python 3 doctest reports. Hence
- both of these variations will work with the flag specified, regardless of
- whether the test is run under Python 2.7 or Python 3.2 (or later versions)::
+ .. code-block:: pycon
- >>> raise CustomError('message')
+ >>> raise Exception('message')
Traceback (most recent call last):
- CustomError: message
+ Exception: message
- >>> raise CustomError('message')
+ >>> raise Exception('message')
Traceback (most recent call last):
- my_module.CustomError: message
+ builtins.Exception: message
- Note that :const:`ELLIPSIS` can also be used to ignore the
- details of the exception message, but such a test may still fail based
- on whether or not the module details are printed as part of the
- exception name. Using :const:`IGNORE_EXCEPTION_DETAIL` and the details
- from Python 2.3 is also the only clear way to write a doctest that doesn't
- care about the exception detail yet continues to pass under Python 2.3 or
- earlier (those releases do not support :ref:`doctest directives
- <doctest-directives>` and ignore them as irrelevant comments). For example::
-
- >>> (1, 2)[3] = 'moo'
+ >>> raise Exception('message')
Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: object doesn't support item assignment
+ __main__.Exception: message
- passes under Python 2.3 and later Python versions with the flag specified,
- even though the detail
- changed in Python 2.4 to say "does not" instead of "doesn't".
+ Note that :const:`ELLIPSIS` can also be used to ignore the
+ details of the exception message, but such a test may still fail based
+ on whether the module name is present or matches exactly.
.. versionchanged:: 3.2
:const:`IGNORE_EXCEPTION_DETAIL` now also ignores any information relating
An example's doctest directives modify doctest's behavior for that single
example. Use ``+`` to enable the named behavior, or ``-`` to disable it.
-For example, this test passes::
+For example, this test passes:
- >>> print(list(range(20))) # doctest: +NORMALIZE_WHITESPACE
+.. doctest::
+ :no-trim-doctest-flags:
+
+ >>> print(list(range(20))) # doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
Without the directive it would fail, both because the actual output doesn't have
two blanks before the single-digit list elements, and because the actual output
is on a single line. This test also passes, and also requires a directive to do
-so::
+so:
+
+.. doctest::
+ :no-trim-doctest-flags:
- >>> print(list(range(20))) # doctest: +ELLIPSIS
+ >>> print(list(range(20))) # doctest: +ELLIPSIS
[0, 1, ..., 18, 19]
Multiple directives can be used on a single physical line, separated by
-commas::
+commas:
- >>> print(list(range(20))) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
+.. doctest::
+ :no-trim-doctest-flags:
+
+ >>> print(list(range(20))) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[0, 1, ..., 18, 19]
If multiple directive comments are used for a single example, then they are
-combined::
+combined:
+
+.. doctest::
+ :no-trim-doctest-flags:
- >>> print(list(range(20))) # doctest: +ELLIPSIS
- ... # doctest: +NORMALIZE_WHITESPACE
+ >>> print(list(range(20))) # doctest: +ELLIPSIS
+ ... # doctest: +NORMALIZE_WHITESPACE
[0, 1, ..., 18, 19]
As the previous example shows, you can add ``...`` lines to your example
containing only directives. This can be useful when an example is too long for
-a directive to comfortably fit on the same line::
+a directive to comfortably fit on the same line:
+
+.. doctest::
+ :no-trim-doctest-flags:
>>> print(list(range(5)) + list(range(10, 20)) + list(range(30, 40)))
... # doctest: +ELLIPSIS
>>> d
['Harry', 'Hermione']
-.. note::
-
- Before Python 3.6, when printing a dict, Python did not guarantee that
- the key-value pairs was printed in any particular order.
-
There are others, but you get the idea.
-Another bad idea is to print things that embed an object address, like ::
+Another bad idea is to print things that embed an object address, like
+
+.. doctest::
- >>> id(1.0) # certain to fail some of the time
+ >>> id(1.0) # certain to fail some of the time # doctest: +SKIP
7948648
>>> class C: pass
- >>> C() # the default repr() for instances embeds an address
- <__main__.C instance at 0x00AC18F0>
+ >>> C() # the default repr() for instances embeds an address # doctest: +SKIP
+ <C object at 0x00AC18F0>
+
+The :const:`ELLIPSIS` directive gives a nice approach for the last example:
-The :const:`ELLIPSIS` directive gives a nice approach for the last example::
+.. doctest::
+ :no-trim-doctest-flags:
- >>> C() #doctest: +ELLIPSIS
- <__main__.C instance at 0x...>
+ >>> C() # doctest: +ELLIPSIS
+ <C object at 0x...>
Floating-point numbers are also subject to small output variations across
platforms, because Python defers to the platform C library for float formatting,
.. attribute:: header_encoding
If the character set must be encoded before it can be used in an email
- header, this attribute will be set to ``Charset.QP`` (for
- quoted-printable), ``Charset.BASE64`` (for base64 encoding), or
- ``Charset.SHORTEST`` for the shortest of QP or BASE64 encoding. Otherwise,
+ header, this attribute will be set to ``charset.QP`` (for
+ quoted-printable), ``charset.BASE64`` (for base64 encoding), or
+ ``charset.SHORTEST`` for the shortest of QP or BASE64 encoding. Otherwise,
it will be ``None``.
Same as *header_encoding*, but describes the encoding for the mail
message's body, which indeed may be different than the header encoding.
- ``Charset.SHORTEST`` is not allowed for *body_encoding*.
+ ``charset.SHORTEST`` is not allowed for *body_encoding*.
.. attribute:: output_charset
*charset* is the input character set, and must be the canonical name of a
character set.
- Optional *header_enc* and *body_enc* is either ``Charset.QP`` for
- quoted-printable, ``Charset.BASE64`` for base64 encoding,
- ``Charset.SHORTEST`` for the shortest of quoted-printable or base64 encoding,
+ Optional *header_enc* and *body_enc* is either ``charset.QP`` for
+ quoted-printable, ``charset.BASE64`` for base64 encoding,
+ ``charset.SHORTEST`` for the shortest of quoted-printable or base64 encoding,
or ``None`` for no encoding. ``SHORTEST`` is only valid for
*header_enc*. The default is ``None`` for no encoding.
This module makes available standard ``errno`` system symbols. The value of each
symbol is the corresponding integer value. The names and descriptions are
-borrowed from :file:`linux/include/errno.h`, which should be pretty
+borrowed from :file:`linux/include/errno.h`, which should be
all-inclusive.
.. data:: EPERM
- Operation not permitted
+ Operation not permitted. This error is mapped to the exception
+ :exc:`PermissionError`.
.. data:: ENOENT
- No such file or directory
+ No such file or directory. This error is mapped to the exception
+ :exc:`FileNotFoundError`.
.. data:: ESRCH
- No such process
+ No such process. This error is mapped to the exception
+ :exc:`ProcessLookupError`.
.. data:: EINTR
- Interrupted system call.
-
- .. seealso::
- This error is mapped to the exception :exc:`InterruptedError`.
+ Interrupted system call. This error is mapped to the exception
+ :exc:`InterruptedError`.
.. data:: EIO
.. data:: ECHILD
- No child processes
+ No child processes. This error is mapped to the exception
+ :exc:`ChildProcessError`.
.. data:: EAGAIN
- Try again
+ Try again. This error is mapped to the exception :exc:`BlockingIOError`.
.. data:: ENOMEM
.. data:: EACCES
- Permission denied
+ Permission denied. This error is mapped to the exception
+ :exc:`PermissionError`.
.. data:: EFAULT
.. data:: EEXIST
- File exists
+ File exists. This error is mapped to the exception
+ :exc:`FileExistsError`.
.. data:: EXDEV
.. data:: ENOTDIR
- Not a directory
+ Not a directory. This error is mapped to the exception
+ :exc:`NotADirectoryError`.
.. data:: EISDIR
- Is a directory
+ Is a directory. This error is mapped to the exception
+ :exc:`IsADirectoryError`.
.. data:: EINVAL
.. data:: EPIPE
- Broken pipe
+ Broken pipe. This error is mapped to the exception
+ :exc:`BrokenPipeError`.
.. data:: EDOM
.. data:: EWOULDBLOCK
- Operation would block
+ Operation would block. This error is mapped to the exception
+ :exc:`BlockingIOError`.
.. data:: ENOMSG
.. data:: ECONNABORTED
- Software caused connection abort
+ Software caused connection abort. This error is mapped to the
+ exception :exc:`ConnectionAbortedError`.
.. data:: ECONNRESET
- Connection reset by peer
+ Connection reset by peer. This error is mapped to the exception
+ :exc:`ConnectionResetError`.
.. data:: ENOBUFS
.. data:: ESHUTDOWN
- Cannot send after transport endpoint shutdown
+ Cannot send after transport endpoint shutdown. This error is mapped
+ to the exception :exc:`BrokenPipeError`.
.. data:: ETOOMANYREFS
.. data:: ETIMEDOUT
- Connection timed out
+ Connection timed out. This error is mapped to the exception
+ :exc:`TimeoutError`.
.. data:: ECONNREFUSED
- Connection refused
+ Connection refused. This error is mapped to the exception
+ :exc:`ConnectionRefusedError`.
.. data:: EHOSTDOWN
.. data:: EALREADY
- Operation already in progress
+ Operation already in progress. This error is mapped to the
+ exception :exc:`BlockingIOError`.
.. data:: EINPROGRESS
- Operation now in progress
+ Operation now in progress. This error is mapped to the exception
+ :exc:`BlockingIOError`.
.. data:: ESTALE
accidentally caught by code that catches :exc:`Exception` and thus prevent
the interpreter from exiting.
+ .. note::
+
+ Catching a :exc:`KeyboardInterrupt` requires special consideration.
+ Because it can be raised at unpredictable points, it may, in some
+ circumstances, leave the running program in an inconsistent state. It is
+ generally best to allow :exc:`KeyboardInterrupt` to end the program as
+ quickly as possible or avoid raising it entirely. (See
+ :ref:`handlers-and-exceptions`.)
+
.. exception:: MemoryError
Raised when an operation would block on an object (e.g. socket) set
for non-blocking operation.
- Corresponds to :c:data:`errno` ``EAGAIN``, ``EALREADY``,
- ``EWOULDBLOCK`` and ``EINPROGRESS``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.EAGAIN`, :py:data:`~errno.EALREADY`,
+ :py:data:`~errno.EWOULDBLOCK` and :py:data:`~errno.EINPROGRESS`.
In addition to those of :exc:`OSError`, :exc:`BlockingIOError` can have
one more attribute:
.. exception:: ChildProcessError
Raised when an operation on a child process failed.
- Corresponds to :c:data:`errno` ``ECHILD``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.ECHILD`.
.. exception:: ConnectionError
A subclass of :exc:`ConnectionError`, raised when trying to write on a
pipe while the other end has been closed, or trying to write on a socket
which has been shutdown for writing.
- Corresponds to :c:data:`errno` ``EPIPE`` and ``ESHUTDOWN``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.EPIPE` and :py:data:`~errno.ESHUTDOWN`.
.. exception:: ConnectionAbortedError
A subclass of :exc:`ConnectionError`, raised when a connection attempt
is aborted by the peer.
- Corresponds to :c:data:`errno` ``ECONNABORTED``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.ECONNABORTED`.
.. exception:: ConnectionRefusedError
A subclass of :exc:`ConnectionError`, raised when a connection attempt
is refused by the peer.
- Corresponds to :c:data:`errno` ``ECONNREFUSED``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.ECONNREFUSED`.
.. exception:: ConnectionResetError
A subclass of :exc:`ConnectionError`, raised when a connection is
reset by the peer.
- Corresponds to :c:data:`errno` ``ECONNRESET``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.ECONNRESET`.
.. exception:: FileExistsError
Raised when trying to create a file or directory which already exists.
- Corresponds to :c:data:`errno` ``EEXIST``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.EEXIST`.
.. exception:: FileNotFoundError
Raised when a file or directory is requested but doesn't exist.
- Corresponds to :c:data:`errno` ``ENOENT``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.ENOENT`.
.. exception:: InterruptedError
Raised when a file operation (such as :func:`os.remove`) is requested
on a directory.
- Corresponds to :c:data:`errno` ``EISDIR``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.EISDIR`.
.. exception:: NotADirectoryError
something which is not a directory. On most POSIX platforms, it may also be
raised if an operation attempts to open or traverse a non-directory file as if
it were a directory.
- Corresponds to :c:data:`errno` ``ENOTDIR``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.ENOTDIR`.
.. exception:: PermissionError
Raised when trying to run an operation without the adequate access
rights - for example filesystem permissions.
- Corresponds to :c:data:`errno` ``EACCES`` and ``EPERM``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.EACCES` and :py:data:`~errno.EPERM`.
.. exception:: ProcessLookupError
Raised when a given process doesn't exist.
- Corresponds to :c:data:`errno` ``ESRCH``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.ESRCH`.
.. exception:: TimeoutError
Raised when a system function timed out at the system level.
- Corresponds to :c:data:`errno` ``ETIMEDOUT``.
+ Corresponds to :c:data:`errno` :py:data:`~errno.ETIMEDOUT`.
.. versionadded:: 3.3
All the above :exc:`OSError` subclasses were added.
On macOS, the fcntl module exposes the ``F_GETPATH`` constant, which obtains
the path of a file from a file descriptor.
On Linux(>=3.15), the fcntl module exposes the ``F_OFD_GETLK``, ``F_OFD_SETLK``
- and ``F_OFD_SETLKW`` constants, which working with open file description locks.
+ and ``F_OFD_SETLKW`` constants, which are used when working with open file
+ description locks.
.. versionchanged:: 3.10
On Linux >= 2.6.11, the fcntl module exposes the ``F_GETPIPE_SZ`` and
Added the optional *errors* parameter.
.. deprecated:: 3.10
- This function is deprecated since :func:`input` and :class:`FileInput`
+ This function is deprecated since :func:`fileinput.input` and :class:`FileInput`
now have *encoding* and *errors* parameters.
.. versionadded:: 3.8
- .. method:: from_float(flt)
+ .. classmethod:: from_float(flt)
- This class method constructs a :class:`Fraction` representing the exact
- value of *flt*, which must be a :class:`float`. Beware that
+ Alternative constructor which only accepts instances of
+ :class:`float` or :class:`numbers.Integral`. Beware that
``Fraction.from_float(0.3)`` is not the same value as ``Fraction(3, 10)``.
.. note::
:class:`Fraction` instance directly from a :class:`float`.
- .. method:: from_decimal(dec)
+ .. classmethod:: from_decimal(dec)
- This class method constructs a :class:`Fraction` representing the exact
- value of *dec*, which must be a :class:`decimal.Decimal` instance.
+ Alternative constructor which only accepts instances of
+ :class:`decimal.Decimal` or :class:`numbers.Integral`.
.. note::
.. audit-event:: builtins.input/result result input
- Raises an auditing event ``builtins.input/result`` with the result after
- successfully reading input.
+ Raises an :ref:`auditing event <auditing>` ``builtins.input/result``
+ with the result after successfully reading input.
.. class:: int([x])
Return ``True`` if *class* is a subclass (direct, indirect, or :term:`virtual
<abstract base class>`) of *classinfo*. A
class is considered a subclass of itself. *classinfo* may be a tuple of class
- objects or a :ref:`types-union`, in which case return ``True`` if *class* is a
+ objects (or recursively, other such tuples)
+ or a :ref:`types-union`, in which case return ``True`` if *class* is a
subclass of any entry in *classinfo*. In any other case, a :exc:`TypeError`
exception is raised.
*buffering* is an optional integer used to set the buffering policy. Pass 0
to switch buffering off (only allowed in binary mode), 1 to select line
buffering (only usable in text mode), and an integer > 1 to indicate the size
- in bytes of a fixed-size chunk buffer. When no *buffering* argument is
+ in bytes of a fixed-size chunk buffer. Note that specifying a buffer size this
+ way applies to binary buffered I/O, but ``TextIOWrapper`` (i.e., files opened
+ with ``mode='r+'``) has its own buffering. To disable buffering in
+ ``TextIOWrapper``, consider using the ``write_through`` flag for
+ :func:`io.TextIOWrapper.reconfigure`. When no *buffering* argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer is
arbitrary order. No tilde expansion is done, but ``*``, ``?``, and character
ranges expressed with ``[]`` will be correctly matched. This is done by using
the :func:`os.scandir` and :func:`fnmatch.fnmatch` functions in concert, and
-not by actually invoking a subshell. Note that unlike :func:`fnmatch.fnmatch`,
-:mod:`glob` treats filenames beginning with a dot (``.``) as special cases.
+not by actually invoking a subshell.
+
+Note that files beginning with a dot (``.``) can only be matched by
+patterns that also start with a dot,
+unlike :func:`fnmatch.fnmatch` or :func:`pathlib.Path.glob`.
(For tilde and shell variable expansion, use :func:`os.path.expanduser` and
:func:`os.path.expandvars`.)
Group database entries are reported as a tuple-like object, whose attributes
correspond to the members of the ``group`` structure (Attribute field below, see
-``<pwd.h>``):
+``<grp.h>``):
+-------+-----------+---------------------------------+
| Index | Attribute | Meaning |
**Source code:** :source:`Lib/imghdr.py`
.. deprecated:: 3.11
- The :mod:`imghdr` module is deprecated (see :pep:`594` for details).
+ The :mod:`imghdr` module is deprecated
+ (see :pep:`PEP 594 <594#imghdr>` for details and alternatives).
--------------
object
+-- Finder (deprecated)
- | +-- MetaPathFinder
- | +-- PathEntryFinder
+ +-- MetaPathFinder
+ +-- PathEntryFinder
+-- Loader
+-- ResourceLoader --------+
+-- InspectLoader |
.. class:: MetaPathFinder
- An abstract base class representing a :term:`meta path finder`. For
- compatibility, this is a subclass of :class:`Finder`.
+ An abstract base class representing a :term:`meta path finder`.
.. versionadded:: 3.3
If you need to find out if a module can be imported without actually doing the
import, then you should use :func:`importlib.util.find_spec`.
+
+Note that if ``name`` is a submodule (contains a dot),
+:func:`importlib.util.find_spec` will import the parent module.
::
import importlib.util
windows.rst
unix.rst
superseded.rst
- undoc.rst
security_warnings.rst
If the documentation string for an object is not provided and the object is
a class, a method, a property or a descriptor, retrieve the documentation
string from the inheritance hierarchy.
+ Return ``None`` if the documentation string is invalid or missing.
.. versionchanged:: 3.5
Documentation strings are now inherited if not overridden.
.. function:: getmodule(object)
- Try to guess which module an object was defined in.
+ Try to guess which module an object was defined in. Return ``None``
+ if the module cannot be determined.
.. function:: getsourcefile(object)
- Return the name of the Python source file in which an object was defined. This
+ Return the name of the Python source file in which an object was defined
+ or ``None`` if no way can be identified to get the source. This
will fail with a :exc:`TypeError` if the object is a built-in module, class, or
function.
.. versionadded:: 3.8
-.. function:: text_encoding(encoding, stacklevel=2)
+.. function:: text_encoding(encoding, stacklevel=2, /)
This is a helper function for callables that use :func:`open` or
:class:`TextIOWrapper` and have an ``encoding=None`` parameter.
Return ``True`` if the stream can be read from. If ``False``, :meth:`read`
will raise :exc:`OSError`.
- .. method:: readline(size=-1)
+ .. method:: readline(size=-1, /)
Read and return one line from the stream. If *size* is specified, at
most *size* bytes will be read.
the *newline* argument to :func:`open` can be used to select the line
terminator(s) recognized.
- .. method:: readlines(hint=-1)
+ .. method:: readlines(hint=-1, /)
Read and return a list of lines from the stream. *hint* can be specified
to control the number of lines read: no more lines will be read if the
Note that it's already possible to iterate on file objects using ``for
line in file: ...`` without calling ``file.readlines()``.
- .. method:: seek(offset, whence=SEEK_SET)
+ .. method:: seek(offset, whence=SEEK_SET, /)
Change the stream position to the given byte *offset*. *offset* is
interpreted relative to the position indicated by *whence*. The default
Return the current stream position.
- .. method:: truncate(size=None)
+ .. method:: truncate(size=None, /)
Resize the stream to the given *size* in bytes (or the current position
if *size* is not specified). The current stream position isn't changed.
Return ``True`` if the stream supports writing. If ``False``,
:meth:`write` and :meth:`truncate` will raise :exc:`OSError`.
- .. method:: writelines(lines)
+ .. method:: writelines(lines, /)
Write a list of lines to the stream. Line separators are not added, so it
is usual for each of the lines provided to have a line separator at the
:class:`RawIOBase` provides these methods in addition to those from
:class:`IOBase`:
- .. method:: read(size=-1)
+ .. method:: read(size=-1, /)
Read up to *size* bytes from the object and return them. As a convenience,
if *size* is unspecified or -1, all bytes until EOF are returned.
Read and return all the bytes from the stream until EOF, using multiple
calls to the stream if necessary.
- .. method:: readinto(b)
+ .. method:: readinto(b, /)
Read bytes into a pre-allocated, writable
:term:`bytes-like object` *b*, and return the
If the object is in non-blocking mode and no bytes
are available, ``None`` is returned.
- .. method:: write(b)
+ .. method:: write(b, /)
Write the given :term:`bytes-like object`, *b*, to the
underlying raw stream, and return the number of
.. versionadded:: 3.1
- .. method:: read(size=-1)
+ .. method:: read(size=-1, /)
Read and return up to *size* bytes. If the argument is omitted, ``None``,
or negative, data is read and returned until EOF is reached. An empty
A :exc:`BlockingIOError` is raised if the underlying raw stream is in
non blocking-mode, and has no data available at the moment.
- .. method:: read1([size])
+ .. method:: read1(size=-1, /)
Read and return up to *size* bytes, with at most one call to the
underlying raw stream's :meth:`~RawIOBase.read` (or
If *size* is ``-1`` (the default), an arbitrary number of bytes are
returned (more than zero unless EOF is reached).
- .. method:: readinto(b)
+ .. method:: readinto(b, /)
Read bytes into a pre-allocated, writable
:term:`bytes-like object` *b* and return the number of bytes read.
A :exc:`BlockingIOError` is raised if the underlying raw stream is in non
blocking-mode, and has no data available at the moment.
- .. method:: readinto1(b)
+ .. method:: readinto1(b, /)
Read bytes into a pre-allocated, writable
:term:`bytes-like object` *b*, using at most one call to
.. versionadded:: 3.5
- .. method:: write(b)
+ .. method:: write(b, /)
Write the given :term:`bytes-like object`, *b*, and return the number
of bytes written (always equal to the length of *b* in bytes, since if
Buffered I/O streams provide a higher-level interface to an I/O device
than raw I/O does.
-.. class:: BytesIO([initial_bytes])
+.. class:: BytesIO(initial_bytes=b'')
A binary stream using an in-memory bytes buffer. It inherits
:class:`BufferedIOBase`. The buffer is discarded when the
Return :class:`bytes` containing the entire contents of the buffer.
- .. method:: read1([size])
+ .. method:: read1(size=-1, /)
In :class:`BytesIO`, this is the same as :meth:`~BufferedIOBase.read`.
.. versionchanged:: 3.7
The *size* argument is now optional.
- .. method:: readinto1(b)
+ .. method:: readinto1(b, /)
In :class:`BytesIO`, this is the same as :meth:`~BufferedIOBase.readinto`.
:class:`BufferedReader` provides or overrides these methods in addition to
those from :class:`BufferedIOBase` and :class:`IOBase`:
- .. method:: peek([size])
+ .. method:: peek(size=0, /)
Return bytes from the stream without advancing the position. At most one
single read on the raw stream is done to satisfy the call. The number of
bytes returned may be less or more than requested.
- .. method:: read([size])
+ .. method:: read(size=-1, /)
Read and return *size* bytes, or if *size* is not given or negative, until
EOF or if the read call would block in non-blocking mode.
- .. method:: read1([size])
+ .. method:: read1(size=-1, /)
Read and return up to *size* bytes with only one call on the raw stream.
If at least one byte is buffered, only buffered bytes are returned.
Force bytes held in the buffer into the raw stream. A
:exc:`BlockingIOError` should be raised if the raw stream blocks.
- .. method:: write(b)
+ .. method:: write(b, /)
Write the :term:`bytes-like object`, *b*, and return the
number of bytes written. When in non-blocking mode, a
are guaranteed to be implemented.
-.. class:: BufferedRWPair(reader, writer, buffer_size=DEFAULT_BUFFER_SIZE)
+.. class:: BufferedRWPair(reader, writer, buffer_size=DEFAULT_BUFFER_SIZE, /)
A buffered binary stream providing higher-level access to two non seekable
:class:`RawIOBase` raw binary streams---one readable, the other writeable.
.. versionadded:: 3.1
- .. method:: read(size=-1)
+ .. method:: read(size=-1, /)
Read and return at most *size* characters from the stream as a single
:class:`str`. If *size* is negative or ``None``, reads until EOF.
- .. method:: readline(size=-1)
+ .. method:: readline(size=-1, /)
Read until newline or EOF and return a single ``str``. If the stream is
already at EOF, an empty string is returned.
If *size* is specified, at most *size* characters will be read.
- .. method:: seek(offset, whence=SEEK_SET)
+ .. method:: seek(offset, whence=SEEK_SET, /)
Change the stream position to the given *offset*. Behaviour depends on
the *whence* parameter. The default value for *whence* is
does not usually represent a number of bytes in the underlying
binary storage.
- .. method:: write(s)
+ .. method:: write(s, /)
Write the string *s* to the stream and return the number of characters
written.
This module's encoders and decoders preserve input and output order by
default. Order is only lost if the underlying containers are unordered.
- Prior to Python 3.7, :class:`dict` was not guaranteed to be ordered, so
- inputs and outputs were typically scrambled unless
- :class:`collections.OrderedDict` was specifically requested. Starting
- with Python 3.7, the regular :class:`dict` became order preserving, so
- it is no longer necessary to specify :class:`collections.OrderedDict` for
- JSON generation and parsing.
-
Basic Usage
-----------
.. versionadded:: 3.10
-.. function:: atof(string)
+.. function:: atof(string, func=float)
- Converts a string to a floating point number, following the :const:`LC_NUMERIC`
- settings.
+ Converts a string to a number, following the :const:`LC_NUMERIC` settings,
+ by calling *func* on the result of calling :func:`delocalize` on *string*.
.. function:: atoi(string)
above example). In such circumstances, it is likely that specialized
:class:`Formatter`\ s would be used with particular :class:`Handler`\ s.
+ If no handler is attached to this logger (or any of its ancestors,
+ taking into account the relevant :attr:`Logger.propagate` attributes),
+ the message will be sent to the handler set on :attr:`lastResort`.
+
.. versionchanged:: 3.2
The *stack_info* parameter was added.
above example). In such circumstances, it is likely that specialized
:class:`Formatter`\ s would be used with particular :class:`Handler`\ s.
+ This function (as well as :func:`info`, :func:`warning`, :func:`error` and
+ :func:`critical`) will call :func:`basicConfig` if the root logger doesn't
+ have any handler attached.
+
.. versionchanged:: 3.2
The *stack_info* parameter was added.
Logs a message with level *level* on the root logger. The other arguments are
interpreted as for :func:`debug`.
- .. note:: The above module-level convenience functions, which delegate to the
- root logger, call :func:`basicConfig` to ensure that at least one handler
- is available. Because of this, they should *not* be used in threads,
- in versions of Python earlier than 2.7.1 and 3.2, unless at least one
- handler has been added to the root logger *before* the threads are
- started. In earlier versions of Python, due to a thread safety shortcoming
- in :func:`basicConfig`, this can (under rare circumstances) lead to
- handlers being added multiple times to the root logger, which can in turn
- lead to multiple messages for the same event.
-
.. function:: disable(level=CRITICAL)
Provides an overriding level *level* for all loggers which takes precedence over
.. module:: mailcap
:synopsis: Mailcap file handling.
+ :deprecated:
**Source code:** :source:`Lib/mailcap.py`
+.. deprecated:: 3.11
+ The :mod:`mailcap` module is deprecated
+ (see :pep:`PEP 594 <594#mailcap>` for details).
+ The :mod:`mimetypes` module provides an alternative.
+
--------------
Mailcap files are used to configure how MIME-aware applications such as mail
.. function:: ceil(x)
Return the ceiling of *x*, the smallest integer greater than or equal to *x*.
- If *x* is not a float, delegates to ``x.__ceil__()``, which should return an
- :class:`~numbers.Integral` value.
+ If *x* is not a float, delegates to :meth:`x.__ceil__ <object.__ceil__>`,
+ which should return an :class:`~numbers.Integral` value.
.. function:: comb(n, k)
.. function:: floor(x)
- Return the floor of *x*, the largest integer less than or equal to *x*.
- If *x* is not a float, delegates to ``x.__floor__()``, which should return an
- :class:`~numbers.Integral` value.
+ Return the floor of *x*, the largest integer less than or equal to *x*. If
+ *x* is not a float, delegates to :meth:`x.__floor__ <object.__floor__>`, which
+ should return an :class:`~numbers.Integral` value.
.. function:: fmod(x, y)
.. function:: trunc(x)
- Return the :class:`~numbers.Real` value *x* truncated to an
- :class:`~numbers.Integral` (usually an integer). Delegates to
- :meth:`x.__trunc__() <object.__trunc__>`.
+ Return *x* with the fractional part
+ removed, leaving the integer part. This rounds toward 0: ``trunc()`` is
+ equivalent to :func:`floor` for positive *x*, and equivalent to :func:`ceil`
+ for negative *x*. If *x* is not a float, delegates to :meth:`x.__trunc__
+ <object.__trunc__>`, which should return an :class:`~numbers.Integral` value.
.. function:: ulp(x)
.. data:: nan
- A floating-point "not a number" (NaN) value. Equivalent to the output of
- ``float('nan')``.
+ A floating-point "not a number" (NaN) value. Equivalent to the output of
+ ``float('nan')``. Due to the requirements of the `IEEE-754 standard
+ <https://en.wikipedia.org/wiki/IEEE_754>`_, ``math.nan`` and ``float('nan')`` are
+ not considered equal to any other numeric value, including themselves. To check
+ whether a number is a NaN, use the :func:`isnan` function to test
+ for NaNs instead of ``is`` or ``==``.
+ Example::
+
+ >>> import math
+ >>> math.nan == math.nan
+ False
+ >>> float('nan') == float('nan')
+ False
+ >>> math.isnan(math.nan)
+ True
+ >>> math.isnan(float('nan'))
+ True
.. versionadded:: 3.5
.. index:: single: msi
.. deprecated:: 3.11
- The :mod:`msilib` module is deprecated (see :pep:`594` for details).
+ The :mod:`msilib` module is deprecated
+ (see :pep:`PEP 594 <594#msilib>` for details).
--------------
proxies.
.. function:: multiprocessing.Manager()
+ :module:
Returns a started :class:`~multiprocessing.managers.SyncManager` object which
can be used for sharing objects between processes. The returned manager
email.rst
json.rst
- mailcap.rst
mailbox.rst
mimetypes.rst
base64.rst
.. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
.. deprecated:: 3.11
- The :mod:`nis` module is deprecated (see :pep:`594` for details).
+ The :mod:`nis` module is deprecated
+ (see :pep:`PEP 594 <594#nis>` for details).
--------------
These option syntaxes are not supported by :mod:`optparse`, and they never
will be. This is deliberate: the first three are non-standard on any
environment, and the last only makes sense if you're exclusively targeting
- VMS, MS-DOS, and/or Windows.
+ Windows or certain legacy platforms (e.g. VMS, MS-DOS).
option argument
an argument that follows an option, is closely associated with that option,
:synopsis: Operations on pathnames.
**Source code:** :source:`Lib/posixpath.py` (for POSIX) and
-:source:`Lib/ntpath.py` (for Windows NT).
+:source:`Lib/ntpath.py` (for Windows).
.. index:: single: path; operations
--------------
-This module implements some useful functions on pathnames. To read or
-write files see :func:`open`, and for accessing the filesystem see the
-:mod:`os` module. The path parameters can be passed as either strings,
-or bytes. Applications are encouraged to represent file names as
-(Unicode) character strings. Unfortunately, some file names may not be
-representable as strings on Unix, so applications that need to support
-arbitrary file names on Unix should use bytes objects to represent
-path names. Vice versa, using bytes objects cannot represent all file
-names on Windows (in the standard ``mbcs`` encoding), hence Windows
-applications should use string objects to access all files.
+This module implements some useful functions on pathnames. To read or write
+files see :func:`open`, and for accessing the filesystem see the :mod:`os`
+module. The path parameters can be passed as strings, or bytes, or any object
+implementing the :class:`os.PathLike` protocol.
Unlike a unix shell, Python does not do any *automatic* path expansions.
Functions such as :func:`expanduser` and :func:`expandvars` can be invoked
their parameters. The result is an object of the same type, if a path or
file name is returned.
-
.. note::
Since different operating systems have different path name conventions, there
:ref:`error handler <error-handlers>` being enabled for :data:`sys.stdin`
and :data:`sys.stdout` (:data:`sys.stderr` continues to use
``backslashreplace`` as it does in the default locale-aware mode)
-* On Unix, :func:`os.device_encoding` returns ``'UTF-8'``. rather than the
+* On Unix, :func:`os.device_encoding` returns ``'UTF-8'`` rather than the
device encoding.
Note that the standard stream settings in UTF-8 mode can be overridden by
This mapping is captured the first time the :mod:`os` module is imported,
typically during Python startup as part of processing :file:`site.py`. Changes
- to the environment made after this time are not reflected in ``os.environ``,
- except for changes made by modifying ``os.environ`` directly.
+ to the environment made after this time are not reflected in :data:`os.environ`,
+ except for changes made by modifying :data:`os.environ` directly.
This mapping may be used to modify the environment as well as query the
environment. :func:`putenv` will be called automatically when the mapping
.. note::
- Calling :func:`putenv` directly does not change ``os.environ``, so it's better
- to modify ``os.environ``.
+ Calling :func:`putenv` directly does not change :data:`os.environ`, so it's better
+ to modify :data:`os.environ`.
.. note::
You can delete items in this mapping to unset environment variables.
:func:`unsetenv` will be called automatically when an item is deleted from
- ``os.environ``, and when one of the :meth:`pop` or :meth:`clear` methods is
+ :data:`os.environ`, and when one of the :meth:`pop` or :meth:`clear` methods is
called.
.. versionchanged:: 3.9
.. function:: getenv(key, default=None)
Return the value of the environment variable *key* if it exists, or
- *default* if it doesn't. *key*, *default* and the result are str.
+ *default* if it doesn't. *key*, *default* and the result are str. Note that
+ since :func:`getenv` uses :data:`os.environ`, the mapping of :func:`getenv` is
+ similarly captured on import, and the function may not reflect
+ future environment changes.
On Unix, keys and values are decoded with :func:`sys.getfilesystemencoding`
and ``'surrogateescape'`` error handler. Use :func:`os.getenvb` if you
.. function:: getenvb(key, default=None)
Return the value of the environment variable *key* if it exists, or
- *default* if it doesn't. *key*, *default* and the result are bytes.
+ *default* if it doesn't. *key*, *default* and the result are bytes. Note that
+ since :func:`getenvb` uses :data:`os.environb`, the mapping of :func:`getenvb` is
+ similarly captured on import, and the function may not reflect
+ future environment changes.
+
:func:`getenvb` is only available if :data:`supports_bytes_environ`
is ``True``.
changes to the environment affect subprocesses started with :func:`os.system`,
:func:`popen` or :func:`fork` and :func:`execv`.
- Assignments to items in ``os.environ`` are automatically translated into
+ Assignments to items in :data:`os.environ` are automatically translated into
corresponding calls to :func:`putenv`; however, calls to :func:`putenv`
- don't update ``os.environ``, so it is actually preferable to assign to items
- of ``os.environ``.
+ don't update :data:`os.environ`, so it is actually preferable to assign to items
+ of :data:`os.environ`. This also applies to :func:`getenv` and :func:`getenvb`, which
+ respectively use :data:`os.environ` and :data:`os.environb` in their implementations.
.. note::
environment affect subprocesses started with :func:`os.system`, :func:`popen` or
:func:`fork` and :func:`execv`.
- Deletion of items in ``os.environ`` is automatically translated into a
+ Deletion of items in :data:`os.environ` is automatically translated into a
corresponding call to :func:`unsetenv`; however, calls to :func:`unsetenv`
- don't update ``os.environ``, so it is actually preferable to delete items of
- ``os.environ``.
+ don't update :data:`os.environ`, so it is actually preferable to delete items of
+ :data:`os.environ`.
.. audit-event:: os.unsetenv key os.unsetenv
.. function:: replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
- Rename the file or directory *src* to *dst*. If *dst* is a directory,
+ Rename the file or directory *src* to *dst*. If *dst* is a non-empty directory,
:exc:`OSError` will be raised. If *dst* exists and is a file, it will
be replaced silently if the user has permission. The operation may fail
if *src* and *dst* are on different filesystems. If successful,
Add a path to the DLL search path.
This search path is used when resolving dependencies for imported
- extension modules (the module itself is resolved through sys.path),
- and also by :mod:`ctypes`.
+ extension modules (the module itself is resolved through
+ :data:`sys.path`), and also by :mod:`ctypes`.
Remove the directory by calling **close()** on the returned object
or using it in a :keyword:`with` statement.
:deprecated:
.. deprecated:: 3.11
- The :mod:`ossaudiodev` module is deprecated (see :pep:`594` for details).
+ The :mod:`ossaudiodev` module is deprecated
+ (see :pep:`PEP 594 <594#ossaudiodev>` for details).
--------------
Rename this file or directory to the given *target*, and return a new Path
instance pointing to *target*. On Unix, if *target* exists and is a file,
- it will be replaced silently if the user has permission. *target* can be
- either a string or another path object::
+ it will be replaced silently if the user has permission.
+ On Windows, if *target* exists, :exc:`FileExistsError` will be raised.
+ *target* can be either a string or another path object::
>>> p = Path('foo')
>>> p.open('w').write('some text')
Rename this file or directory to the given *target*, and return a new Path
instance pointing to *target*. If *target* points to an existing file or
- directory, it will be unconditionally replaced.
+ empty directory, it will be unconditionally replaced.
The target path may be absolute or relative. Relative paths are interpreted
relative to the current working directory, *not* the directory of the Path
single ``;`` is not used as it is the separator for multiple commands in a line
that is passed to the Python parser.) No intelligence is applied to separating
the commands; the input is split at the first ``;;`` pair, even if it is in the
-middle of a quoted string.
+middle of a quoted string. A workaround for strings with double semicolons
+is to use implicit string concatenation ``';'';'`` or ``";"";"``.
.. index::
pair: .pdbrc; file
earlier versions of Python.
* Protocol version 2 was introduced in Python 2.3. It provides much more
- efficient pickling of :term:`new-style class`\es. Refer to :pep:`307` for
+ efficient pickling of :term:`new-style classes <new-style class>`. Refer to :pep:`307` for
information about improvements brought by protocol 2.
* Protocol version 3 was added in Python 3.0. It has explicit support for
protocol argument is needed. Bytes past the pickled representation
of the object are ignored.
- Arguments *file*, *fix_imports*, *encoding*, *errors*, *strict* and *buffers*
+ Arguments *fix_imports*, *encoding*, *errors*, *strict* and *buffers*
have the same meaning as in the :class:`Unpickler` constructor.
.. versionchanged:: 3.8
.. versionadded:: 3.3
- .. method:: reducer_override(self, obj)
+ .. method:: reducer_override(obj)
Special reducer that can be defined in :class:`Pickler` subclasses. This
method has priority over any reducer in the :attr:`dispatch_table`. It
The following types can be pickled:
-* ``None``, ``True``, and ``False``
-
-* integers, floating point numbers, complex numbers
+* ``None``, ``True``, and ``False``;
-* strings, bytes, bytearrays
+* integers, floating-point numbers, complex numbers;
-* tuples, lists, sets, and dictionaries containing only picklable objects
+* strings, bytes, bytearrays;
-* functions defined at the top level of a module (using :keyword:`def`, not
- :keyword:`lambda`)
+* tuples, lists, sets, and dictionaries containing only picklable objects;
-* built-in functions defined at the top level of a module
+* functions (built-in and user-defined) accessible from the top level of a
+ module (using :keyword:`def`, not :keyword:`lambda`);
-* classes that are defined at the top level of a module
+* classes accessible from the top level of a module;
* instances of such classes whose :attr:`~object.__dict__` or the result of
calling :meth:`__getstate__` is picklable (see section :ref:`pickle-inst` for
raised in this case. You can carefully raise this limit with
:func:`sys.setrecursionlimit`.
-Note that functions (built-in and user-defined) are pickled by "fully qualified"
-name reference, not by value. [#]_ This means that only the function name is
-pickled, along with the name of the module the function is defined in. Neither
+Note that functions (built-in and user-defined) are pickled by fully
+:term:`qualified name`, not by value. [#]_ This means that only the function name is
+pickled, along with the name of the containing module and classes. Neither
the function's code, nor any of its function attributes are pickled. Thus the
defining module must be importable in the unpickling environment, and the module
must contain the named object, otherwise an exception will be raised. [#]_
-Similarly, classes are pickled by named reference, so the same restrictions in
+Similarly, classes are pickled by fully qualified name, so the same restrictions in
the unpickling environment apply. Note that none of the class's code or data is
pickled, so in the following example the class attribute ``attr`` is not
restored in the unpickling environment::
picklestring = pickle.dumps(Foo)
-These restrictions are why picklable functions and classes must be defined in
+These restrictions are why picklable functions and classes must be defined at
the top level of a module.
Similarly, when class instances are pickled, their class's code and data are not
def save(obj):
return (obj.__class__, obj.__dict__)
- def load(cls, attributes):
+ def restore(cls, attributes):
obj = cls.__new__(cls)
obj.__dict__.update(attributes)
return obj
f = io.BytesIO()
p = MyPickler(f)
-does the same, but all instances of ``MyPickler`` will by default
-share the same dispatch table. The equivalent code using the
-:mod:`copyreg` module is ::
+does the same, but all instances of ``MyPickler`` will by default
+share the private dispatch table. On the other hand, the code ::
copyreg.pickle(SomeClass, reduce_SomeClass)
f = io.BytesIO()
p = pickle.Pickler(f)
+modifies the global dispatch table shared by all users of the :mod:`copyreg` module.
+
.. _pickle-state:
Handling Stateful Objects
"""Helper function analogous to pickle.loads()."""
return RestrictedUnpickler(io.BytesIO(s)).load()
-A sample usage of our unpickler working has intended::
+A sample usage of our unpickler working as intended::
>>> restricted_loads(pickle.dumps([1, 2, range(15)]))
[1, 2, range(0, 15)]
# An arbitrary collection of objects supported by pickle.
data = {
- 'a': [1, 2.0, 3, 4+6j],
+ 'a': [1, 2.0, 3+4j],
'b': ("character string", b"byte string"),
'c': {None, True, False}
}
operations.
.. [#] The limitation on alphanumeric characters is due to the fact
- the persistent IDs, in protocol 0, are delimited by the newline
+ that persistent IDs in protocol 0 are delimited by the newline
character. Therefore if any kind of newline characters occurs in
- persistent IDs, the resulting pickle will become unreadable.
+ persistent IDs, the resulting pickled data will become unreadable.
**Source code:** :source:`Lib/pipes.py`
.. deprecated:: 3.11
- The :mod:`pipes` module is deprecated (see :pep:`594` for details).
+ The :mod:`pipes` module is deprecated
+ (see :pep:`PEP 594 <594#pipes>` for details).
+ Please use the :mod:`subprocess` module instead.
--------------
__path__ = extend_path(__path__, __name__)
This will add to the package's ``__path__`` all subdirectories of directories
- on ``sys.path`` named after the package. This is useful if one wants to
+ on :data:`sys.path` named after the package. This is useful if one wants to
distribute different parts of a single logical package as multiple
directories.
Yield :term:`finder` objects for the given module name.
- If fullname contains a '.', the finders will be for the package
+ If fullname contains a ``'.'``, the finders will be for the package
containing fullname, otherwise they will be all registered top level
- finders (i.e. those on both sys.meta_path and sys.path_hooks).
+ finders (i.e. those on both :data:`sys.meta_path` and :data:`sys.path_hooks`).
If the named module is in a package, that package is imported as a side
effect of invoking this function.
.. function:: iter_modules(path=None, prefix='')
Yields :class:`ModuleInfo` for all submodules on *path*, or, if
- *path* is ``None``, all top-level modules on ``sys.path``.
+ *path* is ``None``, all top-level modules on :data:`sys.path`.
*path* should be either ``None`` or a list of paths to look for modules in.
.. function:: release()
- Returns the system's release, e.g. ``'2.2.0'`` or ``'NT'`` An empty string is
+ Returns the system's release, e.g. ``'2.2.0'`` or ``'NT'``. An empty string is
returned if the value cannot be determined.
Entries which cannot be determined are set to ``''``.
.. versionchanged:: 3.3
- Result changed from a tuple to a namedtuple.
+ Result changed from a tuple to a :func:`~collections.namedtuple`.
Java Platform
Get additional version information from the Windows Registry and return a tuple
``(release, version, csd, ptype)`` referring to OS release, version number,
- CSD level (service pack) and OS type (multi/single processor).
+ CSD level (service pack) and OS type (multi/single processor). Values which
+ cannot be determined are set to the defaults given as parameters (which all
+ default to an empty string).
As a hint: *ptype* is ``'Uniprocessor Free'`` on single processor NT machines
and ``'Multiprocessor Free'`` on multi processor machines. The *'Free'* refers
.. function:: win32_edition()
- Returns a string representing the current Windows edition. Possible
- values include but are not limited to ``'Enterprise'``, ``'IoTUAP'``,
- ``'ServerStandard'``, and ``'nanoserver'``.
+ Returns a string representing the current Windows edition, or ``None`` if the
+ value cannot be determined. Possible values include but are not limited to
+ ``'Enterprise'``, ``'IoTUAP'``, ``'ServerStandard'``, and ``'nanoserver'``.
.. versionadded:: 3.8
.. method:: Queue.put_nowait(item)
- Equivalent to ``put(item, False)``.
+ Equivalent to ``put(item, block=False)``.
.. method:: Queue.get(block=True, timeout=None)
.. method:: SimpleQueue.put_nowait(item)
- Equivalent to ``put(item)``, provided for compatibility with
+ Equivalent to ``put(item, block=False)``, provided for compatibility with
:meth:`Queue.put_nowait`.
regular expressions. Most non-trivial applications always use the compiled
form.
+
+Flags
+^^^^^
+
.. versionchanged:: 3.6
Flag constants are now instances of :class:`RegexFlag`, which is a subclass of
:class:`enum.IntFlag`.
-.. function:: compile(pattern, flags=0)
-
- Compile a regular expression pattern into a :ref:`regular expression object
- <re-objects>`, which can be used for matching using its
- :func:`~Pattern.match`, :func:`~Pattern.search` and other methods, described
- below.
-
- The expression's behaviour can be modified by specifying a *flags* value.
- Values can be any of the following variables, combined using bitwise OR (the
- ``|`` operator).
-
- The sequence ::
-
- prog = re.compile(pattern)
- result = prog.match(string)
-
- is equivalent to ::
-
- result = re.match(pattern, string)
-
- but using :func:`re.compile` and saving the resulting regular expression
- object for reuse is more efficient when the expression will be used several
- times in a single program.
-
- .. note::
-
- The compiled versions of the most recent patterns passed to
- :func:`re.compile` and the module-level matching functions are cached, so
- programs that use only a few regular expressions at a time needn't worry
- about compiling regular expressions.
.. data:: A
Corresponds to the inline flag ``(?x)``.
+Functions
+^^^^^^^^^
+
+.. function:: compile(pattern, flags=0)
+
+ Compile a regular expression pattern into a :ref:`regular expression object
+ <re-objects>`, which can be used for matching using its
+ :func:`~Pattern.match`, :func:`~Pattern.search` and other methods, described
+ below.
+
+ The expression's behaviour can be modified by specifying a *flags* value.
+ Values can be any of the following variables, combined using bitwise OR (the
+ ``|`` operator).
+
+ The sequence ::
+
+ prog = re.compile(pattern)
+ result = prog.match(string)
+
+ is equivalent to ::
+
+ result = re.match(pattern, string)
+
+ but using :func:`re.compile` and saving the resulting regular expression
+ object for reuse is more efficient when the expression will be used several
+ times in a single program.
+
+ .. note::
+
+ The compiled versions of the most recent patterns passed to
+ :func:`re.compile` and the module-level matching functions are cached, so
+ programs that use only a few regular expressions at a time needn't worry
+ about compiling regular expressions.
+
+
.. function:: search(pattern, string, flags=0)
Scan through *string* looking for the first location where the regular expression
Clear the regular expression cache.
+Exceptions
+^^^^^^^^^^
+
.. exception:: error(msg, pattern=None, pos=None)
Exception raised when a string passed to one of the functions here is not a
run this way, as well as ensuring the real module name is always
accessible as ``__spec__.name``.
-.. function:: run_path(file_path, init_globals=None, run_name=None)
+.. function:: run_path(path_name, init_globals=None, run_name=None)
.. index::
module: __main__
A number of alterations are also made to the :mod:`sys` module. Firstly,
``sys.path`` may be altered as described above. ``sys.argv[0]`` is updated
- with the value of ``file_path`` and ``sys.modules[__name__]`` is updated
+ with the value of ``path_name`` and ``sys.modules[__name__]`` is updated
with a temporary module object for the module being executed. All
modifications to items in :mod:`sys` are reverted before the function
returns.
.. testcode::
import secrets
- url = 'https://mydomain.com/reset=' + secrets.token_urlsafe()
+ url = 'https://example.com/reset=' + secrets.token_urlsafe()
Copies the file *src* to the file or directory *dst*. *src* and *dst*
should be :term:`path-like objects <path-like object>` or strings. If
*dst* specifies a directory, the file will be copied into *dst* using the
- base filename from *src*. Returns the path to the newly created file.
+ base filename from *src*. If *dst* specifies a file that already exists,
+ it will be replaced. Returns the path to the newly created file.
If *follow_symlinks* is false, and *src* is a symbolic link,
*dst* will be created as a symbolic link. If *follow_symlinks*
dirs_exist_ok=False)
Recursively copy an entire directory tree rooted at *src* to a directory
- named *dst* and return the destination directory. *dirs_exist_ok* dictates
- whether to raise an exception in case *dst* or any missing parent directory
- already exists.
+ named *dst* and return the destination directory. All intermediate
+ directories needed to contain *dst* will also be created by default.
Permissions and times of directories are copied with :func:`copystat`,
individual files are copied using :func:`~shutil.copy2`.
If *copy_function* is given, it must be a callable that will be used to copy
each file. It will be called with the source path and the destination path
- as arguments. By default, :func:`~shutil.copy2` is used, but any function
- that supports the same signature (like :func:`~shutil.copy`) can be used.
+ as arguments. By default, :func:`~shutil.copy2` is used, but any function
+ that supports the same signature (like :func:`~shutil.copy`) can be used.
+
+ If *dirs_exist_ok* is false (the default) and *dst* already exists, a
+ :exc:`FileExistsError` is raised. If *dirs_exist_ok* is true, the copying
+ operation will continue if it encounters existing directories, and files
+ within the *dst* tree will be overwritten by corresponding files from the
+ *src* tree.
.. audit-event:: shutil.copytree src,dst shutil.copytree
.. versionchanged:: 3.2
Added the *copy_function* argument to be able to provide a custom copy
function.
- Added the *ignore_dangling_symlinks* argument to silent dangling symlinks
+ Added the *ignore_dangling_symlinks* argument to silence dangling symlinks
errors when *symlinks* is false.
.. versionchanged:: 3.8
copytree example
~~~~~~~~~~~~~~~~
-This example is the implementation of the :func:`copytree` function, described
-above, with the docstring omitted. It demonstrates many of the other functions
-provided by this module. ::
-
- def copytree(src, dst, symlinks=False):
- names = os.listdir(src)
- os.makedirs(dst)
- errors = []
- for name in names:
- srcname = os.path.join(src, name)
- dstname = os.path.join(dst, name)
- try:
- if symlinks and os.path.islink(srcname):
- linkto = os.readlink(srcname)
- os.symlink(linkto, dstname)
- elif os.path.isdir(srcname):
- copytree(srcname, dstname, symlinks)
- else:
- copy2(srcname, dstname)
- # XXX What about devices, sockets etc.?
- except OSError as why:
- errors.append((srcname, dstname, str(why)))
- # catch the Error from the recursive copytree so that we can
- # continue with other files
- except Error as err:
- errors.extend(err.args[0])
- try:
- copystat(src, dst)
- except OSError as why:
- # can't copy file access times on Windows
- if why.winerror is None:
- errors.extend((src, dst, str(why)))
- if errors:
- raise Error(errors)
-
-Another example that uses the :func:`ignore_patterns` helper::
+An example that uses the :func:`ignore_patterns` helper::
from shutil import copytree, ignore_patterns
.. audit-event:: shutil.unpack_archive filename,extract_dir,format shutil.unpack_archive
+ .. warning::
+
+ Never extract archives from untrusted sources without prior inspection.
+ It is possible that files are created outside of the path specified in
+ the *extract_dir* argument, e.g. members that have absolute filenames
+ starting with "/" or filenames with two dots "..".
+
.. versionchanged:: 3.7
Accepts a :term:`path-like object` for *filename* and *extract_dir*.
-
.. function:: register_unpack_format(name, extensions, function[, extra_args[, description]])
Registers an unpack format. *name* is the name of the format and
arbitrary amount of time, regardless of any signals received. The Python
signal handlers will be called when the calculation finishes.
+* If the handler raises an exception, it will be raised "out of thin air" in
+ the main thread. See the :ref:`note below <handlers-and-exceptions>` for a
+ discussion.
.. _signals-and-threads:
if __name__ == '__main__':
main()
-Do not set :const:`SIGPIPE`'s disposition to :const:`SIG_DFL`
-in order to avoid :exc:`BrokenPipeError`. Doing that would cause
-your program to exit unexpectedly also whenever any socket connection
-is interrupted while your program is still writing to it.
+Do not set :const:`SIGPIPE`'s disposition to :const:`SIG_DFL` in
+order to avoid :exc:`BrokenPipeError`. Doing that would cause
+your program to exit unexpectedly whenever any socket
+connection is interrupted while your program is still writing to
+it.
+
+.. _handlers-and-exceptions:
+
+Note on Signal Handlers and Exceptions
+--------------------------------------
+
+If a signal handler raises an exception, the exception will be propagated to
+the main thread and may be raised after any :term:`bytecode` instruction. Most
+notably, a :exc:`KeyboardInterrupt` may appear at any point during execution.
+Most Python code, including the standard library, cannot be made robust against
+this, and so a :exc:`KeyboardInterrupt` (or any other exception resulting from
+a signal handler) may on rare occasions put the program in an unexpected state.
+
+To illustrate this issue, consider the following code::
+
+ class SpamContext:
+ def __init__(self):
+ self.lock = threading.Lock()
+
+ def __enter__(self):
+ # If KeyboardInterrupt occurs here, everything is fine
+ self.lock.acquire()
+ # If KeyboardInterrupt occurs here, __exit__ will not be called
+ ...
+ # KeyboardInterrupt could occur just before the function returns
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ ...
+ self.lock.release()
+
+For many programs, especially those that merely want to exit on
+:exc:`KeyboardInterrupt`, this is not a problem, but applications that are
+complex or require high reliability should avoid raising exceptions from signal
+handlers. They should also avoid catching :exc:`KeyboardInterrupt` as a means
+of gracefully shutting down. Instead, they should install their own
+:const:`SIGINT` handler. Below is an example of an HTTP server that avoids
+:exc:`KeyboardInterrupt`::
+
+ import signal
+ import socket
+ from selectors import DefaultSelector, EVENT_READ
+ from http.server import HTTPServer, SimpleHTTPRequestHandler
+
+ interrupt_read, interrupt_write = socket.socketpair()
+
+ def handler(signum, frame):
+ print('Signal handler called with signal', signum)
+ interrupt_write.send(b'\0')
+ signal.signal(signal.SIGINT, handler)
+
+ def serve_forever(httpd):
+ sel = DefaultSelector()
+ sel.register(interrupt_read, EVENT_READ)
+ sel.register(httpd, EVENT_READ)
+
+ while True:
+ for key, _ in sel.select():
+ if key.fileobj == interrupt_read:
+ interrupt_read.recv(1)
+ return
+ if key.fileobj == httpd:
+ httpd.handle_request()
+
+ print("Serving on port 8000")
+ httpd = HTTPServer(('', 8000), SimpleHTTPRequestHandler)
+ serve_forever(httpd)
+ print("Shutdown...")
This module offers several classes to implement SMTP (email) servers.
.. deprecated:: 3.6
- :mod:`smtpd` will be removed in Python 3.12 (:pep:`594`).
+ :mod:`smtpd` will be removed in Python 3.12
+ (see :pep:`PEP 594 <594#smtpd>` for details).
The `aiosmtpd <https://aiosmtpd.readthedocs.io/>`_ package is a recommended
replacement for this module. It is based on :mod:`asyncio` and provides a
more straightforward API.
single: u-LAW
.. deprecated:: 3.11
- The :mod:`sndhdr` module is deprecated (see :pep:`594` for details).
+ The :mod:`sndhdr` module is deprecated
+ (see :pep:`PEP 594 <594#sndhdr>` for details and alternatives).
--------------
numeric address in *host* portion.
All errors raise exceptions. The normal exceptions for invalid argument types
-and out-of-memory conditions can be raised; starting from Python 3.3, errors
+and out-of-memory conditions can be raised. Errors
related to socket or address semantics raise :exc:`OSError` or one of its
-subclasses (they used to raise :exc:`socket.error`).
+subclasses.
Non-blocking mode is supported through :meth:`~socket.setblocking`. A
generalization of this based on timeouts is supported through
:deprecated:
.. deprecated:: 3.11
- The :mod:`spwd` module is deprecated (see :pep:`594` for details).
+ The :mod:`spwd` module is deprecated
+ (see :pep:`PEP 594 <594#spwd>` for details and alternatives).
--------------
.. method:: execute(sql[, parameters])
- This is a nonstandard shortcut that creates a cursor object by calling
- the :meth:`~Connection.cursor` method, calls the cursor's
- :meth:`~Cursor.execute` method with the *parameters* given, and returns
- the cursor.
+ Create a new :class:`Cursor` object and call
+ :meth:`~Cursor.execute` on it with the given *sql* and *parameters*.
+ Return the new cursor object.
.. method:: executemany(sql[, parameters])
- This is a nonstandard shortcut that creates a cursor object by
- calling the :meth:`~Connection.cursor` method, calls the cursor's
- :meth:`~Cursor.executemany` method with the *parameters* given, and
- returns the cursor.
+ Create a new :class:`Cursor` object and call
+ :meth:`~Cursor.executemany` on it with the given *sql* and *parameters*.
+ Return the new cursor object.
.. method:: executescript(sql_script)
- This is a nonstandard shortcut that creates a cursor object by
- calling the :meth:`~Connection.cursor` method, calls the cursor's
- :meth:`~Cursor.executescript` method with the given *sql_script*, and
- returns the cursor.
+ Create a new :class:`Cursor` object and call
+ :meth:`~Cursor.executescript` on it with the given *sql_script*.
+ Return the new cursor object.
.. method:: create_function(name, num_params, func, *, deterministic=False)
.. method:: create_collation(name, callable)
- Creates a collation with the specified *name* and *callable*. The callable will
- be passed two string arguments. It should return -1 if the first is ordered
- lower than the second, 0 if they are ordered equal and 1 if the first is ordered
- higher than the second. Note that this controls sorting (ORDER BY in SQL) so
- your comparisons don't affect other SQL operations.
+ Create a collation named *name* using the collating function *callable*.
+ *callable* is passed two :class:`string <str>` arguments,
+ and it should return an :class:`integer <int>`:
- Note that the callable will get its parameters as Python bytestrings, which will
- normally be encoded in UTF-8.
+ * ``1`` if the first is ordered higher than the second
+ * ``-1`` if the first is ordered lower than the second
+ * ``0`` if they are ordered equal
- The following example shows a custom collation that sorts "the wrong way":
+ The following example shows a reverse sorting collation:
.. literalinclude:: ../includes/sqlite3/collation_reverse.py
- To remove a collation, call ``create_collation`` with ``None`` as callable::
-
- con.create_collation("reverse", None)
+ Remove a collation function by setting *callable* to :const:`None`.
.. method:: interrupt()
Selects SSL version 2 as the channel encryption protocol.
This protocol is not available if OpenSSL is compiled with the
- ``OPENSSL_NO_SSL2`` flag.
+ ``no-ssl2`` option.
.. warning::
Selects SSL version 3 as the channel encryption protocol.
- This protocol is not be available if OpenSSL is compiled with the
- ``OPENSSL_NO_SSLv3`` flag.
+ This protocol is not available if OpenSSL is compiled with the
+ ``no-ssl3`` option.
.. warning::
.. method:: SSLSocket.version()
Return the actual SSL protocol version negotiated by the connection
- as a string, or ``None`` is no secure connection is established.
+ as a string, or ``None`` if no secure connection is established.
As of this writing, possible return values include ``"SSLv2"``,
``"SSLv3"``, ``"TLSv1"``, ``"TLSv1.1"`` and ``"TLSv1.2"``.
Recent OpenSSL versions may define more return values.
string must be the path to a single file in PEM format containing the
certificate as well as any number of CA certificates needed to establish
the certificate's authenticity. The *keyfile* string, if present, must
- point to a file containing the private key in. Otherwise the private
+ point to a file containing the private key. Otherwise the private
key will be taken from *certfile* as well. See the discussion of
:ref:`ssl-certificates` for more information on how the certificate
is stored in the *certfile*.
context.load_cert_chain(certfile="mycertfile", keyfile="mykeyfile")
bindsocket = socket.socket()
- bindsocket.bind(('myaddr.mydomain.com', 10023))
+ bindsocket.bind(('myaddr.example.com', 10023))
bindsocket.listen(5)
When a client connects, you'll call :meth:`accept` on the socket to get the
for f in os.listdir(top):
pathname = os.path.join(top, f)
- mode = os.stat(pathname).st_mode
+ mode = os.lstat(pathname).st_mode
if S_ISDIR(mode):
# It's a directory, recurse into it
walktree(pathname, callback)
depends on whether *encoding* or *errors* is given, as follows.
If neither *encoding* nor *errors* is given, ``str(object)`` returns
- :meth:`object.__str__() <object.__str__>`, which is the "informal" or nicely
+ :meth:`type(object).__str__(object) <object.__str__>`,
+ which is the "informal" or nicely
printable string representation of *object*. For string objects, this is
the string itself. If *object* does not have a :meth:`~object.__str__`
method, then :func:`str` falls back to returning
>>> "they're bill's friends from the UK".title()
"They'Re Bill'S Friends From The Uk"
- A workaround for apostrophes can be constructed using regular expressions::
+ The :func:`string.capwords` function does not have this problem, as it
+ splits words on spaces only.
+
+ Alternatively, a workaround for apostrophes can be constructed using regular
+ expressions::
>>> import re
>>> def titlecase(s):
since it is often more useful than e.g. ``bytes([46, 46, 46])``. You can
always convert a bytes object into a list of integers using ``list(b)``.
-.. note::
- For Python 2.x users: In the Python 2.x series, a variety of implicit
- conversions between 8-bit strings (the closest thing 2.x offers to a
- built-in binary data type) and Unicode strings were permitted. This was a
- backwards compatibility workaround to account for the fact that Python
- originally only supported 8-bit text, and Unicode text was a later
- addition. In Python 3.x, those implicit conversions are gone - conversions
- between 8-bit binary data and Unicode text must be explicit, and bytes and
- string objects will always compare unequal.
-
.. _typebytearray:
| | be used for Python2/3 code bases. | |
+------------+-----------------------------------------------------+-------+
| ``'a'`` | Bytes (converts any Python object using | \(5) |
-| | ``repr(obj).encode('ascii','backslashreplace)``). | |
+| | ``repr(obj).encode('ascii', 'backslashreplace')``). | |
+------------+-----------------------------------------------------+-------+
| ``'r'`` | ``'r'`` is an alias for ``'a'`` and should only | \(7) |
| | be used for Python2/3 code bases. | |
however, that since computers store floating-point numbers as approximations it
is usually unwise to use them as dictionary keys.)
-Dictionaries can be created by placing a comma-separated list of ``key: value``
-pairs within braces, for example: ``{'jack': 4098, 'sjoerd': 4127}`` or ``{4098:
-'jack', 4127: 'sjoerd'}``, or by the :class:`dict` constructor.
-
.. class:: dict(**kwargs)
dict(mapping, **kwargs)
dict(iterable, **kwargs)
function for all use cases it can handle. For more advanced use cases, the
underlying :class:`Popen` interface can be used directly.
-The :func:`run` function was added in Python 3.5; if you need to retain
-compatibility with older versions, see the :ref:`call-function-trio` section.
-
.. function:: run(args, *, stdin=None, input=None, stdout=None, stderr=None,\
capture_output=False, shell=False, cwd=None, timeout=None, \
.. exception:: CalledProcessError
Subclass of :exc:`SubprocessError`, raised when a process run by
- :func:`check_call` or :func:`check_output` returns a non-zero exit status.
+ :func:`check_call`, :func:`check_output`, or :func:`run` (with ``check=True``)
+ returns a non-zero exit status.
+
.. attribute:: returncode
**Source code:** :source:`Lib/sunau.py`
.. deprecated:: 3.11
- The :mod:`sunau` module is deprecated (see :pep:`594` for details).
+ The :mod:`sunau` module is deprecated
+ (see :pep:`PEP 594 <594#sunau>` for details).
--------------
crypt.rst
imghdr.rst
imp.rst
+ mailcap.rst
msilib.rst
- nntplib.rst
nis.rst
+ nntplib.rst
optparse.rst
ossaudiodev.rst
pipes.rst
.. index:: single: protocol; Telnet
.. deprecated:: 3.11
- The :mod:`telnetlib` module is deprecated (see :pep:`594` for details).
+ The :mod:`telnetlib` module is deprecated
+ (see :pep:`PEP 594 <594#telnetlib>` for details and alternatives).
--------------
file-like object. Whether the name can be
used to open the file a second time, while the named temporary file is
still open, varies across platforms (it can be so used on Unix; it cannot
- on Windows NT or later). If *delete* is true (the default), the file is
+ on Windows). If *delete* is true (the default), the file is
deleted as soon as it is closed.
The returned object is always a file-like object whose :attr:`!file`
attribute is the underlying true file object. This file-like object can
.. data:: MISSING_C_DOCSTRINGS
- Return ``True`` if running on CPython, not on Windows, and configuration
- not set with ``WITH_DOC_STRINGS``.
+ Set to ``True`` if Python is built without docstrings (the
+ :c:macro:`WITH_DOC_STRINGS` macro is not defined).
+ See the :option:`configure --without-doc-strings <--without-doc-strings>` option.
+
+ See also the :data:`HAVE_DOCSTRINGS` variable.
.. data:: HAVE_DOCSTRINGS
- Check for presence of docstrings.
+ Set to ``True`` if function docstrings are available.
+ See the :option:`python -OO <-O>` option, which strips docstrings of functions implemented in Python.
+
+ See also the :data:`MISSING_C_DOCSTRINGS` variable.
.. data:: TEST_HTTP_URL
Used when tests are executed by :mod:`test.regrtest`.
-.. function:: system_must_validate_cert(f)
-
- Raise :exc:`unittest.SkipTest` on TLS certification validation failures.
-
-
.. function:: sortdict(dict)
Return a repr of *dict* with keys sorted.
.. function:: match_test(test)
- Match *test* to patterns set in :func:`set_match_tests`.
+ Determine whether *test* matches the patterns set in :func:`set_match_tests`.
-.. function:: set_match_tests(patterns)
+.. function:: set_match_tests(accept_patterns=None, ignore_patterns=None)
- Define match test with regular expression *patterns*.
+ Define match patterns on test filenames and test method names for filtering tests.
.. function:: run_unittest(*classes)
.. function:: check_impl_detail(**guards)
Use this check to guard CPython's implementation-specific tests or to
- run them only on the implementations guarded by the arguments::
+ run them only on the implementations guarded by the arguments. This
+ function returns ``True`` or ``False`` depending on the host platform.
+ Example usage::
check_impl_detail() # Only on CPython (default).
check_impl_detail(jython=True) # Only on Jython.
time the regrtest began.
-.. function:: get_original_stdout
+.. function:: get_original_stdout()
Return the original stdout set by :func:`record_original_stdout` or
``sys.stdout`` if it's not set.
.. function:: disable_faulthandler()
- A context manager that replaces ``sys.stderr`` with ``sys.__stderr__``.
- A context manager that temporarily disables :mod:`faulthandler`.
.. function:: gc_collect()
.. function:: disable_gc()
- A context manager that disables the garbage collector upon entry and
- reenables it upon exit.
+ A context manager that disables the garbage collector on entry. On
+ exit, the garbage collector is restored to its prior state.
.. function:: swap_attr(obj, attr, new_val)
.. function:: calcobjsize(fmt)
- Return :func:`struct.calcsize` for ``nP{fmt}0n`` or, if ``gettotalrefcount``
- exists, ``2PnP{fmt}0P``.
+ Return the size of the :c:type:`PyObject` whose structure members are
+ defined by *fmt*. The returned value includes the size of the Python object header and alignment.
.. function:: calcvobjsize(fmt)
- Return :func:`struct.calcsize` for ``nPn{fmt}0n`` or, if ``gettotalrefcount``
- exists, ``2PnPn{fmt}0P``.
+ Return the size of the :c:type:`PyVarObject` whose structure members are
+ defined by *fmt*. The returned value includes the size of the Python object header and alignment.
.. function:: checksizeof(test, o, size)
have an associated comment identifying the relevant tracker issue.
+.. function:: system_must_validate_cert(f)
+
+ A decorator that skips the decorated test on TLS certification validation failures.
+
+
.. decorator:: run_with_locale(catstr, *locales)
A decorator for running a function in a different locale, correctly
.. decorator:: requires_freebsd_version(*min_version)
Decorator for the minimum version when running test on FreeBSD. If the
- FreeBSD version is less than the minimum, raise :exc:`unittest.SkipTest`.
+ FreeBSD version is less than the minimum, the test is skipped.
.. decorator:: requires_linux_version(*min_version)
Decorator for the minimum version when running test on Linux. If the
- Linux version is less than the minimum, raise :exc:`unittest.SkipTest`.
+ Linux version is less than the minimum, the test is skipped.
.. decorator:: requires_mac_version(*min_version)
Decorator for the minimum version when running test on macOS. If the
- macOS version is less than the minimum, raise :exc:`unittest.SkipTest`.
+ macOS version is less than the minimum, the test is skipped.
.. decorator:: requires_IEEE_754
Decorator for only running the test if :data:`HAVE_DOCSTRINGS`.
-.. decorator:: cpython_only(test)
+.. decorator:: cpython_only
Decorator for tests only applicable to CPython.
returns ``False``, then uses *msg* as the reason for skipping the test.
-.. decorator:: no_tracing(func)
+.. decorator:: no_tracing
Decorator to temporarily turn off tracing for the duration of the test.
-.. decorator:: refcount_test(test)
+.. decorator:: refcount_test
Decorator for tests which involve reference counting. The decorator does
not run the test if it is not run by CPython. Any trace function is unset
means the test doesn't support dummy runs when ``-M`` is not specified.
-.. decorator:: bigaddrspacetest(f)
+.. decorator:: bigaddrspacetest
- Decorator for tests that fill the address space. *f* is the function to
- wrap.
+ Decorator for tests that fill the address space.
.. function:: check_syntax_error(testcase, statement, errtext='', *, lineno=None, offset=None)
.. function:: check_free_after_iterating(test, iter, cls, args=())
- Assert that *iter* is deallocated after iterating.
+ Assert instances of *cls* are deallocated after iterating.
.. function:: missing_compiler_executable(cmd_names=[])
Class to save and restore signal handlers registered by the Python signal
handler.
+ .. method:: save(self)
+
+ Save the signal handlers to a dictionary mapping signal numbers to the
+ current signal handler.
+
+ .. method:: restore(self)
+
+ Set the signal numbers from the :meth:`save` dictionary to the saved
+ handler.
+
.. class:: Matcher()
variables *env_vars* succeeds (``rc == 0``) and return a ``(return code,
stdout, stderr)`` tuple.
- If the ``__cleanenv`` keyword is set, *env_vars* is used as a fresh
+ If the *__cleanenv* keyword-only parameter is set, *env_vars* is used as a fresh
environment.
Python is started in isolated mode (command line option ``-I``),
- except if the ``__isolated`` keyword is set to ``False``.
+ except if the *__isolated* keyword-only parameter is set to ``False``.
.. versionchanged:: 3.9
The function no longer strips whitespaces from *stderr*.
is still alive after *timeout* seconds.
-.. decorator:: reap_threads(func)
+.. decorator:: reap_threads
Decorator to ensure the threads are cleaned up even if the test fails.
.. function:: start_threads(threads, unlock=None)
- Context manager to start *threads*. It attempts to join the threads upon
- exit.
+ Context manager to start *threads*, which is a sequence of threads.
+ *unlock* is a function called after the threads are started, even if an
+ exception was raised; an example would be :meth:`threading.Event.set`.
+ ``start_threads`` will attempt to join the started threads upon exit.
.. function:: threading_cleanup(*original_values)
.. data:: TESTFN_NONASCII
- Set to a filename containing the :data:`FS_NONASCII` character.
+ Set to a filename containing the :data:`FS_NONASCII` character, if it exists.
+ This guarantees that if the filename exists, it can be encoded and decoded
+ with the default filesystem encoding. This allows tests that require a
+ non-ASCII filename to be easily skipped on platforms where they can't work.
.. data:: TESTFN_UNENCODABLE
.. function:: rmdir(filename)
Call :func:`os.rmdir` on *filename*. On Windows platforms, this is
- wrapped with a wait loop that checks for the existence of the file.
+ wrapped with a wait loop that checks for the existence of the file,
+ which is needed due to antivirus programs that can hold files open and prevent
+ deletion.
.. function:: rmtree(path)
Call :func:`shutil.rmtree` on *path* or call :func:`os.lstat` and
- :func:`os.rmdir` to remove a path and its contents. On Windows platforms,
+ :func:`os.rmdir` to remove a path and its contents. As with :func:`rmdir`,
+ on Windows platforms,
this is wrapped with a wait loop that checks for the existence of the files.
.. function:: unlink(filename)
- Call :func:`os.unlink` on *filename*. On Windows platforms, this is
+ Call :func:`os.unlink` on *filename*. As with :func:`rmdir`,
+ on Windows platforms, this is
wrapped with a wait loop that checks for the existence of the file.
.. versionadded:: 3.1
-.. function:: import_module(name, deprecated=False, *, required_on())
+.. function:: import_module(name, deprecated=False, *, required_on=())
This function imports and returns the named module. Unlike a normal
import, this function raises :exc:`unittest.SkipTest` if the module
A context manager to force import to return a new module reference. This
is useful for testing module-level behaviors, such as the emission of a
- DeprecationWarning on import. Example usage::
+ :exc:`DeprecationWarning` on import. Example usage::
with CleanImport('foo'):
importlib.import_module('foo') # New reference.
.. class:: DirsOnSysPath(*paths)
- A context manager to temporarily add directories to sys.path.
+ A context manager to temporarily add directories to :data:`sys.path`.
This makes a copy of :data:`sys.path`, appends any directories given
as positional arguments, then reverts :data:`sys.path` to the copied
from tkinter import ttk
-.. class:: Tk(screenName=None, baseName=None, className='Tk', useTk=1)
-
- The :class:`Tk` class is instantiated without arguments. This creates a toplevel
- widget of Tk which usually is the main window of an application. Each instance
- has its own associated Tcl interpreter.
-
- .. FIXME: The following keyword arguments are currently recognized:
-
-
-.. function:: Tcl(screenName=None, baseName=None, className='Tk', useTk=0)
+.. class:: Tk(screenName=None, baseName=None, className='Tk', useTk=True, sync=False, use=None)
+
+ Construct a toplevel Tk widget, which is usually the main window of an
+ application, and initialize a Tcl interpreter for this widget. Each
+ instance has its own associated Tcl interpreter.
+
+ The :class:`Tk` class is typically instantiated using all default values.
+ However, the following keyword arguments are currently recognized:
+
+ *screenName*
+ When given (as a string), sets the :envvar:`DISPLAY` environment
+ variable. (X11 only)
+ *baseName*
+ Name of the profile file. By default, *baseName* is derived from the
+ program name (``sys.argv[0]``).
+ *className*
+ Name of the widget class. Used as a profile file and also as the name
+ with which Tcl is invoked (*argv0* in *interp*).
+ *useTk*
+ If ``True``, initialize the Tk subsystem. The :func:`tkinter.Tcl() <Tcl>`
+ function sets this to ``False``.
+ *sync*
+ If ``True``, execute all X server commands synchronously, so that errors
+ are reported immediately. Can be used for debugging. (X11 only)
+ *use*
+ Specifies the *id* of the window in which to embed the application,
+ instead of it being created as an independent toplevel window. *id* must
+ be specified in the same way as the value for the -use option for
+ toplevel widgets (that is, it has a form like that returned by
+ :meth:`winfo_id`).
+
+ Note that on some platforms this will only work correctly if *id* refers
+ to a Tk frame or toplevel that has its -container option enabled.
+
+ :class:`Tk` reads and interprets profile files, named
+ :file:`.{className}.tcl` and :file:`.{baseName}.tcl`, into the Tcl
+ interpreter and calls :func:`exec` on the contents of
+ :file:`.{className}.py` and :file:`.{baseName}.py`. The path for the
+ profile files is the :envvar:`HOME` environment variable or, if that
+ isn't defined, then :attr:`os.curdir`.
+
+ .. attribute:: tk
+
+ The Tk application object created by instantiating :class:`Tk`. This
+ provides access to the Tcl interpreter. Each widget that is attached
+ to the same instance of :class:`Tk` has the same value for its :attr:`tk`
+ attribute.
+
+ .. attribute:: master
+
+ The widget object that contains this widget. For :class:`Tk`, the
+ *master* is :const:`None` because it is the main window. The terms
+ *master* and *parent* are similar and sometimes used interchangeably
+ as argument names; however, calling :meth:`winfo_parent` returns a
+ string of the widget name whereas :attr:`master` returns the object.
+ *parent*/*child* reflects the tree-like relationship while
+ *master*/*slave* reflects the container structure.
+
+ .. attribute:: children
+
+ The immediate descendants of this widget as a :class:`dict` with the
+ child widget names as the keys and the child instance objects as the
+ values.
+
+
+.. function:: Tcl(screenName=None, baseName=None, className='Tk', useTk=False)
The :func:`Tcl` function is a factory function which creates an object much like
that created by the :class:`Tk` class, except that it does not initialize the Tk
where:
sequence
- is a string that denotes the target kind of event. (See the bind man page and
- page 201 of John Ousterhout's book for details).
+ is a string that denotes the target kind of event. (See the
+ :manpage:`bind(3tk)` man page, and page 201 of John Ousterhout's book,
+ :title-reference:`Tcl and the Tk Toolkit (2nd edition)`, for details).
func
is a Python function, taking one argument, to be invoked when the event occurs.
NewType
=======
-Use the :class:`NewType` helper class to create distinct types::
+Use the :class:`NewType` helper to create distinct types::
from typing import NewType
Note that these checks are enforced only by the static type checker. At runtime,
the statement ``Derived = NewType('Derived', Base)`` will make ``Derived`` a
-class that immediately returns whatever parameter you pass it. That means
+callable that immediately returns whatever parameter you pass it. That means
the expression ``Derived(some_value)`` does not create a new class or introduce
much overhead beyond that of a regular function call.
on_error: Callable[[int, Exception], None]) -> None:
# Body
+ async def on_update(value: str) -> None:
+ # Body
+ callback: Callable[[str], Awaitable[None]] = on_update
+
It is possible to declare the return type of a callable without specifying
the call signature by substituting a literal ellipsis
for the list of arguments in the type hint: ``Callable[..., ReturnType]``.
See :pep:`612` for more information.
.. seealso::
- The documentation for :class:`ParamSpec` and :class:`Concatenate` provide
+ The documentation for :class:`ParamSpec` and :class:`Concatenate` provides
examples of usage in ``Callable``.
.. _generics:
Furthermore, a generic with only one parameter specification variable will accept
parameter lists in the forms ``X[[Type1, Type2, ...]]`` and also
``X[Type1, Type2, ...]`` for aesthetic reasons. Internally, the latter is converted
-to the former and are thus equivalent::
+to the former, so the following are equivalent::
>>> class X(Generic[P]): ...
...
Nominal vs structural subtyping
===============================
-Initially :pep:`484` defined Python static type system as using
+Initially :pep:`484` defined the Python static type system as using
*nominal subtyping*. This means that a class ``A`` is allowed where
a class ``B`` is expected if and only if ``A`` is a subclass of ``B``.
def with_lock(f: Callable[Concatenate[Lock, P], R]) -> Callable[P, R]:
'''A type-safe decorator which provides a lock.'''
- global my_lock
def inner(*args: P.args, **kwargs: P.kwargs) -> R:
# Provide the lock as the first argument.
return f(my_lock, *args, **kwargs)
``no_type_check`` functionality that currently exists in the ``typing``
module which completely disables typechecking annotations on a function
or a class, the ``Annotated`` type allows for both static typechecking
- of ``T`` (e.g., via mypy or Pyre, which can safely ignore ``x``)
+ of ``T`` (which can safely ignore ``x``)
together with runtime access to ``x`` within a specific application.
Ultimately, the responsibility of how to interpret the annotations (if
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
- For example::
+ For example::
def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
'''Determines whether all objects in the list are strings'''
self.radius = radius
# Use a type variable to show that the return type
- # will always be an instance of whatever `cls` is
+ # will always be an instance of whatever ``cls`` is
@classmethod
def with_circumference(cls: type[C], circumference: float) -> C:
"""Create a circle with the specified circumference"""
use a :class:`TypeVar` with bound ``Callable[..., Any]``. However this
causes two problems:
- 1. The type checker can't type check the ``inner`` function because
- ``*args`` and ``**kwargs`` have to be typed :data:`Any`.
- 2. :func:`~cast` may be required in the body of the ``add_logging``
- decorator when returning the ``inner`` function, or the static type
- checker must be told to ignore the ``return inner``.
+ 1. The type checker can't type check the ``inner`` function because
+ ``*args`` and ``**kwargs`` have to be typed :data:`Any`.
+ 2. :func:`~cast` may be required in the body of the ``add_logging``
+ decorator when returning the ``inner`` function, or the static type
+ checker must be told to ignore the ``return inner``.
.. attribute:: args
.. attribute:: kwargs
The resulting class has an extra attribute ``__annotations__`` giving a
dict that maps the field names to the field types. (The field names are in
the ``_fields`` attribute and the default values are in the
- ``_field_defaults`` attribute both of which are part of the namedtuple
+ ``_field_defaults`` attribute, both of which are part of the :func:`~collections.namedtuple`
API.)
``NamedTuple`` subclasses can also have docstrings and methods::
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
The functional syntax should also be used when any of the keys are not valid
- :ref:`identifiers`, for example because they are keywords or contain hyphens.
+ :ref:`identifiers <identifiers>`, for example because they are keywords or contain hyphens.
Example::
# raises SyntaxError
y: int
z: int
- A ``TypedDict`` cannot inherit from a non-TypedDict class,
+ A ``TypedDict`` cannot inherit from a non-\ ``TypedDict`` class,
notably including :class:`Generic`. For example::
class X(TypedDict):
.. class:: Hashable
- An alias to :class:`collections.abc.Hashable`
+ An alias to :class:`collections.abc.Hashable`.
.. class:: Reversible(Iterable[T_co])
.. class:: Sized
- An alias to :class:`collections.abc.Sized`
+ An alias to :class:`collections.abc.Sized`.
Asynchronous programming
""""""""""""""""""""""""
...
class Sub(Base):
def done(self) -> None: # Error reported by type checker
- ...
+ ...
@final
class Leaf:
.. note::
- If ``from __future__ import annotations`` is used in Python 3.7 or later,
+ If ``from __future__ import annotations`` is used,
annotations are not evaluated at function definition time.
- Instead, they are stored as strings in ``__annotations__``,
- This makes it unnecessary to use quotes around the annotation.
+ Instead, they are stored as strings in ``__annotations__``.
+ This makes it unnecessary to use quotes around the annotation
(see :pep:`563`).
.. versionadded:: 3.5.2
+++ /dev/null
-.. _undoc:
-
-********************
-Undocumented Modules
-********************
-
-Here's a quick listing of modules that are currently undocumented, but that
-should be documented. Feel free to contribute documentation for them! (Send
-via email to docs@python.org.)
-
-The idea and original contents for this chapter were taken from a posting by
-Fredrik Lundh; the specific contents of this chapter have been substantially
-revised.
-
-
-Platform specific modules
-=========================
-
-These modules are used to implement the :mod:`os.path` module, and are not
-documented beyond this mention. There's little need to document these.
-
-:mod:`ntpath`
- --- Implementation of :mod:`os.path` on Win32 and Win64 platforms.
-
-:mod:`posixpath`
- --- Implementation of :mod:`os.path` on POSIX.
>>> mock = Mock(name='Thing', return_value=None)
>>> mock(1, 2, 3)
- >>> mock.assret_called_once_with(4, 5, 6)
+ >>> mock.assret_called_once_with(4, 5, 6) # Intentional typo!
Your tests can pass silently and incorrectly because of the typo.
>>> from urllib import request
>>> mock = Mock(spec=request.Request)
- >>> mock.assret_called_with
+ >>> mock.assret_called_with # Intentional typo!
Traceback (most recent call last):
...
AttributeError: Mock object has no attribute 'assret_called_with'
>>> mock.has_data()
<mock.Mock object at 0x...>
- >>> mock.has_data.assret_called_with()
+ >>> mock.has_data.assret_called_with() # Intentional typo!
Auto-speccing solves this problem. You can either pass ``autospec=True`` to
:func:`patch` / :func:`patch.object` or use the :func:`create_autospec` function to create a
>>> req.add_header('spam', 'eggs')
<MagicMock name='request.Request().add_header()' id='...'>
- >>> req.add_header.assret_called_with
+ >>> req.add_header.assret_called_with # Intentional typo!
Traceback (most recent call last):
...
AttributeError: Mock object has no attribute 'assret_called_with'
after :func:`setUpModule` if :func:`setUpModule` raises an exception.
It is responsible for calling all the cleanup functions added by
- :func:`addCleanupModule`. If you need cleanup functions to be called
+ :func:`addModuleCleanup`. If you need cleanup functions to be called
*prior* to :func:`tearDownModule` then you can call
:func:`doModuleCleanups` yourself.
(X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11"``, while
:mod:`urllib`'s default user agent string is
``"Python-urllib/2.6"`` (on Python 2.6).
+ All header keys are sent in camel case.
An appropriate ``Content-Type`` header should be included if the *data*
argument is present. If this header has not been provided and *data*
name, and later calls will overwrite previous calls in case the *key* collides.
Currently, this is no loss of HTTP functionality, since all headers which have
meaning when used more than once have a (header-specific) way of gaining the
- same functionality using only one header.
+ same functionality using only one header. Note that headers added using
+ this method are also added to redirected requests.
.. method:: Request.add_unredirected_header(key, header)
This method, if implemented, will be called by the parent
:class:`OpenerDirector`. It should return a file-like object as described in
- the return value of the :meth:`open` of :class:`OpenerDirector`, or ``None``.
+ the return value of the :meth:`~OpenerDirector.open` method of :class:`OpenerDirector`, or ``None``.
It should raise :exc:`~urllib.error.URLError`, unless a truly exceptional
thing happens (for example, :exc:`MemoryError` should not be mapped to
:exc:`URLError`).
**Source code:** :source:`Lib/uu.py`
.. deprecated:: 3.11
- The :mod:`uu` module is deprecated (see :pep:`594` for details).
+ The :mod:`uu` module is deprecated
+ (see :pep:`PEP 594 <594#uu-and-the-uu-encoding>` for details).
+ :mod:`base64` is a modern alternative.
--------------
+---------------+----------------------------------------------+
* *message* is a string containing a regular expression that the start of
- the warning message must match. The expression is compiled to always be
- case-insensitive.
+ the warning message must match, case-insensitively. In :option:`-W` and
+ :envvar:`PYTHONWARNINGS`, *message* is a literal string that the start of the
+ warning message must contain (case-insensitively), ignoring any whitespace at
+ the start or end of *message*.
* *category* is a class (a subclass of :exc:`Warning`) of which the warning
category must be a subclass in order to match.
-* *module* is a string containing a regular expression that the module name must
- match. The expression is compiled to be case-sensitive.
+* *module* is a string containing a regular expression that the start of the
+ fully-qualified module name must match, case-sensitively. In :option:`-W` and
+ :envvar:`PYTHONWARNINGS`, *module* is a literal string that the
+ fully-qualified module name must be equal to (case-sensitively), ignoring any
+ whitespace at the start or end of *module*.
* *lineno* is an integer that the line number where the warning occurred must
match, or ``0`` to match all line numbers.
error::ResourceWarning # Treat ResourceWarning messages as errors
default::DeprecationWarning # Show DeprecationWarning messages
ignore,default:::mymodule # Only report warnings triggered by "mymodule"
- error:::mymodule[.*] # Convert warnings to errors in "mymodule"
- # and any subpackages of "mymodule"
+ error:::mymodule # Convert warnings to errors in "mymodule"
.. _default-warning-filter:
single: External Data Representation
.. deprecated:: 3.11
- The :mod:`xdrlib` module is deprecated (see :pep:`594` for details).
+ The :mod:`xdrlib` module is deprecated
+ (see :pep:`PEP 594 <594#xdrlib>` for details).
--------------
|--> Commander Clement
-Additional resources
-^^^^^^^^^^^^^^^^^^^^
-
-See http://effbot.org/zone/element-index.htm for tutorials and links to other
-docs.
-
-
.. _elementtree-xpath:
XPath support
^^^^^^^^^
.. function:: xml.etree.ElementInclude.default_loader( href, parse, encoding=None)
+ :module:
Default loader. This default loader reads an included resource from disk. *href* is a URL.
*parse* is for parse mode either "xml" or "text". *encoding*
.. function:: xml.etree.ElementInclude.include( elem, loader=None, base_url=None, \
max_depth=6)
+ :module:
This function expands XInclude directives. *elem* is the root element. *loader* is
an optional resource loader. If omitted, it defaults to :func:`default_loader`.
.. data:: property_xml_string
| value: ``"http://xml.org/sax/properties/xml-string"``
- | data type: String
+ | data type: Bytes
| description: The literal string of characters that was the source for the
current event.
| access: read-only
`XML-RPC Specification <http://xmlrpc.scripting.com/spec.html>`_
The official specification.
- `Unofficial XML-RPC Errata <http://effbot.org/zone/xmlrpc-errata.htm>`_
- Fredrik Lundh's "unofficial errata, intended to clarify certain
- details in the XML-RPC specification, as well as hint at
- 'best practices' to use when designing your own XML-RPC
- implementations."
-
.. _serverproxy-objects:
ServerProxy Objects
compressed text files in :term:`universal newlines` mode.
.. versionchanged:: 3.6
- :meth:`open` can now be used to write files into the archive with the
+ :meth:`ZipFile.open` can now be used to write files into the archive with the
``mode='w'`` option.
.. versionchanged:: 3.6
%PYTHON% -c "import sphinx" > nul 2> nul\r
if errorlevel 1 (\r
echo Installing sphinx with %PYTHON%\r
- %PYTHON% -m pip install sphinx==2.2.0\r
+ %PYTHON% -m pip install -r requirements.txt\r
if errorlevel 1 exit /B\r
)\r
set SPHINXBUILD=%PYTHON% -c "import sphinx.cmd.build, sys; sys.exit(sphinx.cmd.build.main())"\r
%PYTHON% -c "import blurb" > nul 2> nul\r
if errorlevel 1 (\r
echo Installing blurb with %PYTHON%\r
+ rem Should have been installed with Sphinx earlier\r
%PYTHON% -m pip install blurb\r
if errorlevel 1 exit /B\r
)\r
:exc:`StopIteration`, or other exception) is the same as when
iterating over the :meth:`__await__` return value, described above.
-.. method:: coroutine.throw(type[, value[, traceback]])
+.. method:: coroutine.throw(value)
+ coroutine.throw(type[, value[, traceback]])
Raises the specified exception in the coroutine. This method delegates
to the :meth:`~generator.throw` method of the iterator that caused
could receive the value.
-.. method:: generator.throw(type[, value[, traceback]])
+.. method:: generator.throw(value)
+ generator.throw(type[, value[, traceback]])
- Raises an exception of type ``type`` at the point where the generator was paused,
+ Raises an exception at the point where the generator was paused,
and returns the next value yielded by the generator function. If the generator
exits without yielding another value, a :exc:`StopIteration` exception is
raised. If the generator function does not catch the passed-in exception, or
raises a different exception, then that exception propagates to the caller.
+ In typical use, this is called with a single exception instance similar to the
+ way the :keyword:`raise` keyword is used.
+
+ For backwards compatibility, however, the second signature is
+ supported, following a convention from older versions of Python.
+ The *type* argument should be an exception class, and *value*
+ should be an exception instance. If the *value* is not provided, the
+ *type* constructor is called to get an instance. If *traceback*
+ is provided, it is set on the exception, otherwise any existing
+ :attr:`~BaseException.__traceback__` attribute stored in *value* may
+ be cleared.
+
.. index:: exception: GeneratorExit
| ``x[index]``, ``x[index:index]``, | Subscription, slicing, |
| ``x(arguments...)``, ``x.attribute`` | call, attribute reference |
+-----------------------------------------------+-------------------------------------+
-| :keyword:`await` ``x`` | Await expression |
+| :keyword:`await x <await>` | Await expression |
+-----------------------------------------------+-------------------------------------+
| ``**`` | Exponentiation [#]_ |
+-----------------------------------------------+-------------------------------------+
| :keyword:`is`, :keyword:`is not`, ``<``, | tests and identity tests |
| ``<=``, ``>``, ``>=``, ``!=``, ``==`` | |
+-----------------------------------------------+-------------------------------------+
-| :keyword:`not` ``x`` | Boolean NOT |
+| :keyword:`not x <not>` | Boolean NOT |
+-----------------------------------------------+-------------------------------------+
| :keyword:`and` | Boolean AND |
+-----------------------------------------------+-------------------------------------+
spam/
__init__.py
foo.py
- bar.py
-and ``spam/__init__.py`` has the following lines in it::
+and ``spam/__init__.py`` has the following line in it::
from .foo import Foo
- from .bar import Bar
-then executing the following puts a name binding to ``foo`` and ``bar`` in the
+then executing the following puts name bindings for ``foo`` and ``Foo`` in the
``spam`` module::
>>> import spam
>>> spam.foo
<module 'spam.foo' from '/tmp/imports/spam/foo.py'>
- >>> spam.bar
- <module 'spam.bar' from '/tmp/imports/spam/bar.py'>
+ >>> spam.Foo
+ <class 'spam.foo.Foo'>
Given Python's familiar name binding rules this might seem surprising, but
it's actually a fundamental feature of the import system. The invariant
(``b'\xef\xbb\xbf'``), the declared file encoding is UTF-8 (this is supported,
among others, by Microsoft's :program:`notepad`).
-If an encoding is declared, the encoding name must be recognized by Python. The
+If an encoding is declared, the encoding name must be recognized by Python
+(see :ref:`standard-encodings`). The
encoding is used for all lexical analysis, including string literals, comments
and identifiers.
-.. XXX there should be a list of supported encodings.
-
.. _explicit-joining:
In plain English: Both types of literals can be enclosed in matching single quotes
(``'``) or double quotes (``"``). They can also be enclosed in matching groups
of three single or double quotes (these are generally referred to as
-*triple-quoted strings*). The backslash (``\``) character is used to escape
-characters that otherwise have a special meaning, such as newline, backslash
-itself, or the quote character.
+*triple-quoted strings*). The backslash (``\``) character is used to give special
+meaning to otherwise ordinary characters like ``n``, which means 'newline' when
+escaped (``\n``). It can also be used to escape characters that otherwise have a
+special meaning, such as newline, backslash itself, or the quote character.
+See :ref:`escape sequences <escape-sequences>` below for examples.
.. index::
single: b'; bytes literal
single: \u; escape sequence
single: \U; escape sequence
+.. _escape-sequences:
+
Unless an ``'r'`` or ``'R'`` prefix is present, escape sequences in string and
bytes literals are interpreted according to rules similar to those used by
Standard C. The recognized escape sequences are:
* If the target list is a single target with no trailing comma,
optionally in parentheses, the object is assigned to that target.
-* Else: The object must be an iterable with the same number of
- items as there are targets in the target list, and the items are assigned,
- from left to right, to the corresponding targets.
+* Else:
* If the target list contains one target prefixed with an asterisk, called a
"starred" target: The object must be an iterable with at least as many items
Examples::
import foo # foo imported and bound locally
- import foo.bar.baz # foo.bar.baz imported, foo bound locally
- import foo.bar.baz as fbb # foo.bar.baz imported and bound as fbb
- from foo.bar import baz # foo.bar.baz imported and bound as baz
+ import foo.bar.baz # foo, foo.bar, and foo.bar.baz imported, foo bound locally
+ import foo.bar.baz as fbb # foo, foo.bar, and foo.bar.baz imported, foo.bar.baz bound as fbb
+ from foo.bar import baz # foo, foo.bar, and foo.bar.baz imported, foo.bar.baz bound as baz
from foo import attr # foo imported and foo.attr bound as attr
.. index:: single: * (asterisk); import statement
# version 3.2.1. It can be removed after bumping Sphinx version to at
# least 3.5.4.
docutils==0.17.1
+# Jinja version is pinned to a version compatible with Sphinx version 3.2.1.
+jinja2==3.0.3
blurb
import suspicious
-ISSUE_URI = 'https://bugs.python.org/issue%s'
+ISSUE_URI = 'https://bugs.python.org/issue?@action=redirect&bpo=%s'
+GH_ISSUE_URI = 'https://github.com/python/cpython/issues/%s'
SOURCE_URI = 'https://github.com/python/cpython/tree/3.10/%s'
# monkey-patch reST parser to disable alphabetic and roman enumerated lists
def issue_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
issue = utils.unescape(text)
+ # sanity check: there are no bpo issues within these two values
+ if 47261 < int(issue) < 400000:
+ msg = inliner.reporter.error(f'The BPO ID {text!r} seems too high -- '
+ 'use :gh:`...` for GitHub IDs', line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
text = 'bpo-' + issue
refnode = nodes.reference(text, text, refuri=ISSUE_URI % issue)
return [refnode], []
+# Support for marking up and linking to GitHub issues
+
+def gh_issue_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
+ issue = utils.unescape(text)
+ # sanity check: all GitHub issues have ID >= 32426
+ # even though some of them are also valid BPO IDs
+ if int(issue) < 32426:
+ msg = inliner.reporter.error(f'The GitHub ID {text!r} seems too low -- '
+ 'use :issue:`...` for BPO IDs', line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ text = 'gh-' + issue
+ refnode = nodes.reference(text, text, refuri=GH_ISSUE_URI % issue)
+ return [refnode], []
+
+
# Support for linking to Python source files easily
def source_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
# Support for including Misc/NEWS
-issue_re = re.compile('(?:[Ii]ssue #|bpo-)([0-9]+)')
+issue_re = re.compile('(?:[Ii]ssue #|bpo-)([0-9]+)', re.I)
+gh_issue_re = re.compile('(?:gh-issue-|gh-)([0-9]+)', re.I)
whatsnew_re = re.compile(r"(?im)^what's new in (.*?)\??$")
text = 'The NEWS file is not available.'
node = nodes.strong(text, text)
return [node]
- content = issue_re.sub(r'`bpo-\1 <https://bugs.python.org/issue\1>`__',
- content)
+ content = issue_re.sub(r':issue:`\1`', content)
+ # Fallback handling for the GitHub issue
+ content = gh_issue_re.sub(r':gh:`\1`', content)
content = whatsnew_re.sub(r'\1', content)
# remove first 3 lines as they are the main heading
lines = ['.. default-role:: obj', ''] + content.splitlines()[3:]
def setup(app):
app.add_role('issue', issue_role)
+ app.add_role('gh', gh_issue_role)
app.add_role('source', source_role)
app.add_directive('impl-detail', ImplementationDetail)
app.add_directive('availability', Availability)
library/typing,,`,"# Else, type of ``val`` is narrowed to ``float``."
library/typing,,`,# Type of ``val`` is narrowed to ``List[str]``.
library/typing,,`,# Type of ``val`` remains as ``List[object]``.
-library/typing,,`, # will always be an instance of whatever `cls` is
+library/typing,,`, # will always be an instance of whatever ``cls`` is
then attribute lookup prioritizes the instance::
>>> class Warehouse:
- purpose = 'storage'
- region = 'west'
-
+ ... purpose = 'storage'
+ ... region = 'west'
+ ...
>>> w1 = Warehouse()
>>> print(w1.purpose, w1.region)
storage west
object: file
:func:`open` returns a :term:`file object`, and is most commonly used with
-two arguments: ``open(filename, mode)``.
+two positional arguments and one keyword argument:
+``open(filename, mode, encoding=None)``
::
- >>> f = open('workfile', 'w')
+ >>> f = open('workfile', 'w', encoding="utf-8")
.. XXX str(f) is <io.TextIOWrapper object at 0x82e8dc4>
omitted.
Normally, files are opened in :dfn:`text mode`, that means, you read and write
-strings from and to the file, which are encoded in a specific encoding. If
-encoding is not specified, the default is platform dependent (see
-:func:`open`). ``'b'`` appended to the mode opens the file in
-:dfn:`binary mode`: now the data is read and written in the form of bytes
-objects. This mode should be used for all files that don't contain text.
+strings from and to the file, which are encoded in a specific *encoding*.
+If *encoding* is not specified, the default is platform dependent
+(see :func:`open`).
+Because UTF-8 is the modern de-facto standard, ``encoding="utf-8"`` is
+recommended unless you know that you need to use a different encoding.
+Appending a ``'b'`` to the mode opens the file in :dfn:`binary mode`.
+Binary mode data is read and written as :class:`bytes` objects.
+You cannot specify *encoding* when opening a file in binary mode.
In text mode, the default when reading is to convert platform-specific line
endings (``\n`` on Unix, ``\r\n`` on Windows) to just ``\n``. When writing in
point. Using :keyword:`!with` is also much shorter than writing
equivalent :keyword:`try`\ -\ :keyword:`finally` blocks::
- >>> with open('workfile') as f:
+ >>> with open('workfile', encoding="utf-8") as f:
... read_data = f.read()
>>> # We can check that the file has been automatically closed.
json.dump(x, f)
-To decode the object again, if ``f`` is a :term:`text file` object which has
-been opened for reading::
+To decode the object again, if ``f`` is a :term:`binary file` or
+:term:`text file` object which has been opened for reading::
x = json.load(f)
+.. note::
+ JSON files must be encoded in UTF-8. Use ``encoding="utf-8"`` when opening
+ JSON file as a :term:`text file` for both reading and writing.
+
This simple serialization technique can handle lists and dictionaries, but
serializing arbitrary class instances in JSON requires a bit of extra effort.
The reference for the :mod:`json` module contains an explanation of this.
.. index:: triple: module; search; path
When a module named :mod:`spam` is imported, the interpreter first searches for
-a built-in module with that name. If not found, it then searches for a file
+a built-in module with that name. These module names are listed in
+:data:`sys.builtin_module_names`. If not found, it then searches for a file
named :file:`spam.py` in a list of directories given by the variable
:data:`sys.path`. :data:`sys.path` is initialized from these locations:
__all__ = ["echo", "surround", "reverse"]
This would mean that ``from sound.effects import *`` would import the three
-named submodules of the :mod:`sound` package.
+named submodules of the :mod:`sound.effects` package.
If ``__all__`` is not defined, the statement ``from sound.effects import *``
does *not* import all submodules from the package :mod:`sound.effects` into the
import argparse
- parser = argparse.ArgumentParser(prog = 'top',
- description = 'Show top lines from each file')
+ parser = argparse.ArgumentParser(
+ prog='top',
+ description='Show top lines from each file')
parser.add_argument('filenames', nargs='+')
parser.add_argument('-l', '--lines', type=int, default=10)
args = parser.parse_args()
sophisticated and robust capabilities of its larger packages. For example:
* The :mod:`xmlrpc.client` and :mod:`xmlrpc.server` modules make implementing
- remote procedure calls into an almost trivial task. Despite the modules
+ remote procedure calls into an almost trivial task. Despite the modules'
names, no direct knowledge or handling of XML is needed.
* The :mod:`email` package is a library for managing email messages, including
The standard tool for deploying standalone Python applications on the Mac is
:program:`py2app`. More information on installing and using py2app can be found
-at http://undefined.org/python/#py2app.
+at https://pypi.org/project/py2app/.
Other Resources
subdirectory. By default, the subdirectory is named the same as the package,
and without the ``-ExcludeVersion`` option this name will include the specific
version installed. Inside the subdirectory is a ``tools`` directory that
-contains the Python installation::
+contains the Python installation:
+
+.. code-block:: doscon
# Without -ExcludeVersion
> .\python.3.5.2\tools\python.exe -V
.. note::
The embedded distribution does not include the `Microsoft C Runtime
- <https://www.microsoft.com/en-us/download/details.aspx?id=48145>`_ and it is
+ <https://docs.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist#visual-studio-2015-2017-2019-and-2022>`_ and it is
the responsibility of the application installer to provide this. The
runtime may have already been installed on a user's system previously or
automatically via Windows Update, and can be detected by finding
Windows will concatenate User variables *after* System variables, which may
cause unexpected results when modifying :envvar:`PATH`.
- The :envvar:`PYTHONPATH` variable is used by all versions of Python 2 and
- Python 3, so you should not permanently configure this variable unless it
- only includes code that is compatible with all of your installed Python
+ The :envvar:`PYTHONPATH` variable is used by all versions of Python,
+ so you should not permanently configure it unless the listed paths
+ only include code that is compatible with all of your installed Python
versions.
.. seealso::
- https://www.microsoft.com/en-us/wdsi/help/folder-variables
- Environment variables in Windows NT
+ https://docs.microsoft.com/en-us/windows/win32/procthread/environment-variables
+ Overview of environment variables on Windows
- https://technet.microsoft.com/en-us/library/cc754250.aspx
- The SET command, for temporarily modifying environment variables
+ https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/set_1
+ The ``set`` command, for temporarily modifying environment variables
- https://technet.microsoft.com/en-us/library/cc755104.aspx
- The SETX command, for permanently modifying environment variables
+ https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/setx
+ The ``setx`` command, for permanently modifying environment variables
- https://support.microsoft.com/en-us/help/310519/how-to-manage-environment-variables-in-windows-xp
- How To Manage Environment Variables in Windows XP
-
- https://www.chem.gla.ac.uk/~louis/software/faq/q1.html
- Setting Environment variables, Louis J. Farrugia
.. _windows-path-mod:
System-wide installations of Python 3.3 and later will put the launcher on your
:envvar:`PATH`. The launcher is compatible with all available versions of
Python, so it does not matter which version is installed. To check that the
-launcher is available, execute the following command in Command Prompt:
-
-::
+launcher is available, execute the following command in Command Prompt::
py
started - it can be exited as normal, and any additional command-line
arguments specified will be sent directly to Python.
-If you have multiple versions of Python installed (e.g., 2.7 and |version|) you
-will have noticed that Python |version| was started - to launch Python 2.7, try
-the command:
-
-::
+If you have multiple versions of Python installed (e.g., 3.7 and |version|) you
+will have noticed that Python |version| was started - to launch Python 3.7, try
+the command::
- py -2.7
+ py -3.7
-If you want the latest version of Python 2.x you have installed, try the
-command:
-
-::
+If you want the latest version of Python 2 you have installed, try the
+command::
py -2
-You should find the latest version of Python 2.x starts.
-
-If you see the following error, you do not have the launcher installed:
+You should find the latest version of Python 2.x starts.
-::
+If you see the following error, you do not have the launcher installed::
'py' is not recognized as an internal or external command,
operable program or batch file.
Per-user installations of Python do not add the launcher to :envvar:`PATH`
unless the option was selected on installation.
+The command::
+
+ py --list
+
+displays the currently installed version(s) of Python.
+
Virtual environments
^^^^^^^^^^^^^^^^^^^^
import sys
sys.stdout.write("hello from Python %s\n" % (sys.version,))
-From the directory in which hello.py lives, execute the command:
-
-::
+From the directory in which hello.py lives, execute the command::
py hello.py
Re-executing the command should now print the latest Python 3.x information.
As with the above command-line examples, you can specify a more explicit
-version qualifier. Assuming you have Python 2.6 installed, try changing the
-first line to ``#! python2.6`` and you should find the 2.6 version
-information printed.
+version qualifier. Assuming you have Python 3.7 installed, try changing
+the first line to ``#! python3.7`` and you should find the 3.7
+version information printed.
Note that unlike interactive use, a bare "python" will use the latest
version of Python 2.x that you have installed. This is for backward
Any of the above virtual commands can be suffixed with an explicit version
(either just the major version, or the major and minor version).
Furthermore the 32-bit version can be requested by adding "-32" after the
-minor version. I.e. ``/usr/bin/python2.7-32`` will request usage of the
-32-bit python 2.7.
+minor version. I.e. ``/usr/bin/python3.7-32`` will request usage of the
+32-bit python 3.7.
.. versionadded:: 3.7
``python2`` will use the latest Python 2.x version installed and
the command ``python3`` will use the latest Python 3.x installed.
-* The commands ``python3.1`` and ``python2.7`` will not consult any
+* The command ``python3.7`` will not consult any
options at all as the versions are fully specified.
* If ``PY_PYTHON=3``, the commands ``python`` and ``python3`` will both use
the latest installed Python 3 version.
-* If ``PY_PYTHON=3.1-32``, the command ``python`` will use the 32-bit
- implementation of 3.1 whereas the command ``python3`` will use the latest
+* If ``PY_PYTHON=3.7-32``, the command ``python`` will use the 32-bit
+ implementation of 3.7 whereas the command ``python3`` will use the latest
installed Python (PY_PYTHON was not considered at all as a major
version was specified.)
-* If ``PY_PYTHON=3`` and ``PY_PYTHON3=3.1``, the commands
- ``python`` and ``python3`` will both use specifically 3.1
+* If ``PY_PYTHON=3`` and ``PY_PYTHON3=3.7``, the commands
+ ``python`` and ``python3`` will both use specifically 3.7
In addition to environment variables, the same settings can be configured
in the .INI file used by the launcher. The section in the INI file is
For example:
-* Setting ``PY_PYTHON=3.1`` is equivalent to the INI file containing:
+* Setting ``PY_PYTHON=3.7`` is equivalent to the INI file containing:
.. code-block:: ini
[defaults]
- python=3.1
+ python=3.7
-* Setting ``PY_PYTHON=3`` and ``PY_PYTHON3=3.1`` is equivalent to the INI file
+* Setting ``PY_PYTHON=3`` and ``PY_PYTHON3=3.7`` is equivalent to the INI file
containing:
.. code-block:: ini
[defaults]
python=3
- python3=3.1
+ python3=3.7
Diagnostics
-----------
utilities for:
* `Component Object Model
- <https://docs.microsoft.com/en-us/windows/desktop/com/component-object-model--com--portal>`_
+ <https://docs.microsoft.com/en-us/windows/win32/com/component-object-model--com--portal>`_
(COM)
* Win32 API calls
* Registry
* Event log
-* `Microsoft Foundation Classes <https://msdn.microsoft.com/en-us/library/fe1cf721%28VS.80%29.aspx>`_ (MFC)
- user interfaces
+* `Microsoft Foundation Classes
+ <https://docs.microsoft.com/en-us/cpp/mfc/mfc-desktop-applications>`_
+ (MFC) user interfaces
`PythonWin <https://web.archive.org/web/20060524042422/
https://www.python.org/windows/pythonwin/>`_ is a sample MFC application
`Win32 How Do I...? <http://timgolden.me.uk/python/win32_how_do_i.html>`_
by Tim Golden
- `Python and COM <http://www.boddie.org.uk/python/COM.html>`_
+ `Python and COM <https://www.boddie.org.uk/python/COM.html>`_
by David and Paul Boddie
Python.
-WConio
-------
-
-Since Python's advanced terminal handling layer, :mod:`curses`, is restricted to
-Unix-like systems, there is a library exclusive to Windows as well: Windows
-Console I/O for Python.
-
-`WConio <http://newcenturycomputers.net/projects/wconio.html>`_ is a wrapper for
-Turbo-C's :file:`CONIO.H`, used to create text user interfaces.
-
-
-
Compiling Python on Windows
===========================
<https://devguide.python.org/setup/#getting-the-source-code>`_.
The source tree contains a build solution and project files for Microsoft
-Visual Studio 2015, which is the compiler used to build the official Python
+Visual Studio, which is the compiler used to build the official Python
releases. These files are in the :file:`PCbuild` directory.
Check :file:`PCbuild/readme.txt` for general information on the build process.
-
For extension modules, consult :ref:`building-on-windows`.
-.. seealso::
-
- `Python + Windows + distutils + SWIG + gcc MinGW <http://sebsauvage.net/python/mingw.html>`_
- or "Creating Python extensions in C/C++ with SWIG and compiling them with
- MinGW gcc under Windows" or "Installing Python extension with distutils
- and without Microsoft Visual C++" by Sébastien Sauvage, 2003
-
Other Platforms
===============
earlier are no longer supported (due to the lack of users or developers).
Check :pep:`11` for details on all unsupported platforms.
-* `Windows CE <http://pythonce.sourceforge.net/>`_ is still supported.
-* The `Cygwin <https://cygwin.com/>`_ installer offers to install the Python
- interpreter as well (cf. `Cygwin package source
- <ftp://ftp.uni-erlangen.de/pub/pc/gnuwin32/cygwin/mirrors/cygnus/
- release/python>`_, `Maintainer releases
- <http://www.tishler.net/jason/software/python/>`_)
+* `Windows CE <http://pythonce.sourceforge.net/>`_ is
+ `no longer supported <https://github.com/python/cpython/issues/71542>`__
+ since Python 3 (if it ever was).
+* The `Cygwin <https://cygwin.com/>`_ installer offers to install the
+ `Python interpreter <https://cygwin.com/packages/summary/python3.html>`__
+ as well
See `Python for Windows <https://www.python.org/downloads/windows/>`_
for detailed information about platforms with pre-compiled installers.
This article explains the new features in Python 2.5. The final release of
Python 2.5 is scheduled for August 2006; :pep:`356` describes the planned
-release schedule.
+release schedule. Python 2.5 was released on September 19, 2006.
The changes in Python 2.5 are an interesting mix of language and library
improvements. The library enhancements will be more important to Python's user
The rest of this section will provide a brief overview of using ElementTree.
Full documentation for ElementTree is available at
-http://effbot.org/zone/element-index.htm.
+https://web.archive.org/web/20201124024954/http://effbot.org/zone/element-index.htm.
ElementTree represents an XML document as a tree of element nodes. The text
content of the document is stored as the :attr:`text` and :attr:`tail`
.. seealso::
- http://effbot.org/zone/element-index.htm
+ https://web.archive.org/web/20201124024954/http://effbot.org/zone/element-index.htm
Official documentation for ElementTree.
.. ======================================================================
This saves the maintainer some effort going through the SVN logs
when researching a change.
-This article explains the new features in Python 2.6, released on October 1
+This article explains the new features in Python 2.6, released on October 1,
2008. The release schedule is described in :pep:`361`.
The major theme of Python 2.6 is preparing the migration path to
Fredrik Lundh develops ElementTree and produced the 1.3 version;
you can read his article describing 1.3 at
-http://effbot.org/zone/elementtree-13-intro.htm.
+https://web.archive.org/web/20200703234532/http://effbot.org/zone/elementtree-13-intro.htm.
Florent Xicluna updated the version included with
Python, after discussions on python-dev and in :issue:`6472`.)
This article explains the new features in Python 3.0, compared to 2.6.
Python 3.0, also known as "Python 3000" or "Py3K", is the first ever
-*intentionally backwards incompatible* Python release. There are more
-changes than in a typical release, and more that are important for all
-Python users. Nevertheless, after digesting the changes, you'll find
+*intentionally backwards incompatible* Python release. Python 3.0 was released on December 3, 2008.
+There are more changes than in a typical release, and more that are important for all
+Python users. Nevertheless, after digesting the changes, you'll find
that Python really hasn't changed all that much -- by and large, we're
mostly fixing well-known annoyances and warts, and removing a lot of
old cruft.
when researching a change.
This article explains the new features in Python 3.1, compared to 3.0.
+Python 3.1 was released on June 27, 2009.
PEP 372: Ordered Dictionaries
when researching a change.
This article explains the new features in Python 3.10, compared to 3.9.
-
+Python 3.10 was released on October 4, 2021.
For full details, see the :ref:`changelog <changelog>`.
Summary -- Release highlights
also now un-stringize stringized annotations.
(Contributed by Larry Hastings in :issue:`43817`.)
+itertools
+---------
+
+Add :func:`itertools.pairwise()`.
+(Contributed by Raymond Hettinger in :issue:`38200`.)
+
linecache
---------
This saves the maintainer the effort of going through the SVN log
when researching a change.
-This article explains the new features in Python 3.2 as compared to 3.1. It
+This article explains the new features in Python 3.2 as compared to 3.1.
+Python 3.2 was released on February 20, 2011. It
focuses on a few highlights and gives a few examples. For full details, see the
`Misc/NEWS
<https://github.com/python/cpython/blob/076ca6c3c8df3030307e548d9be792ce3c1c6eea/Misc/NEWS>`_
* :meth:`xml.etree.ElementTree.getiterator` use ``Element.iter`` instead.
For details of the update, see `Introducing ElementTree
-<http://effbot.org/zone/elementtree-13-intro.htm>`_ on Fredrik Lundh's website.
+<https://web.archive.org/web/20200703234532/http://effbot.org/zone/elementtree-13-intro.htm>`_
+on Fredrik Lundh's website.
(Contributed by Florent Xicluna and Fredrik Lundh, :issue:`6472`.)
:Editor: Raymond Hettinger
This article explains the new features in Python 3.8, compared to 3.7.
+Python 3.8 was released on October 14, 2019.
For full details, see the :ref:`changelog <changelog>`.
.. testsetup::
first introduced in Python 3.4. It offers better performance and smaller
size compared to Protocol 3 available since Python 3.0.
-* Removed one ``Py_ssize_t`` member from ``PyGC_Head``. All GC tracked
+* Removed one :c:type:`Py_ssize_t` member from ``PyGC_Head``. All GC tracked
objects (e.g. tuple, list, dict) size is reduced 4 or 8 bytes.
(Contributed by Inada Naoki in :issue:`33597`.)
when researching a change.
This article explains the new features in Python 3.9, compared to 3.8.
-Python 3.9 was released on October 5th, 2020.
+Python 3.9 was released on October 5, 2020.
For full details, see the :ref:`changelog <changelog>`.
invalid_arguments:
| a=args ',' '*' { RAISE_SYNTAX_ERROR_KNOWN_LOCATION(a, "iterable argument unpacking follows keyword argument unpacking") }
| a=expression b=for_if_clauses ',' [args | expression for_if_clauses] {
- RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, PyPegen_last_item(b, comprehension_ty)->target, "Generator expression must be parenthesized") }
+ RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, _PyPegen_get_last_comprehension_item(PyPegen_last_item(b, comprehension_ty)), "Generator expression must be parenthesized") }
| a=NAME b='=' expression for_if_clauses {
RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "invalid syntax. Maybe you meant '==' or ':=' instead of '='?")}
| a=args b=for_if_clauses { _PyPegen_nonparen_genexp_in_call(p, a, b) }
| args ',' a=expression b=for_if_clauses {
- RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, asdl_seq_GET(b, b->size-1)->target, "Generator expression must be parenthesized") }
+ RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, _PyPegen_get_last_comprehension_item(PyPegen_last_item(b, comprehension_ty)), "Generator expression must be parenthesized") }
| a=args ',' args { _PyPegen_arguments_parsing_error(p, a) }
invalid_kwarg:
| a[Token*]=('True'|'False'|'None') b='=' {
invalid_except_stmt_indent:
| a='except' expression ['as' NAME ] ':' NEWLINE !INDENT {
RAISE_INDENTATION_ERROR("expected an indented block after 'except' statement on line %d", a->lineno) }
- | a='except' ':' NEWLINE !INDENT { RAISE_SYNTAX_ERROR("expected an indented block after except statement on line %d", a->lineno) }
+ | a='except' ':' NEWLINE !INDENT { RAISE_INDENTATION_ERROR("expected an indented block after 'except' statement on line %d", a->lineno) }
invalid_match_stmt:
| "match" subject_expr !':' { CHECK_VERSION(void*, 10, "Pattern matching is", RAISE_SYNTAX_ERROR("expected ':'") ) }
| a="match" subject=subject_expr ':' NEWLINE !INDENT {
# error "this header requires Py_BUILD_CORE define"
#endif
-#define _Py_HAMT_MAX_TREE_DEPTH 7
+
+/*
+HAMT tree is shaped by hashes of keys. Every group of 5 bits of a hash denotes
+the exact position of the key in one level of the tree. Since we're using
+32 bit hashes, we can have at most 7 such levels. Although if there are
+two distinct keys with equal hashes, they will have to occupy the same
+cell in the 7th level of the tree -- so we'd put them in a "collision" node.
+Which brings the total possible tree depth to 8. Read more about the actual
+layout of the HAMT tree in `hamt.c`.
+
+This constant is used to define a data structure for storing iteration state.
+*/
+#define _Py_HAMT_MAX_TREE_DEPTH 8
#define PyHamt_Check(o) Py_IS_TYPE(o, &_PyHamt_Type)
/*--start constants--*/
#define PY_MAJOR_VERSION 3
#define PY_MINOR_VERSION 10
-#define PY_MICRO_VERSION 4
+#define PY_MICRO_VERSION 5
#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL
#define PY_RELEASE_SERIAL 0
/* Version as a string */
-#define PY_VERSION "3.10.4"
+#define PY_VERSION "3.10.5"
/*--end constants--*/
/* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2.
'default',
'type',
'choices',
+ 'required',
'help',
'metavar',
]
# non-mmap files even if sendfile is supported by OS
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
- "and file {file!r} combination")
+ f"and file {file!r} combination")
async def _sock_sendfile_fallback(self, sock, file, offset, count):
if offset:
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
- if not hasattr(socket, 'AF_UNIX') or sock.family != socket.AF_UNIX:
+ if sock.family == socket.AF_INET or (
+ base_events._HAS_IPv6 and sock.family == socket.AF_INET6):
resolved = await self._ensure_resolved(
address, family=sock.family, type=sock.type, proto=sock.proto,
loop=self,
return self._state == RUNNING
def done(self):
- """Return True of the future was cancelled or finished executing."""
+ """Return True if the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
+ # Traceback object needs to be garbage-collected as its frames
+ # contain references to all the objects in the exception scope
+ self.exc.__traceback__ = None
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return _rebuild_exc, (self.exc, self.tb)
mp_context = mp.get_context()
self._mp_context = mp_context
+ # https://github.com/python/cpython/issues/90622
+ self._safe_to_dynamically_spawn_children = (
+ self._mp_context.get_start_method(allow_none=False) != "fork")
+
if initializer is not None and not callable(initializer):
raise TypeError("initializer must be a callable")
self._initializer = initializer
def _start_executor_manager_thread(self):
if self._executor_manager_thread is None:
# Start the processes so that their sentinels are known.
+ if not self._safe_to_dynamically_spawn_children: # ie, using fork.
+ self._launch_processes()
self._executor_manager_thread = _ExecutorManagerThread(self)
self._executor_manager_thread.start()
_threads_wakeups[self._executor_manager_thread] = \
process_count = len(self._processes)
if process_count < self._max_workers:
- p = self._mp_context.Process(
- target=_process_worker,
- args=(self._call_queue,
- self._result_queue,
- self._initializer,
- self._initargs))
- p.start()
- self._processes[p.pid] = p
+ # Assertion disabled as this codepath is also used to replace a
+ # worker that unexpectedly dies, even when using the 'fork' start
+ # method. That means there is still a potential deadlock bug. If a
+ # 'fork' mp_context worker dies, we'll be forking a new one when
+ # we know a thread is running (self._executor_manager_thread).
+ #assert self._safe_to_dynamically_spawn_children or not self._executor_manager_thread, 'https://github.com/python/cpython/issues/90622'
+ self._spawn_process()
+
+ def _launch_processes(self):
+ # https://github.com/python/cpython/issues/90622
+ assert not self._executor_manager_thread, (
+ 'Processes cannot be fork()ed after the thread has started, '
+ 'deadlock in the child processes could result.')
+ for _ in range(len(self._processes), self._max_workers):
+ self._spawn_process()
+
+ def _spawn_process(self):
+ p = self._mp_context.Process(
+ target=_process_worker,
+ args=(self._call_queue,
+ self._result_queue,
+ self._initializer,
+ self._initargs))
+ p.start()
+ self._processes[p.pid] = p
def submit(self, fn, /, *args, **kwargs):
with self._shutdown_lock:
# Wake up queue management thread
self._executor_manager_thread_wakeup.wakeup()
- self._adjust_process_count()
+ if self._safe_to_dynamically_spawn_children:
+ self._adjust_process_count()
self._start_executor_manager_thread()
return f
submit.__doc__ = _base.Executor.submit.__doc__
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
result = cls(y, m, d, hh, mm, ss, us, tz)
- if tz is None:
+ if tz is None and not utc:
# As of version 2015f max fold in IANA database is
# 23 hours at 1969-09-30 13:00:00 in Kwajalein.
# Let's probe 24 hours in the past to detect a transition:
probe2 = cls(y, m, d, hh, mm, ss, us, tz)
if probe2 == result:
result._fold = 1
- else:
+ elif tz is not None:
result = tz.fromutc(result)
return result
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
- terminated strings, ready to be printed as-is via the writeline()
+ terminated strings, ready to be printed as-is via the writelines()
method of a file-like object.
Example:
def _find_lineno(self, obj, source_lines):
"""
- Return a line number of the given object's docstring. Note:
- this method assumes that the object has a docstring.
+ Return a line number of the given object's docstring.
+
+ Returns `None` if the given object does not have a docstring.
"""
lineno = None
+ docstring = getattr(obj, '__doc__', None)
# Find the line number for modules.
- if inspect.ismodule(obj):
+ if inspect.ismodule(obj) and docstring is not None:
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
- if inspect.isclass(obj):
+ if inspect.isclass(obj) and docstring is not None:
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.__func__
- if inspect.isfunction(obj): obj = obj.__code__
+ if inspect.isfunction(obj) and getattr(obj, '__doc__', None):
+ # We don't use `docstring` var here, because `obj` can be changed.
+ obj = obj.__code__
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
# Turn the CTE decoded bytes into unicode.
try:
string = bstring.decode(charset)
- except UnicodeError:
+ except UnicodeDecodeError:
defects.append(errors.UndecodableBytesDefect("Encoded word "
- "contains bytes not decodable using {} charset".format(charset)))
+ f"contains bytes not decodable using {charset!r} charset"))
string = bstring.decode(charset, 'surrogateescape')
- except LookupError:
+ except (LookupError, UnicodeEncodeError):
string = bstring.decode('ascii', 'surrogateescape')
if charset.lower() != 'unknown-8bit':
- defects.append(errors.CharsetError("Unknown charset {} "
- "in encoded word; decoded as unknown bytes".format(charset)))
+ defects.append(errors.CharsetError(f"Unknown charset {charset!r} "
+ f"in encoded word; decoded as unknown bytes"))
return string, charset, lang, defects
else:
try:
value = value.decode(charset, 'surrogateescape')
- except LookupError:
+ except (LookupError, UnicodeEncodeError):
# XXX: there should really be a custom defect for
# unknown character set to make it easy to find,
# because otherwise unknown charset is a silent
digits += value[0]
value = value[1:]
if digits[0] == '0' and digits != '0':
- section.defects.append(errors.InvalidHeaderError(
+ section.defects.append(errors.InvalidHeaderDefect(
"section number has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
charset is the input character set, and must be the canonical name of a
character set.
- Optional header_enc and body_enc is either Charset.QP for
- quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
+ Optional header_enc and body_enc is either charset.QP for
+ quoted-printable, charset.BASE64 for base64 encoding, charset.SHORTEST for
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
is only valid for header_enc. It describes how message headers and
message bodies in the input charset are to be encoded. Default is no
header_encoding: If the character set must be encoded before it can be
used in an email header, this attribute will be set to
- Charset.QP (for quoted-printable), Charset.BASE64 (for
- base64 encoding), or Charset.SHORTEST for the shortest of
+ charset.QP (for quoted-printable), charset.BASE64 (for
+ base64 encoding), or charset.SHORTEST for the shortest of
QP or BASE64 encoding. Otherwise, it will be None.
body_encoding: Same as header_encoding, but describes the encoding for the
mail message's body, which indeed may be different than the
- header encoding. Charset.SHORTEST is not allowed for
+ header encoding. charset.SHORTEST is not allowed for
body_encoding.
output_charset: Some character sets must be converted before they can be
add('\\[')
else:
stuff = pat[i:j]
- if '--' not in stuff:
+ if '-' not in stuff:
stuff = stuff.replace('\\', r'\\')
else:
chunks = []
chunks.append(pat[i:k])
i = k+1
k = k+3
- chunks.append(pat[i:j])
+ chunk = pat[i:j]
+ if chunk:
+ chunks.append(chunk)
+ else:
+ chunks[-1] += '-'
+ # Remove empty ranges -- invalid in RE.
+ for k in range(len(chunks)-1, 0, -1):
+ if chunks[k-1][-1] > chunks[k][0]:
+ chunks[k-1] = chunks[k-1][:-1] + chunks[k][1:]
+ del chunks[k]
# Escape backslashes and hyphens for set difference (--).
# Hyphens that create ranges shouldn't be escaped.
stuff = '-'.join(s.replace('\\', r'\\').replace('-', r'\-')
# Escape set operations (&&, ~~ and ||).
stuff = re.sub(r'([&~|])', r'\\\1', stuff)
i = j+1
- if stuff[0] == '!':
- stuff = '^' + stuff[1:]
- elif stuff[0] in ('^', '['):
- stuff = '\\' + stuff
- add(f'[{stuff}]')
+ if not stuff:
+ # Empty range: never match.
+ add('(?!)')
+ elif stuff == '!':
+ # Negated empty range: match any character.
+ add('.')
+ else:
+ if stuff[0] == '!':
+ stuff = '^' + stuff[1:]
+ elif stuff[0] in ('^', '['):
+ stuff = '\\' + stuff
+ add(f'[{stuff}]')
else:
add(re.escape(c))
assert i == n
# maps the HTML entity name to the Unicode code point
+# from https://html.spec.whatwg.org/multipage/named-characters.html
name2codepoint = {
'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1
"""
Test corner cases in flash_paren_event and paren_closed_event.
- These cases force conditional expression and alternate paths.
+ Force execution of conditional expressions and alternate paths.
"""
text = self.text
pm = self.get_parenmatch()
- text.insert('insert', '# this is a commen)')
+ text.insert('insert', '# Comment.)')
pm.paren_closed_event('event')
text.insert('insert', '\ndef')
except (AddressValueError, NetmaskValueError):
pass
- raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
- address)
+ raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 address')
def ip_network(address, strict=True):
except (AddressValueError, NetmaskValueError):
pass
- raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
- address)
+ raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 network')
def ip_interface(address):
except (AddressValueError, NetmaskValueError):
pass
- raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
- address)
+ raise ValueError(f'{address!r} does not appear to be an IPv4 or IPv6 interface')
def v4_int_to_packed(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = str(address).split('/')
if len(addr) > 2:
- raise AddressValueError("Only one '/' permitted in %r" % address)
+ raise AddressValueError(f"Only one '/' permitted in {address!r}")
return addr
# which converts into a formatted IP string.
addr_str = str(address)
if '/' in addr_str:
- raise AddressValueError("Unexpected '/' in %r" % address)
+ raise AddressValueError(f"Unexpected '/' in {address!r}")
self._ip = self._ip_int_from_string(addr_str)
@property
# which converts into a formatted IP string.
addr_str = str(address)
if '/' in addr_str:
- raise AddressValueError("Unexpected '/' in %r" % address)
+ raise AddressValueError(f"Unexpected '/' in {address!r}")
addr_str, self._scope_id = self._split_scope_id(addr_str)
self._ip = self._ip_int_from_string(addr_str)
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
- process.join(timeout=0.1)
+ process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive after terminate')
def close(self):
self._closed = True
- try:
- self._reader.close()
- finally:
- close = self._close
- if close:
- self._close = None
- close()
+ close = self._close
+ if close:
+ self._close = None
+ close()
def join_thread(self):
debug('Queue.join_thread()')
self._thread = threading.Thread(
target=Queue._feed,
args=(self._buffer, self._notempty, self._send_bytes,
- self._wlock, self._writer.close, self._ignore_epipe,
- self._on_queue_feeder_error, self._sem),
+ self._wlock, self._reader.close, self._writer.close,
+ self._ignore_epipe, self._on_queue_feeder_error,
+ self._sem),
name='QueueFeederThread'
)
self._thread.daemon = True
notempty.notify()
@staticmethod
- def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe,
- onerror, queue_sem):
+ def _feed(buffer, notempty, send_bytes, writelock, reader_close,
+ writer_close, ignore_epipe, onerror, queue_sem):
debug('starting thread to feed data to pipe')
nacquire = notempty.acquire
nrelease = notempty.release
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
- close()
+ reader_close()
+ writer_close()
return
# serialize the data before acquiring the lock
return address[0] == 0
elif isinstance(address, str):
return address[0] == "\0"
- raise TypeError('address type of {address!r} unrecognized')
+ raise TypeError(f'address type of {address!r} unrecognized')
abstract_sockets_supported = _platform_supports_abstract_sockets()
if idx >= len(self) or idx < -len(self):
raise IndexError(idx)
+ if idx < 0:
+ idx += len(self)
return self._pathcls._from_parsed_parts(self._drv, self._root,
self._parts[:-idx - 1])
# -*- coding: utf-8 -*-
-# Autogenerated by Sphinx on Wed Mar 23 20:11:40 2022
+# Autogenerated by Sphinx on Mon Jun 6 12:53:10 2022
topics = {'assert': 'The "assert" statement\n'
'**********************\n'
'\n'
' optionally in parentheses, the object is assigned to that '
'target.\n'
'\n'
- '* Else: The object must be an iterable with the same number of '
- 'items\n'
- ' as there are targets in the target list, and the items are '
- 'assigned,\n'
- ' from left to right, to the corresponding targets.\n'
+ '* Else:\n'
'\n'
' * If the target list contains one target prefixed with an '
'asterisk,\n'
'is\n'
'applied to separating the commands; the input is split at the '
'first\n'
- '";;" pair, even if it is in the middle of a quoted string.\n'
+ '";;" pair, even if it is in the middle of a quoted string. A\n'
+ 'workaround for strings with double semicolons is to use '
+ 'implicit\n'
+ 'string concatenation "\';\'\';\'" or "";"";"".\n'
'\n'
'If a file ".pdbrc" exists in the user’s home directory or in '
'the\n'
'Examples:\n'
'\n'
' import foo # foo imported and bound locally\n'
- ' import foo.bar.baz # foo.bar.baz imported, foo bound '
- 'locally\n'
- ' import foo.bar.baz as fbb # foo.bar.baz imported and bound as '
- 'fbb\n'
- ' from foo.bar import baz # foo.bar.baz imported and bound as '
- 'baz\n'
+ ' import foo.bar.baz # foo, foo.bar, and foo.bar.baz '
+ 'imported, foo bound locally\n'
+ ' import foo.bar.baz as fbb # foo, foo.bar, and foo.bar.baz '
+ 'imported, foo.bar.baz bound as fbb\n'
+ ' from foo.bar import baz # foo, foo.bar, and foo.bar.baz '
+ 'imported, foo.bar.baz bound as baz\n'
' from foo import attr # foo imported and foo.attr bound as '
'attr\n'
'\n'
'| "x(arguments...)", "x.attribute" | '
'attribute reference |\n'
'+-------------------------------------------------+---------------------------------------+\n'
- '| "await" "x" | '
+ '| "await x" | '
'Await expression |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "**" | '
'| ">=", "!=", "==" | '
'tests and identity tests |\n'
'+-------------------------------------------------+---------------------------------------+\n'
- '| "not" "x" | '
+ '| "not x" | '
'Boolean NOT |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "and" | '
' >>> "they\'re bill\'s friends from the UK".title()\n'
' "They\'Re Bill\'S Friends From The Uk"\n'
'\n'
- ' A workaround for apostrophes can be constructed using '
- 'regular\n'
- ' expressions:\n'
+ ' The "string.capwords()" function does not have this '
+ 'problem, as it\n'
+ ' splits words on spaces only.\n'
+ '\n'
+ ' Alternatively, a workaround for apostrophes can be '
+ 'constructed\n'
+ ' using regular expressions:\n'
'\n'
' >>> import re\n'
' >>> def titlecase(s):\n'
'single quotes ("\'") or double quotes ("""). They can also be '
'enclosed\n'
'in matching groups of three single or double quotes (these are\n'
- 'generally referred to as *triple-quoted strings*). The '
- 'backslash\n'
- '("\\") character is used to escape characters that otherwise have '
- 'a\n'
- 'special meaning, such as newline, backslash itself, or the quote\n'
+ 'generally referred to as *triple-quoted strings*). The backslash '
+ '("\\")\n'
+ 'character is used to give special meaning to otherwise ordinary\n'
+ 'characters like "n", which means ‘newline’ when escaped ("\\n"). '
+ 'It can\n'
+ 'also be used to escape characters that otherwise have a special\n'
+ 'meaning, such as newline, backslash itself, or the quote '
'character.\n'
+ 'See escape sequences below for examples.\n'
'\n'
'Bytes literals are always prefixed with "\'b\'" or "\'B\'"; they '
'produce\n'
'unwise to use\n'
'them as dictionary keys.)\n'
'\n'
- 'Dictionaries can be created by placing a comma-separated '
- 'list of "key:\n'
- 'value" pairs within braces, for example: "{\'jack\': 4098, '
- "'sjoerd':\n"
- '4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the '
- '"dict"\n'
- 'constructor.\n'
- '\n'
'class dict(**kwargs)\n'
'class dict(mapping, **kwargs)\n'
'class dict(iterable, **kwargs)\n'
def put_nowait(self, item):
'''Put an item into the queue without blocking.
- This is exactly equivalent to `put(item)` and is only provided
+ This is exactly equivalent to `put(item, block=False)` and is only provided
for compatibility with the Queue class.
'''
return self.put(item, block=False)
def run_module(mod_name, init_globals=None,
run_name=None, alter_sys=False):
- """Execute a module's code without importing it
+ """Execute a module's code without importing it.
- Returns the resulting top level namespace dictionary
+ mod_name -- an absolute module name or package name.
+
+ Optional arguments:
+ init_globals -- dictionary used to pre-populate the module's
+ globals dictionary before the code is executed.
+
+ run_name -- if not None, this will be used for setting __name__;
+ otherwise, __name__ will be set to mod_name + '.__main__' if the
+ named module is a package and to just mod_name otherwise.
+
+ alter_sys -- if True, sys.argv[0] is updated with the value of
+ __file__ and sys.modules[__name__] is updated with a temporary
+ module object for the module being executed. Both are
+ restored to their original values before the function returns.
+
+ Returns the resulting module globals dictionary.
"""
mod_name, mod_spec, code = _get_module_details(mod_name)
if run_name is None:
return code, fname
def run_path(path_name, init_globals=None, run_name=None):
- """Execute code located at the specified filesystem location
+ """Execute code located at the specified filesystem location.
+
+ path_name -- filesystem location of a Python script, zipfile,
+ or directory containing a top level __main__.py script.
+
+ Optional arguments:
+ init_globals -- dictionary used to pre-populate the module's
+ globals dictionary before the code is executed.
- Returns the resulting top level namespace dictionary
+ run_name -- if not None, this will be used to set __name__;
+ otherwise, '<run_path>' will be used for __name__.
- The file path may refer directly to a Python script (i.e.
- one that could be directly executed with execfile) or else
- it may refer to a zipfile or directory containing a top
- level __main__.py script.
+ Returns the resulting module globals dictionary.
"""
if run_name is None:
run_name = "<run_path>"
ignore_dangling_symlinks=False, dirs_exist_ok=False):
"""Recursively copy a directory tree and return the destination directory.
- dirs_exist_ok dictates whether to raise an exception in case dst or any
- missing parent directory already exists.
-
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
+ If dirs_exist_ok is false (the default) and `dst` already exists, a
+ `FileExistsError` is raised. If `dirs_exist_ok` is true, the copying
+ operation will continue if it encounters existing directories, and files
+ within the `dst` tree will be overwritten by corresponding files from the
+ `src` tree.
"""
sys.audit("shutil.copytree", src, dst)
with os.scandir(src) as itr:
# 3. This notice may not be removed or altered from any source distribution.
"""
-The sqlite3 extension module provides a DB-API 2.0 (PEP 249) compilant
+The sqlite3 extension module provides a DB-API 2.0 (PEP 249) compliant
interface to the SQLite library, and requires SQLite 3.7.15 or newer.
To use the module, start by creating a database Connection object:
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
+import contextlib
import unittest
import sqlite3 as sqlite
self.assertEqual(action, 0, "progress handler was not cleared")
class TraceCallbackTests(unittest.TestCase):
+ @contextlib.contextmanager
+ def check_stmt_trace(self, cx, expected):
+ try:
+ traced = []
+ cx.set_trace_callback(lambda stmt: traced.append(stmt))
+ yield
+ finally:
+ self.assertEqual(traced, expected)
+ cx.set_trace_callback(None)
+
def test_trace_callback_used(self):
"""
Test that the trace callback is invoked once it is set.
cur.execute(queries[1])
self.assertEqual(traced_statements, queries)
+ def test_trace_expanded_sql(self):
+ expected = [
+ "create table t(t)",
+ "BEGIN ",
+ "insert into t values(0)",
+ "insert into t values(1)",
+ "insert into t values(2)",
+ "COMMIT",
+ ]
+ cx = sqlite.connect(":memory:")
+ with self.check_stmt_trace(cx, expected):
+ with cx:
+ cx.execute("create table t(t)")
+ cx.executemany("insert into t values(?)", ((v,) for v in range(3)))
+
def suite():
tests = [
import functools
from test import support
+from unittest.mock import patch
+
+
class RegressionTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
self.assertEqual(val, b'')
+class RecursiveUseOfCursors(unittest.TestCase):
+ # GH-80254: sqlite3 should not segfault for recursive use of cursors.
+ msg = "Recursive use of cursors not allowed"
+
+ def setUp(self):
+ self.con = sqlite.connect(":memory:",
+ detect_types=sqlite.PARSE_COLNAMES)
+ self.cur = self.con.cursor()
+ self.cur.execute("create table test(x foo)")
+ self.cur.executemany("insert into test(x) values (?)",
+ [("foo",), ("bar",)])
+
+ def tearDown(self):
+ self.cur.close()
+ self.con.close()
+
+ def test_recursive_cursor_init(self):
+ conv = lambda x: self.cur.__init__(self.con)
+ with patch.dict(sqlite.converters, {"INIT": conv}):
+ with self.assertRaisesRegex(sqlite.ProgrammingError, self.msg):
+ self.cur.execute(f'select x as "x [INIT]", x from test')
+
+ def test_recursive_cursor_close(self):
+ conv = lambda x: self.cur.close()
+ with patch.dict(sqlite.converters, {"CLOSE": conv}):
+ with self.assertRaisesRegex(sqlite.ProgrammingError, self.msg):
+ self.cur.execute(f'select x as "x [CLOSE]", x from test')
+
+ def test_recursive_cursor_fetch(self):
+ conv = lambda x, l=[]: self.cur.fetchone() if l else l.append(None)
+ with patch.dict(sqlite.converters, {"ITER": conv}):
+ self.cur.execute(f'select x as "x [ITER]", x from test')
+ with self.assertRaisesRegex(sqlite.ProgrammingError, self.msg):
+ self.cur.fetchall()
+
+
def suite():
tests = [
- RegressionTests
+ RegressionTests,
+ RecursiveUseOfCursors,
]
return unittest.TestSuite(
[unittest.TestLoader().loadTestsFromTestCase(t) for t in tests]
(0x3c2, 0x3c3), # ςσ
# GREEK SMALL LETTER PHI, GREEK PHI SYMBOL
(0x3c6, 0x3d5), # φϕ
+ # CYRILLIC SMALL LETTER VE, CYRILLIC SMALL LETTER ROUNDED VE
+ (0x432, 0x1c80), # вᲀ
+ # CYRILLIC SMALL LETTER DE, CYRILLIC SMALL LETTER LONG-LEGGED DE
+ (0x434, 0x1c81), # дᲁ
+ # CYRILLIC SMALL LETTER O, CYRILLIC SMALL LETTER NARROW O
+ (0x43e, 0x1c82), # оᲂ
+ # CYRILLIC SMALL LETTER ES, CYRILLIC SMALL LETTER WIDE ES
+ (0x441, 0x1c83), # сᲃ
+ # CYRILLIC SMALL LETTER TE, CYRILLIC SMALL LETTER TALL TE, CYRILLIC SMALL LETTER THREE-LEGGED TE
+ (0x442, 0x1c84, 0x1c85), # тᲄᲅ
+ # CYRILLIC SMALL LETTER HARD SIGN, CYRILLIC SMALL LETTER TALL HARD SIGN
+ (0x44a, 0x1c86), # ъᲆ
+ # CYRILLIC SMALL LETTER YAT, CYRILLIC SMALL LETTER TALL YAT
+ (0x463, 0x1c87), # ѣᲇ
+ # CYRILLIC SMALL LETTER UNBLENDED UK, CYRILLIC SMALL LETTER MONOGRAPH UK
+ (0x1c88, 0xa64b), # ᲈꙋ
# LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE
(0x1e61, 0x1e9b), # ṡẛ
# LATIN SMALL LIGATURE LONG S T, LATIN SMALL LIGATURE ST
charmap += b'\0' * 0xff00
continue
# Character set contains non-BMP character codes.
+ # For range, all BMP characters in the range are already
+ # processed.
if fixup:
hascased = True
- # There are only two ranges of cased non-BMP characters:
- # 10400-1044F (Deseret) and 118A0-118DF (Warang Citi),
- # and for both ranges RANGE_UNI_IGNORE works.
+ # For now, IN_UNI_IGNORE+LITERAL and
+ # IN_UNI_IGNORE+RANGE_UNI_IGNORE work for all non-BMP
+ # characters, because two characters (at least one of
+ # which is not in the BMP) match case-insensitively
+ # if and only if:
+ # 1) c1.lower() == c2.lower()
+ # 2) c1.lower() == c2 or c1.lower().upper() == c2
+ # Also, both c.lower() and c.lower().upper() are single
+ # characters for every non-BMP character.
if op is RANGE:
op = RANGE_UNI_IGNORE
tail.append((op, av))
def __repr__(self):
return self.name
+ __reduce__ = None
+
MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT')
def _makecodes(names):
self.groupdict = {}
self.groupwidths = [None] # group 0
self.lookbehindgroups = None
+ self.grouprefpos = {}
@property
def groups(self):
return len(self.groupwidths)
charname = source.getuntil('}', 'character name')
try:
c = ord(unicodedata.lookup(charname))
- except KeyError:
+ except (KeyError, TypeError):
raise source.error("undefined character name %r" % charname,
len(charname) + len(r'\N{}'))
return LITERAL, c
charname = source.getuntil('}', 'character name')
try:
c = ord(unicodedata.lookup(charname))
- except KeyError:
+ except (KeyError, TypeError):
raise source.error("undefined character name %r" % charname,
len(charname) + len(r'\N{}'))
return LITERAL, c
if condgroup >= MAXGROUPS:
msg = "invalid group reference %d" % condgroup
raise source.error(msg, len(condname) + 1)
+ if condgroup not in state.grouprefpos:
+ state.grouprefpos[condgroup] = (
+ source.tell() - len(condname) - 1
+ )
state.checklookbehindgroup(condgroup, source)
item_yes = _parse(source, state, verbose, nested + 1)
if source.match("|"):
assert source.next == ")"
raise source.error("unbalanced parenthesis")
+ for g in p.state.grouprefpos:
+ if g >= p.state.groups:
+ msg = "invalid group reference %d" % g
+ raise error(msg, str, p.state.grouprefpos[g])
+
if flags & SRE_FLAG_DEBUG:
p.dump()
seconds past the Epoch (the time values
returned from time.time())
- fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
- by the server running on HOST at port PORT. No
- validation of the certificate is performed.
+ get_server_certificate (addr, ssl_version, ca_certs, timeout) -- Retrieve the
+ certificate from the server at the specified
+ address and return it as a PEM-encoded string
+
Integer constants:
return False
+# These are primarily fail-safe knobs for negatives. A True value does not
+# guarantee the given libc/syscall API will be used.
_USE_POSIX_SPAWN = _use_posix_spawn()
+_USE_VFORK = True
class Popen:
self.assertEqual(t.microsecond, 7812)
def test_timestamp_limits(self):
- # minimum timestamp
- min_dt = self.theclass.min.replace(tzinfo=timezone.utc)
+ with self.subTest("minimum UTC"):
+ min_dt = self.theclass.min.replace(tzinfo=timezone.utc)
+ min_ts = min_dt.timestamp()
+
+ # This test assumes that datetime.min == 0001-01-01T00:00:00.00
+ # If that assumption changes, this value can change as well
+ self.assertEqual(min_ts, -62135596800)
+
+ with self.subTest("maximum UTC"):
+ # Zero out microseconds to avoid rounding issues
+ max_dt = self.theclass.max.replace(tzinfo=timezone.utc,
+ microsecond=0)
+ max_ts = max_dt.timestamp()
+
+ # This test assumes that datetime.max == 9999-12-31T23:59:59.999999
+ # If that assumption changes, this value can change as well
+ self.assertEqual(max_ts, 253402300799.0)
+
+ def test_fromtimestamp_limits(self):
+ try:
+ self.theclass.fromtimestamp(-2**32 - 1)
+ except (OSError, OverflowError):
+ self.skipTest("Test not valid on this platform")
+
+ # XXX: Replace these with datetime.{min,max}.timestamp() when we solve
+ # the issue with gh-91012
+ min_dt = self.theclass.min + timedelta(days=1)
min_ts = min_dt.timestamp()
+
+ max_dt = self.theclass.max.replace(microsecond=0)
+ max_ts = ((self.theclass.max - timedelta(hours=23)).timestamp() +
+ timedelta(hours=22, minutes=59, seconds=59).total_seconds())
+
+ for (test_name, ts, expected) in [
+ ("minimum", min_ts, min_dt),
+ ("maximum", max_ts, max_dt),
+ ]:
+ with self.subTest(test_name, ts=ts, expected=expected):
+ actual = self.theclass.fromtimestamp(ts)
+
+ self.assertEqual(actual, expected)
+
+ # Test error conditions
+ test_cases = [
+ ("Too small by a little", min_ts - timedelta(days=1, hours=12).total_seconds()),
+ ("Too small by a lot", min_ts - timedelta(days=400).total_seconds()),
+ ("Too big by a little", max_ts + timedelta(days=1).total_seconds()),
+ ("Too big by a lot", max_ts + timedelta(days=400).total_seconds()),
+ ]
+
+ for test_name, ts in test_cases:
+ with self.subTest(test_name, ts=ts):
+ with self.assertRaises((ValueError, OverflowError)):
+ # converting a Python int to C time_t can raise a
+ # OverflowError, especially on 32-bit platforms.
+ self.theclass.fromtimestamp(ts)
+
+ def test_utcfromtimestamp_limits(self):
try:
- # date 0001-01-01 00:00:00+00:00: timestamp=-62135596800
- self.assertEqual(self.theclass.fromtimestamp(min_ts, tz=timezone.utc),
- min_dt)
- except (OverflowError, OSError) as exc:
- # the date 0001-01-01 doesn't fit into 32-bit time_t,
- # or platform doesn't support such very old date
- self.skipTest(str(exc))
-
- # maximum timestamp: set seconds to zero to avoid rounding issues
- max_dt = self.theclass.max.replace(tzinfo=timezone.utc,
- second=0, microsecond=0)
+ self.theclass.utcfromtimestamp(-2**32 - 1)
+ except (OSError, OverflowError):
+ self.skipTest("Test not valid on this platform")
+
+ min_dt = self.theclass.min.replace(tzinfo=timezone.utc)
+ min_ts = min_dt.timestamp()
+
+ max_dt = self.theclass.max.replace(microsecond=0, tzinfo=timezone.utc)
max_ts = max_dt.timestamp()
- # date 9999-12-31 23:59:00+00:00: timestamp 253402300740
- self.assertEqual(self.theclass.fromtimestamp(max_ts, tz=timezone.utc),
- max_dt)
-
- # number of seconds greater than 1 year: make sure that the new date
- # is not valid in datetime.datetime limits
- delta = 3600 * 24 * 400
-
- # too small
- ts = min_ts - delta
- # converting a Python int to C time_t can raise a OverflowError,
- # especially on 32-bit platforms.
- with self.assertRaises((ValueError, OverflowError)):
- self.theclass.fromtimestamp(ts)
- with self.assertRaises((ValueError, OverflowError)):
- self.theclass.utcfromtimestamp(ts)
-
- # too big
- ts = max_dt.timestamp() + delta
- with self.assertRaises((ValueError, OverflowError)):
- self.theclass.fromtimestamp(ts)
- with self.assertRaises((ValueError, OverflowError)):
- self.theclass.utcfromtimestamp(ts)
+
+ for (test_name, ts, expected) in [
+ ("minimum", min_ts, min_dt.replace(tzinfo=None)),
+ ("maximum", max_ts, max_dt.replace(tzinfo=None)),
+ ]:
+ with self.subTest(test_name, ts=ts, expected=expected):
+ try:
+ actual = self.theclass.utcfromtimestamp(ts)
+ except (OSError, OverflowError) as exc:
+ self.skipTest(str(exc))
+
+ self.assertEqual(actual, expected)
+
+ # Test error conditions
+ test_cases = [
+ ("Too small by a little", min_ts - 1),
+ ("Too small by a lot", min_ts - timedelta(days=400).total_seconds()),
+ ("Too big by a little", max_ts + 1),
+ ("Too big by a lot", max_ts + timedelta(days=400).total_seconds()),
+ ]
+
+ for test_name, ts in test_cases:
+ with self.subTest(test_name, ts=ts):
+ with self.assertRaises((ValueError, OverflowError)):
+ # converting a Python int to C time_t can raise a
+ # OverflowError, especially on 32-bit platforms.
+ self.theclass.utcfromtimestamp(ts)
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
--- /dev/null
+# This module is used in `test_doctest`.
+# It must not have a docstring.
+
+def func_with_docstring():
+ """Some unrelated info."""
+
+
+def func_without_docstring():
+ pass
+
+
+def func_with_doctest():
+ """
+ This function really contains a test case.
+
+ >>> func_with_doctest.__name__
+ 'func_with_doctest'
+ """
+ return 3
+
+
+class ClassWithDocstring:
+ """Some unrelated class information."""
+
+
+class ClassWithoutDocstring:
+ pass
+
+
+class ClassWithDoctest:
+ """This class really has a test case in it.
+
+ >>> ClassWithDoctest.__name__
+ 'ClassWithDoctest'
+ """
+
+
+class MethodWrapper:
+ def method_with_docstring(self):
+ """Method with a docstring."""
+
+ def method_without_docstring(self):
+ pass
+
+ def method_with_doctest(self):
+ """
+ This has a doctest!
+ >>> MethodWrapper.method_with_doctest.__name__
+ 'method_with_doctest'
+ """
import sys
import unittest
from test import support
+from test.support.os_helper import TESTFN_UNDECODABLE, FS_NONASCII
try:
import gc
except ImportError:
# Ensure there's a non-ASCII character in env vars at all times to force
# tests consider this case. See BPO-44647 for details.
- os.environ.setdefault(
- UNICODE_GUARD_ENV,
- "\N{SMILING FACE WITH SUNGLASSES}",
- )
+ if TESTFN_UNDECODABLE and os.supports_bytes_environ:
+ os.environb.setdefault(UNICODE_GUARD_ENV.encode(), TESTFN_UNDECODABLE)
+ elif FS_NONASCII:
+ os.environ.setdefault(UNICODE_GUARD_ENV, FS_NONASCII)
def replace_stdout():
import warnings
+MS_WINDOWS = (sys.platform == 'win32')
+
+
def normalize_text(text):
if text is None:
return None
encoding = '%s/%s' % (encoding, errors)
info_add('sys.%s.encoding' % name, encoding)
- # Were we compiled --with-pydebug or with #define Py_DEBUG?
+ # Were we compiled --with-pydebug?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
if Py_DEBUG:
text = 'Yes (sys.gettotalrefcount() present)'
else:
text = 'No (sys.gettotalrefcount() missing)'
- info_add('Py_DEBUG', text)
+ info_add('build.Py_DEBUG', text)
+
+ # Were we compiled --with-trace-refs?
+ Py_TRACE_REFS = hasattr(sys, 'getobjects')
+ if Py_TRACE_REFS:
+ text = 'Yes (sys.getobjects() present)'
+ else:
+ text = 'No (sys.getobjects() missing)'
+ info_add('build.Py_TRACE_REFS', text)
def collect_platform(info_add):
def collect_sysconfig(info_add):
+ # On Windows, sysconfig is not reliable to get macros used
+ # to build Python
+ if MS_WINDOWS:
+ return
+
import sysconfig
for name in (
value = normalize_text(value)
info_add('sysconfig[%s]' % name, value)
+ PY_CFLAGS = sysconfig.get_config_var('PY_CFLAGS')
+ NDEBUG = (PY_CFLAGS and '-DNDEBUG' in PY_CFLAGS)
+ if NDEBUG:
+ text = 'ignore assertions (macro defined)'
+ else:
+ text= 'build assertions (macro not defined)'
+ info_add('build.NDEBUG',text)
+
+ for name in (
+ 'WITH_DOC_STRINGS',
+ 'WITH_DTRACE',
+ 'WITH_FREELISTS',
+ 'WITH_PYMALLOC',
+ 'WITH_VALGRIND',
+ ):
+ value = sysconfig.get_config_var(name)
+ if value:
+ text = 'Yes'
+ else:
+ text = 'No'
+ info_add(f'build.{name}', text)
+
def collect_ssl(info_add):
import os
def collect_socket(info_add):
- import socket
+ try:
+ import socket
+ except ImportError:
+ return
hostname = socket.gethostname()
info_add('socket.hostname', hostname)
return
call_func(info_add, 'pymem.allocator', _testcapi, 'pymem_getallocatorsname')
- copy_attr(info_add, 'pymem.with_pymalloc', _testcapi, 'WITH_PYMALLOC')
def collect_resource(info_add):
call_func(info_add, 'test_support._is_gui_available', support, '_is_gui_available')
call_func(info_add, 'test_support.python_is_optimized', support, 'python_is_optimized')
+ info_add('test_support.check_sanitizer(address=True)',
+ support.check_sanitizer(address=True))
+ info_add('test_support.check_sanitizer(memory=True)',
+ support.check_sanitizer(memory=True))
+ info_add('test_support.check_sanitizer(ub=True)',
+ support.check_sanitizer(ub=True))
+
def collect_cc(info_add):
import subprocess
nargs='+',
default=42,
choices=[1, 2, 3],
+ required=False,
help='HELP',
metavar='METAVAR')
string = (
"Action(option_strings=['--foo', '-a', '-b'], dest='b', "
"nargs='+', const=None, default=42, type='int', "
- "choices=[1, 2, 3], help='HELP', metavar='METAVAR')")
+ "choices=[1, 2, 3], required=False, help='HELP', metavar='METAVAR')")
self.assertStringEqual(option, string)
def test_argument(self):
nargs='?',
default=2.5,
choices=[0.5, 1.5, 2.5],
+ required=True,
help='H HH H',
metavar='MV MV MV')
string = (
"Action(option_strings=[], dest='x', nargs='?', "
"const=None, default=2.5, type=%r, choices=[0.5, 1.5, 2.5], "
- "help='H HH H', metavar='MV MV MV')" % float)
+ "required=True, help='H HH H', metavar='MV MV MV')" % float)
self.assertStringEqual(argument, string)
def test_namespace(self):
self.assertTrue(t1.result())
race_tasks = [t2, t3, t4]
done_tasks = [t for t in race_tasks if t.done() and t.result()]
- self.assertTrue(2, len(done_tasks))
+ self.assertEqual(2, len(done_tasks))
# cleanup locked semaphore
sem.release()
from contextvars import ContextVar
from unittest import mock
-from test.test_asyncio import utils as test_utils
def tearDownModule():
asyncio.set_event_loop_policy(None)
-class ToThreadTests(test_utils.TestCase):
- def setUp(self):
- super().setUp()
- self.loop = asyncio.new_event_loop()
- asyncio.set_event_loop(self.loop)
-
- def tearDown(self):
- self.loop.run_until_complete(
- self.loop.shutdown_default_executor())
- self.loop.close()
- asyncio.set_event_loop(None)
- self.loop = None
- super().tearDown()
-
- def test_to_thread(self):
- async def main():
- return await asyncio.to_thread(sum, [40, 2])
-
- result = self.loop.run_until_complete(main())
+class ToThreadTests(unittest.IsolatedAsyncioTestCase):
+ async def test_to_thread(self):
+ result = await asyncio.to_thread(sum, [40, 2])
self.assertEqual(result, 42)
- def test_to_thread_exception(self):
+ async def test_to_thread_exception(self):
def raise_runtime():
raise RuntimeError("test")
- async def main():
- await asyncio.to_thread(raise_runtime)
-
with self.assertRaisesRegex(RuntimeError, "test"):
- self.loop.run_until_complete(main())
+ await asyncio.to_thread(raise_runtime)
- def test_to_thread_once(self):
+ async def test_to_thread_once(self):
func = mock.Mock()
- async def main():
- await asyncio.to_thread(func)
-
- self.loop.run_until_complete(main())
+ await asyncio.to_thread(func)
func.assert_called_once()
- def test_to_thread_concurrent(self):
+ async def test_to_thread_concurrent(self):
func = mock.Mock()
- async def main():
- futs = []
- for _ in range(10):
- fut = asyncio.to_thread(func)
- futs.append(fut)
- await asyncio.gather(*futs)
+ futs = []
+ for _ in range(10):
+ fut = asyncio.to_thread(func)
+ futs.append(fut)
+ await asyncio.gather(*futs)
- self.loop.run_until_complete(main())
self.assertEqual(func.call_count, 10)
- def test_to_thread_args_kwargs(self):
+ async def test_to_thread_args_kwargs(self):
# Unlike run_in_executor(), to_thread() should directly accept kwargs.
func = mock.Mock()
- async def main():
- await asyncio.to_thread(func, 'test', something=True)
+ await asyncio.to_thread(func, 'test', something=True)
- self.loop.run_until_complete(main())
func.assert_called_once_with('test', something=True)
- def test_to_thread_contextvars(self):
+ async def test_to_thread_contextvars(self):
test_ctx = ContextVar('test_ctx')
def get_ctx():
return test_ctx.get()
- async def main():
- test_ctx.set('parrot')
- return await asyncio.to_thread(get_ctx)
+ test_ctx.set('parrot')
+ result = await asyncio.to_thread(get_ctx)
- result = self.loop.run_until_complete(main())
self.assertEqual(result, 'parrot')
target
)
+ def test_insort_keynotNone(self):
+ x = []
+ y = {"a": 2, "b": 1}
+ for f in (self.module.insort_left, self.module.insort_right):
+ self.assertRaises(TypeError, f, x, y, key = "b")
+
class TestBisectPython(TestBisect, unittest.TestCase):
module = py_bisect
self.assertEqual(days[0][1], firstweekday)
self.assertEqual(days[-1][1], (firstweekday - 1) % 7)
+ def test_iterweekdays(self):
+ week0 = list(range(7))
+ for firstweekday in range(7):
+ cal = calendar.Calendar(firstweekday)
+ week = list(cal.iterweekdays())
+ expected = week0[firstweekday:] + week0[:firstweekday]
+ self.assertEqual(week, expected)
+
class MonthCalendarTestCase(unittest.TestCase):
def setUp(self):
import codecs
import html.entities
+import itertools
import sys
import unicodedata
import unittest
self.pos = len(exc.object)
return ("<?>", oldpos)
+class RepeatedPosReturn:
+ def __init__(self, repl="<?>"):
+ self.repl = repl
+ self.pos = 0
+ self.count = 0
+
+ def handle(self, exc):
+ if self.count > 0:
+ self.count -= 1
+ return (self.repl, self.pos)
+ return (self.repl, exc.end)
+
# A UnicodeEncodeError object with a bad start attribute
class BadStartUnicodeEncodeError(UnicodeEncodeError):
def __init__(self):
codecs.lookup_error("namereplace")
)
- def test_unencodablereplacement(self):
+ def test_encode_nonascii_replacement(self):
+ def handle(exc):
+ if isinstance(exc, UnicodeEncodeError):
+ return (repl, exc.end)
+ raise TypeError("don't know how to handle %r" % exc)
+ codecs.register_error("test.replacing", handle)
+
+ for enc, input, repl in (
+ ("ascii", "[¤]", "abc"),
+ ("iso-8859-1", "[€]", "½¾"),
+ ("iso-8859-15", "[¤]", "œŸ"),
+ ):
+ res = input.encode(enc, "test.replacing")
+ self.assertEqual(res, ("[" + repl + "]").encode(enc))
+
+ for enc, input, repl in (
+ ("utf-8", "[\udc80]", "\U0001f40d"),
+ ("utf-16", "[\udc80]", "\U0001f40d"),
+ ("utf-32", "[\udc80]", "\U0001f40d"),
+ ):
+ with self.subTest(encoding=enc):
+ with self.assertRaises(UnicodeEncodeError) as cm:
+ input.encode(enc, "test.replacing")
+ exc = cm.exception
+ self.assertEqual(exc.start, 1)
+ self.assertEqual(exc.end, 2)
+ self.assertEqual(exc.object, input)
+
+ def test_encode_unencodable_replacement(self):
def unencrepl(exc):
if isinstance(exc, UnicodeEncodeError):
- return ("\u4242", exc.end)
+ return (repl, exc.end)
else:
raise TypeError("don't know how to handle %r" % exc)
codecs.register_error("test.unencreplhandler", unencrepl)
- for enc in ("ascii", "iso-8859-1", "iso-8859-15"):
- self.assertRaises(
- UnicodeEncodeError,
- "\u4242".encode,
- enc,
- "test.unencreplhandler"
- )
+
+ for enc, input, repl in (
+ ("ascii", "[¤]", "½"),
+ ("iso-8859-1", "[€]", "œ"),
+ ("iso-8859-15", "[¤]", "½"),
+ ("utf-8", "[\udc80]", "\udcff"),
+ ("utf-16", "[\udc80]", "\udcff"),
+ ("utf-32", "[\udc80]", "\udcff"),
+ ):
+ with self.subTest(encoding=enc):
+ with self.assertRaises(UnicodeEncodeError) as cm:
+ input.encode(enc, "test.unencreplhandler")
+ exc = cm.exception
+ self.assertEqual(exc.start, 1)
+ self.assertEqual(exc.end, 2)
+ self.assertEqual(exc.object, input)
+
+ def test_encode_bytes_replacement(self):
+ def handle(exc):
+ if isinstance(exc, UnicodeEncodeError):
+ return (repl, exc.end)
+ raise TypeError("don't know how to handle %r" % exc)
+ codecs.register_error("test.replacing", handle)
+
+ # It works even if the bytes sequence is not decodable.
+ for enc, input, repl in (
+ ("ascii", "[¤]", b"\xbd\xbe"),
+ ("iso-8859-1", "[€]", b"\xbd\xbe"),
+ ("iso-8859-15", "[¤]", b"\xbd\xbe"),
+ ("utf-8", "[\udc80]", b"\xbd\xbe"),
+ ("utf-16le", "[\udc80]", b"\xbd\xbe"),
+ ("utf-16be", "[\udc80]", b"\xbd\xbe"),
+ ("utf-32le", "[\udc80]", b"\xbc\xbd\xbe\xbf"),
+ ("utf-32be", "[\udc80]", b"\xbc\xbd\xbe\xbf"),
+ ):
+ with self.subTest(encoding=enc):
+ res = input.encode(enc, "test.replacing")
+ self.assertEqual(res, "[".encode(enc) + repl + "]".encode(enc))
+
+ def test_encode_odd_bytes_replacement(self):
+ def handle(exc):
+ if isinstance(exc, UnicodeEncodeError):
+ return (repl, exc.end)
+ raise TypeError("don't know how to handle %r" % exc)
+ codecs.register_error("test.replacing", handle)
+
+ input = "[\udc80]"
+ # Tests in which the replacement bytestring contains not whole number
+ # of code units.
+ for enc, repl in (
+ *itertools.product(("utf-16le", "utf-16be"),
+ [b"a", b"abc"]),
+ *itertools.product(("utf-32le", "utf-32be"),
+ [b"a", b"ab", b"abc", b"abcde"]),
+ ):
+ with self.subTest(encoding=enc, repl=repl):
+ with self.assertRaises(UnicodeEncodeError) as cm:
+ input.encode(enc, "test.replacing")
+ exc = cm.exception
+ self.assertEqual(exc.start, 1)
+ self.assertEqual(exc.end, 2)
+ self.assertEqual(exc.object, input)
+ self.assertEqual(exc.reason, "surrogates not allowed")
def test_badregistercall(self):
# enhance coverage of:
self.assertRaises(ValueError, codecs.charmap_encode, "\xff", err, D())
self.assertRaises(TypeError, codecs.charmap_encode, "\xff", err, {0xff: 300})
+ def test_decodehelper_bug36819(self):
+ handler = RepeatedPosReturn("x")
+ codecs.register_error("test.bug36819", handler.handle)
+
+ testcases = [
+ ("ascii", b"\xff"),
+ ("utf-8", b"\xff"),
+ ("utf-16be", b'\xdc\x80'),
+ ("utf-32be", b'\x00\x00\xdc\x80'),
+ ("iso-8859-6", b"\xff"),
+ ]
+ for enc, bad in testcases:
+ input = "abcd".encode(enc) + bad
+ with self.subTest(encoding=enc):
+ handler.count = 50
+ decoded = input.decode(enc, "test.bug36819")
+ self.assertEqual(decoded, 'abcdx' * 51)
+
+ def test_encodehelper_bug36819(self):
+ handler = RepeatedPosReturn()
+ codecs.register_error("test.bug36819", handler.handle)
+
+ input = "abcd\udc80"
+ encodings = ["ascii", "latin1", "utf-8", "utf-16", "utf-32"] # built-in
+ encodings += ["iso-8859-15"] # charmap codec
+ if sys.platform == 'win32':
+ encodings = ["mbcs", "oem"] # code page codecs
+
+ handler.repl = "\udcff"
+ for enc in encodings:
+ with self.subTest(encoding=enc):
+ handler.count = 50
+ with self.assertRaises(UnicodeEncodeError) as cm:
+ input.encode(enc, "test.bug36819")
+ exc = cm.exception
+ self.assertEqual(exc.start, 4)
+ self.assertEqual(exc.end, 5)
+ self.assertEqual(exc.object, input)
+ if sys.platform == "win32":
+ handler.count = 50
+ with self.assertRaises(UnicodeEncodeError) as cm:
+ codecs.code_page_encode(437, input, "test.bug36819")
+ exc = cm.exception
+ self.assertEqual(exc.start, 4)
+ self.assertEqual(exc.end, 5)
+ self.assertEqual(exc.object, input)
+
+ handler.repl = "x"
+ for enc in encodings:
+ with self.subTest(encoding=enc):
+ # Without the fix, the interpreter would segfault after a
+ # handful of attempts. 50 was chosen to try to ensure a
+ # segfault on an unfixed build, but not OOM a machine that
+ # has the fix.
+ handler.count = 50
+ encoded = input.encode(enc, "test.bug36819")
+ self.assertEqual(encoded.decode(enc), "abcdx" * 51)
+ if sys.platform == "win32":
+ handler.count = 50
+ encoded = codecs.code_page_encode(437, input, "test.bug36819")
+ self.assertEqual(encoded[0].decode(), "abcdx" * 51)
+ self.assertEqual(encoded[1], len(input))
+
def test_translatehelper(self):
# enhance coverage of:
# Objects/unicodeobject.c::unicode_encode_call_errorhandler()
class CommandLineTestsBase:
"""Test compileall's CLI."""
- @classmethod
- def setUpClass(cls):
- for path in filter(os.path.isdir, sys.path):
- directory_created = False
- directory = pathlib.Path(path) / '__pycache__'
- path = directory / 'test.try'
- try:
- if not directory.is_dir():
- directory.mkdir()
- directory_created = True
- path.write_text('# for test_compileall', encoding="utf-8")
- except OSError:
- sys_path_writable = False
- break
- finally:
- os_helper.unlink(str(path))
- if directory_created:
- directory.rmdir()
- else:
- sys_path_writable = True
- cls._sys_path_writable = sys_path_writable
-
- def _skip_if_sys_path_not_writable(self):
- if not self._sys_path_writable:
- raise unittest.SkipTest('not all entries on sys.path are writable')
+ def setUp(self):
+ self.directory = tempfile.mkdtemp()
+ self.addCleanup(os_helper.rmtree, self.directory)
+ self.pkgdir = os.path.join(self.directory, 'foo')
+ os.mkdir(self.pkgdir)
+ self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__')
+ # Create the __init__.py and a package module.
+ self.initfn = script_helper.make_script(self.pkgdir, '__init__', '')
+ self.barfn = script_helper.make_script(self.pkgdir, 'bar', '')
+
+ @contextlib.contextmanager
+ def temporary_pycache_prefix(self):
+ """Adjust and restore sys.pycache_prefix."""
+ old_prefix = sys.pycache_prefix
+ new_prefix = os.path.join(self.directory, '__testcache__')
+ try:
+ sys.pycache_prefix = new_prefix
+ yield {
+ 'PYTHONPATH': self.directory,
+ 'PYTHONPYCACHEPREFIX': new_prefix,
+ }
+ finally:
+ sys.pycache_prefix = old_prefix
def _get_run_args(self, args):
return [*support.optim_args_from_interpreter_flags(),
path = importlib.util.cache_from_source(fn)
self.assertFalse(os.path.exists(path))
- def setUp(self):
- self.directory = tempfile.mkdtemp()
- self.addCleanup(os_helper.rmtree, self.directory)
- self.pkgdir = os.path.join(self.directory, 'foo')
- os.mkdir(self.pkgdir)
- self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__')
- # Create the __init__.py and a package module.
- self.initfn = script_helper.make_script(self.pkgdir, '__init__', '')
- self.barfn = script_helper.make_script(self.pkgdir, 'bar', '')
-
def test_no_args_compiles_path(self):
# Note that -l is implied for the no args case.
- self._skip_if_sys_path_not_writable()
bazfn = script_helper.make_script(self.directory, 'baz', '')
- self.assertRunOK(PYTHONPATH=self.directory)
- self.assertCompiled(bazfn)
- self.assertNotCompiled(self.initfn)
- self.assertNotCompiled(self.barfn)
+ with self.temporary_pycache_prefix() as env:
+ self.assertRunOK(**env)
+ self.assertCompiled(bazfn)
+ self.assertNotCompiled(self.initfn)
+ self.assertNotCompiled(self.barfn)
@without_source_date_epoch # timestamp invalidation test
def test_no_args_respects_force_flag(self):
- self._skip_if_sys_path_not_writable()
bazfn = script_helper.make_script(self.directory, 'baz', '')
- self.assertRunOK(PYTHONPATH=self.directory)
- pycpath = importlib.util.cache_from_source(bazfn)
+ with self.temporary_pycache_prefix() as env:
+ self.assertRunOK(**env)
+ pycpath = importlib.util.cache_from_source(bazfn)
# Set atime/mtime backward to avoid file timestamp resolution issues
os.utime(pycpath, (time.time()-60,)*2)
mtime = os.stat(pycpath).st_mtime
# Without force, no recompilation
- self.assertRunOK(PYTHONPATH=self.directory)
+ self.assertRunOK(**env)
mtime2 = os.stat(pycpath).st_mtime
self.assertEqual(mtime, mtime2)
# Now force it.
- self.assertRunOK('-f', PYTHONPATH=self.directory)
+ self.assertRunOK('-f', **env)
mtime2 = os.stat(pycpath).st_mtime
self.assertNotEqual(mtime, mtime2)
def test_no_args_respects_quiet_flag(self):
- self._skip_if_sys_path_not_writable()
script_helper.make_script(self.directory, 'baz', '')
- noisy = self.assertRunOK(PYTHONPATH=self.directory)
+ with self.temporary_pycache_prefix() as env:
+ noisy = self.assertRunOK(**env)
self.assertIn(b'Listing ', noisy)
- quiet = self.assertRunOK('-q', PYTHONPATH=self.directory)
+ quiet = self.assertRunOK('-q', **env)
self.assertNotIn(b'Listing ', quiet)
# Ensure that the default behavior of compileall's CLI is to create
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future,
BrokenExecutor)
from concurrent.futures.process import BrokenProcessPool, _check_system_limits
-from multiprocessing import get_context
import multiprocessing.process
import multiprocessing.util
+import multiprocessing as mp
if support.check_sanitizer(address=True, memory=True):
self.executor = self.executor_type(
max_workers=self.worker_count,
**self.executor_kwargs)
- self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
super().tearDown()
def get_context(self):
- return get_context(self.ctx)
-
- def _prime_executor(self):
- # Make sure that the executor is ready to do work before running the
- # tests. This should reduce the probability of timeouts in the tests.
- futures = [self.executor.submit(time.sleep, 0.1)
- for _ in range(self.worker_count)]
- for f in futures:
- f.result()
+ return mp.get_context(self.ctx)
class ThreadPoolMixin(ExecutorMixin):
with self.assertRaises(BrokenExecutor):
self.executor.submit(get_init_status)
- def _prime_executor(self):
- pass
-
@contextlib.contextmanager
def _assert_logged(self, msg):
if self.log_queue is not None:
f.result()
def test_cancel_futures(self):
- executor = self.executor_type(max_workers=3)
- fs = [executor.submit(time.sleep, .1) for _ in range(50)]
- executor.shutdown(cancel_futures=True)
+ assert self.worker_count <= 5, "test needs few workers"
+ fs = [self.executor.submit(time.sleep, .1) for _ in range(50)]
+ self.executor.shutdown(cancel_futures=True)
# We can't guarantee the exact number of cancellations, but we can
- # guarantee that *some* were cancelled. With setting max_workers to 3,
- # most of the submitted futures should have been cancelled.
+ # guarantee that *some* were cancelled. With few workers, many of
+ # the submitted futures should have been cancelled.
cancelled = [fut for fut in fs if fut.cancelled()]
- self.assertTrue(len(cancelled) >= 35, msg=f"{len(cancelled)=}")
+ self.assertGreater(len(cancelled), 20)
# Ensure the other futures were able to finish.
# Use "not fut.cancelled()" instead of "fut.done()" to include futures
# Similar to the number of cancelled futures, we can't guarantee the
# exact number that completed. But, we can guarantee that at least
# one finished.
- self.assertTrue(len(others) > 0, msg=f"{len(others)=}")
+ self.assertGreater(len(others), 0)
- def test_hang_issue39205(self):
+ def test_hang_gh83386(self):
"""shutdown(wait=False) doesn't hang at exit with running futures.
- See https://bugs.python.org/issue39205.
+ See https://github.com/python/cpython/issues/83386.
"""
if self.executor_type == futures.ProcessPoolExecutor:
raise unittest.SkipTest(
- "Hangs due to https://bugs.python.org/issue39205")
+ "Hangs, see https://github.com/python/cpython/issues/83386")
rc, out, err = assert_python_ok('-c', """if True:
from concurrent.futures import {executor_type}
from test.test_concurrent_futures import sleep_and_print
if __name__ == "__main__":
+ if {context!r}: multiprocessing.set_start_method({context!r})
t = {executor_type}(max_workers=3)
t.submit(sleep_and_print, 1.0, "apple")
t.shutdown(wait=False)
- """.format(executor_type=self.executor_type.__name__))
+ """.format(executor_type=self.executor_type.__name__,
+ context=getattr(self, 'ctx', None)))
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
- def _prime_executor(self):
- pass
-
def test_threads_terminate(self):
def acquire_lock(lock):
lock.acquire()
class ProcessPoolShutdownTest(ExecutorShutdownTest):
- def _prime_executor(self):
- pass
-
def test_processes_terminate(self):
def acquire_lock(lock):
lock.acquire()
- mp_context = get_context()
+ mp_context = self.get_context()
+ if mp_context.get_start_method(allow_none=False) == "fork":
+ # fork pre-spawns, not on demand.
+ expected_num_processes = self.worker_count
+ else:
+ expected_num_processes = 3
+
sem = mp_context.Semaphore(0)
for _ in range(3):
self.executor.submit(acquire_lock, sem)
- self.assertEqual(len(self.executor._processes), 3)
+ self.assertEqual(len(self.executor._processes), expected_num_processes)
for _ in range(3):
sem.release()
processes = self.executor._processes
p.join()
def test_context_manager_shutdown(self):
- with futures.ProcessPoolExecutor(max_workers=5) as e:
+ with futures.ProcessPoolExecutor(
+ max_workers=5, mp_context=self.get_context()) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
p.join()
def test_del_shutdown(self):
- executor = futures.ProcessPoolExecutor(max_workers=5)
+ executor = futures.ProcessPoolExecutor(
+ max_workers=5, mp_context=self.get_context())
res = executor.map(abs, range(-5, 5))
executor_manager_thread = executor._executor_manager_thread
processes = executor._processes
def test_shutdown_no_wait(self):
# Ensure that the executor cleans up the processes when calling
# shutdown with wait=False
- executor = futures.ProcessPoolExecutor(max_workers=5)
+ executor = futures.ProcessPoolExecutor(
+ max_workers=5, mp_context=self.get_context())
res = executor.map(abs, range(-5, 5))
processes = executor._processes
call_queue = executor._call_queue
pool.submit(submit, pool)
for _ in range(50):
- with futures.ProcessPoolExecutor(1, mp_context=get_context('fork')) as workers:
+ with futures.ProcessPoolExecutor(1, mp_context=mp.get_context('fork')) as workers:
workers.submit(tuple)
def test_ressources_gced_in_workers(self):
# Ensure that argument for a job are correctly gc-ed after the job
# is finished
- mgr = get_context(self.ctx).Manager()
+ mgr = self.get_context().Manager()
obj = EventfulGCObj(mgr)
future = self.executor.submit(id, obj)
future.result()
mgr.join()
def test_saturation(self):
- executor = self.executor_type(4)
- mp_context = get_context()
+ executor = self.executor
+ mp_context = self.get_context()
sem = mp_context.Semaphore(0)
job_count = 15 * executor._max_workers
- try:
- for _ in range(job_count):
- executor.submit(sem.acquire)
- self.assertEqual(len(executor._processes), executor._max_workers)
- for _ in range(job_count):
- sem.release()
- finally:
- executor.shutdown()
+ for _ in range(job_count):
+ executor.submit(sem.acquire)
+ self.assertEqual(len(executor._processes), executor._max_workers)
+ for _ in range(job_count):
+ sem.release()
def test_idle_process_reuse_one(self):
- executor = self.executor_type(4)
+ executor = self.executor
+ assert executor._max_workers >= 4
+ if self.get_context().get_start_method(allow_none=False) == "fork":
+ raise unittest.SkipTest("Incompatible with the fork start method.")
executor.submit(mul, 21, 2).result()
executor.submit(mul, 6, 7).result()
executor.submit(mul, 3, 14).result()
self.assertEqual(len(executor._processes), 1)
- executor.shutdown()
def test_idle_process_reuse_multiple(self):
- executor = self.executor_type(4)
+ executor = self.executor
+ assert executor._max_workers <= 5
+ if self.get_context().get_start_method(allow_none=False) == "fork":
+ raise unittest.SkipTest("Incompatible with the fork start method.")
executor.submit(mul, 12, 7).result()
executor.submit(mul, 33, 25)
executor.submit(mul, 25, 26).result()
executor.submit(mul, 18, 29)
- self.assertLessEqual(len(executor._processes), 2)
+ executor.submit(mul, 1, 2).result()
+ executor.submit(mul, 0, 9)
+ self.assertLessEqual(len(executor._processes), 3)
executor.shutdown()
+
create_executor_tests(ProcessPoolExecutorTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
self.executor.shutdown(wait=True)
executor = self.executor_type(
- max_workers=2, mp_context=get_context(self.ctx))
+ max_workers=2, mp_context=self.get_context())
res = executor.submit(func, *args)
if ignore_stderr:
# if a worker fails after the shutdown call.
self.executor.shutdown(wait=True)
with self.executor_type(max_workers=2,
- mp_context=get_context(self.ctx)) as executor:
+ mp_context=self.get_context()) as executor:
self.executor = executor # Allow clean up in fail_on_deadlock
f = executor.submit(_crash, delay=.1)
executor.shutdown(wait=True)
# Reported in bpo-39104.
self.executor.shutdown(wait=True)
with self.executor_type(max_workers=2,
- mp_context=get_context(self.ctx)) as executor:
+ mp_context=self.get_context()) as executor:
self.executor = executor # Allow clean up in fail_on_deadlock
# Start the executor and get the executor_manager_thread to collect
self.assertEqual(len(h4), 2)
self.assertEqual(len(h5), 3)
+ def test_hamt_collision_3(self):
+ # Test that iteration works with the deepest tree possible.
+ # https://github.com/python/cpython/issues/93065
+
+ C = HashKey(0b10000000_00000000_00000000_00000000, 'C')
+ D = HashKey(0b10000000_00000000_00000000_00000000, 'D')
+
+ E = HashKey(0b00000000_00000000_00000000_00000000, 'E')
+
+ h = hamt()
+ h = h.set(C, 'C')
+ h = h.set(D, 'D')
+ h = h.set(E, 'E')
+
+ # BitmapNode(size=2 count=1 bitmap=0b1):
+ # NULL:
+ # BitmapNode(size=2 count=1 bitmap=0b1):
+ # NULL:
+ # BitmapNode(size=2 count=1 bitmap=0b1):
+ # NULL:
+ # BitmapNode(size=2 count=1 bitmap=0b1):
+ # NULL:
+ # BitmapNode(size=2 count=1 bitmap=0b1):
+ # NULL:
+ # BitmapNode(size=2 count=1 bitmap=0b1):
+ # NULL:
+ # BitmapNode(size=4 count=2 bitmap=0b101):
+ # <Key name:E hash:0>: 'E'
+ # NULL:
+ # CollisionNode(size=4 id=0x107a24520):
+ # <Key name:C hash:2147483648>: 'C'
+ # <Key name:D hash:2147483648>: 'D'
+
+ self.assertEqual({k.name for k in h.keys()}, {'C', 'D', 'E'})
+
def test_hamt_stress(self):
COLLECTION_SIZE = 7000
TEST_ITERS_EVERY = 647
import sys
import tempfile
import threading
+import traceback
import unittest
from contextlib import * # Tests __all__
from test import support
stack.push(lambda *exc: True)
1/0
+ def test_exit_exception_traceback(self):
+ # This test captures the current behavior of ExitStack so that we know
+        # if we ever unintentionally change it. It is not a statement of what the
+ # desired behavior is (for instance, we may want to remove some of the
+ # internal contextlib frames).
+
+ def raise_exc(exc):
+ raise exc
+
+ try:
+ with self.exit_stack() as stack:
+ stack.callback(raise_exc, ValueError)
+ 1/0
+ except ValueError as e:
+ exc = e
+
+ self.assertIsInstance(exc, ValueError)
+ ve_frames = traceback.extract_tb(exc.__traceback__)
+ expected = \
+ [('test_exit_exception_traceback', 'with self.exit_stack() as stack:')] + \
+ self.callback_error_internal_frames + \
+ [('_exit_wrapper', 'callback(*args, **kwds)'),
+ ('raise_exc', 'raise exc')]
+
+ self.assertEqual(
+ [(f.name, f.line) for f in ve_frames], expected)
+
+ self.assertIsInstance(exc.__context__, ZeroDivisionError)
+ zde_frames = traceback.extract_tb(exc.__context__.__traceback__)
+ self.assertEqual([(f.name, f.line) for f in zde_frames],
+ [('test_exit_exception_traceback', '1/0')])
+
def test_exit_exception_chaining_reference(self):
# Sanity check to make sure that ExitStack chaining matches
# actual nested with statements
class TestExitStack(TestBaseExitStack, unittest.TestCase):
exit_stack = ExitStack
+ callback_error_internal_frames = [
+ ('__exit__', 'raise exc_details[1]'),
+ ('__exit__', 'if cb(*exc_details):'),
+ ]
class TestRedirectStream:
@functools.wraps(func)
def wrapper(*args, **kwargs):
coro = func(*args, **kwargs)
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- try:
- return loop.run_until_complete(coro)
- finally:
- loop.close()
- asyncio.set_event_loop_policy(None)
+ asyncio.run(coro)
return wrapper
+def tearDownModule():
+ asyncio.set_event_loop_policy(None)
+
class TestAbstractAsyncContextManager(unittest.TestCase):
return self.run_coroutine(self.__aexit__(*exc_details))
exit_stack = SyncAsyncExitStack
+ callback_error_internal_frames = [
+ ('__exit__', 'return self.run_coroutine(self.__aexit__(*exc_details))'),
+ ('run_coroutine', 'raise exc'),
+ ('run_coroutine', 'raise exc'),
+ ('__aexit__', 'raise exc_details[1]'),
+ ('__aexit__', 'cb_suppress = cb(*exc_details)'),
+ ]
def setUp(self):
self.loop = asyncio.new_event_loop()
stdscr.echochar(b'A')
stdscr.echochar(65)
with self.assertRaises((UnicodeEncodeError, OverflowError)):
- stdscr.echochar('\u20ac')
+ # Unicode is not fully supported yet, but at least it does
+ # not crash.
+ # It is supposed to fail because either the character is
+ # not encodable with the current encoding, or it is encoded to
+ # a multibyte sequence.
+ stdscr.echochar('\u0114')
stdscr.echochar('A', curses.A_BOLD)
self.assertIs(stdscr.is_wintouched(), False)
class A(metaclass=M):
pass
+ def test_disappearing_custom_mro(self):
+ """
+ gh-92112: A custom mro() returning a result conflicting with
+ __bases__ and deleting itself caused a double free.
+ """
+ class B:
+ pass
+
+ class M(DebugHelperMeta):
+ def mro(cls):
+ del M.mro
+ return (B,)
+
+ with self.assertRaises(TypeError):
+ class A(metaclass=M):
+ pass
+
if __name__ == "__main__":
unittest.main()
# NOTE: There are some additional tests relating to interaction with
# zipimport in the test_zipimport_support test module.
+# There are also related tests in `test_doctest2` module.
######################################################################
## Sample Objects (used by test cases)
>>> tests = finder.find(sample_func)
>>> print(tests) # doctest: +ELLIPSIS
- [<DocTest sample_func from test_doctest.py:28 (1 example)>]
+ [<DocTest sample_func from test_doctest.py:29 (1 example)>]
The exact name depends on how test_doctest was invoked, so allow for
leading path components.
1 SampleClass.double
1 SampleClass.get
+When used with `exclude_empty=False` we are also interested in line numbers
+of doctests that are empty.
+It used to be broken for quite some time until `bpo-28249`.
+
+ >>> from test import doctest_lineno
+ >>> tests = doctest.DocTestFinder(exclude_empty=False).find(doctest_lineno)
+ >>> for t in tests:
+ ... print('%5s %s' % (t.lineno, t.name))
+ None test.doctest_lineno
+ 22 test.doctest_lineno.ClassWithDocstring
+ 30 test.doctest_lineno.ClassWithDoctest
+ None test.doctest_lineno.ClassWithoutDocstring
+ None test.doctest_lineno.MethodWrapper
+ 39 test.doctest_lineno.MethodWrapper.method_with_docstring
+ 45 test.doctest_lineno.MethodWrapper.method_with_doctest
+ None test.doctest_lineno.MethodWrapper.method_without_docstring
+ 4 test.doctest_lineno.func_with_docstring
+ 12 test.doctest_lineno.func_with_doctest
+ None test.doctest_lineno.func_without_docstring
+
Turning off Recursion
~~~~~~~~~~~~~~~~~~~~~
DocTestFinder can be told not to look for tests in contained objects
# XXX Should this be a new Defect instead?
defects = [errors.CharsetError])
+ def test_invalid_character_in_charset(self):
+ self._test('=?utf-8\udce2\udc80\udc9d?q?foo=ACbar?=',
+ b'foo\xacbar'.decode('ascii', 'surrogateescape'),
+ charset = 'utf-8\udce2\udc80\udc9d',
+ # XXX Should this be a new Defect instead?
+ defects = [errors.CharsetError])
+
def test_q_nonascii(self):
self._test('=?utf-8?q?=C3=89ric?=',
'Éric',
import email.policy
from email.charset import Charset
-from email.header import Header, decode_header, make_header
-from email.parser import Parser, HeaderParser
from email.generator import Generator, DecodedGenerator, BytesGenerator
+from email.header import Header, decode_header, make_header
+from email.headerregistry import HeaderRegistry
from email.message import Message
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
-from email.mime.text import MIMEText
-from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
+from email.mime.image import MIMEImage
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
-from email import utils
-from email import errors
+from email.mime.text import MIMEText
+from email.parser import Parser, HeaderParser
+from email import base64mime
from email import encoders
+from email import errors
from email import iterators
-from email import base64mime
from email import quoprimime
+from email import utils
from test.support import threading_helper
from test.support.os_helper import unlink
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(), 'myfile.txt')
+ def test_rfc2231_bad_character_in_encoding(self):
+ m = """\
+Content-Transfer-Encoding: 8bit
+Content-Disposition: inline; filename*=utf-8\udce2\udc80\udc9d''myfile.txt
+
+"""
+ msg = email.message_from_string(m)
+ self.assertEqual(msg.get_filename(), 'myfile.txt')
+
def test_rfc2231_single_tick_in_filename_extended(self):
eq = self.assertEqual
m = """\
result = fp.getvalue()
self._signed_parts_eq(original, result)
-
+class TestHeaderRegistry(TestEmailBase):
+ # See issue gh-93010.
+ def test_HeaderRegistry(self):
+ reg = HeaderRegistry()
+ a = reg('Content-Disposition', 'attachment; 0*00="foo"')
+ self.assertIsInstance(a.defects[0], errors.InvalidHeaderDefect)
if __name__ == '__main__':
unittest.main()
" charset*=unknown-8bit''utf-8%E2%80%9D\n",
),
+ 'rfc2231_nonascii_in_charset_of_charset_parameter_value': (
+ "text/plain; charset*=utf-8”''utf-8%E2%80%9D",
+ 'text/plain',
+ 'text',
+ 'plain',
+ {'charset': 'utf-8”'},
+ [],
+ 'text/plain; charset="utf-8”"',
+ "Content-Type: text/plain;"
+ " charset*=utf-8''utf-8%E2%80%9D\n",
+ ),
+
'rfc2231_encoded_then_unencoded_segments': (
('application/x-foo;'
'\tname*0*="us-ascii\'en-us\'My";'
if MS_WINDOWS:
# Copy pythonXY.dll (or pythonXY_d.dll)
- ver = sys.version_info
- dll = f'python{ver.major}{ver.minor}'
- dll3 = f'python{ver.major}'
- if debug_build(sys.executable):
- dll += '_d'
- dll3 += '_d'
- dll += '.dll'
- dll3 += '.dll'
- dll = os.path.join(os.path.dirname(self.test_exe), dll)
- dll3 = os.path.join(os.path.dirname(self.test_exe), dll3)
- dll_copy = os.path.join(tmpdir, os.path.basename(dll))
- dll3_copy = os.path.join(tmpdir, os.path.basename(dll3))
- shutil.copyfile(dll, dll_copy)
- shutil.copyfile(dll3, dll3_copy)
+ import fnmatch
+ exedir = os.path.dirname(self.test_exe)
+ for f in os.listdir(exedir):
+ if fnmatch.fnmatch(f, '*.dll'):
+ shutil.copyfile(os.path.join(exedir, f), os.path.join(tmpdir, f))
# Copy Python program
exec_copy = os.path.join(tmpdir, os.path.basename(self.test_exe))
s = '''if True:\n print()\n\texec "mixed tabs and spaces"'''
ckmsg(s, "inconsistent use of tabs and spaces in indentation", TabError)
- def check(self, src, lineno, offset, encoding='utf-8'):
+ def check(self, src, lineno, offset, end_lineno=None, end_offset=None, encoding='utf-8'):
with self.subTest(source=src, lineno=lineno, offset=offset):
with self.assertRaises(SyntaxError) as cm:
compile(src, '<fragment>', 'exec')
self.assertEqual(cm.exception.lineno, lineno)
self.assertEqual(cm.exception.offset, offset)
+ if end_lineno is not None:
+ self.assertEqual(cm.exception.end_lineno, end_lineno)
+ if end_offset is not None:
+ self.assertEqual(cm.exception.end_offset, end_offset)
+
if cm.exception.text is not None:
if not isinstance(src, str):
src = src.decode(encoding, 'replace')
check('match ...:\n case {**rest, "key": value}:\n ...', 2, 19)
check("[a b c d e f]", 1, 2)
check("for x yfff:", 1, 7)
+ check("f(a for a in b, c)", 1, 3, 1, 15)
+ check("f(a for a in b if a, c)", 1, 3, 1, 20)
+ check("f(a, b for b in c)", 1, 6, 1, 18)
+ check("f(a, b for b in c, d)", 1, 6, 1, 18)
# Errors thrown by compile.c
check('class foo:return 1', 1, 11)
import unittest
import os
+import string
import warnings
from fnmatch import fnmatch, fnmatchcase, translate, filter
check('usr/bin', 'usr\\bin', normsep)
check('usr\\bin', 'usr\\bin')
+    def test_char_set(self):
+        # Verify fnmatch character-set patterns ([..], [!..]) against every
+        # lowercase letter, digit and punctuation character.
+        ignorecase = os.path.normcase('ABC') == os.path.normcase('abc')
+        check = self.check_match
+        testcases = string.ascii_lowercase + string.digits + string.punctuation
+        for c in testcases:
+            check(c, '[az]', c in 'az')
+            check(c, '[!az]', c not in 'az')
+        # Case insensitive.
+        for c in testcases:
+            check(c, '[AZ]', (c in 'az') and ignorecase)
+            check(c, '[!AZ]', (c not in 'az') or not ignorecase)
+        for c in string.ascii_uppercase:
+            check(c, '[az]', (c in 'AZ') and ignorecase)
+            check(c, '[!az]', (c not in 'AZ') or not ignorecase)
+        # Repeated same character.
+        for c in testcases:
+            check(c, '[aa]', c == 'a')
+        # Special cases.
+        for c in testcases:
+            check(c, '[^az]', c in '^az')
+            check(c, '[[az]', c in '[az')
+            check(c, r'[!]]', c != ']')
+        check('[', '[')
+        check('[]', '[]')
+        check('[!', '[!')
+    def test_range(self):
+        # Verify fnmatch range patterns ([b-d], [!b-d], combined and
+        # degenerate ranges) against letters, digits and punctuation.
+        ignorecase = os.path.normcase('ABC') == os.path.normcase('abc')
+        normsep = os.path.normcase('\\') == os.path.normcase('/')
+        check = self.check_match
+        testcases = string.ascii_lowercase + string.digits + string.punctuation
+        for c in testcases:
+            check(c, '[b-d]', c in 'bcd')
+            check(c, '[!b-d]', c not in 'bcd')
+            check(c, '[b-dx-z]', c in 'bcdxyz')
+            check(c, '[!b-dx-z]', c not in 'bcdxyz')
+        # Case insensitive.
+        for c in testcases:
+            check(c, '[B-D]', (c in 'bcd') and ignorecase)
+            check(c, '[!B-D]', (c not in 'bcd') or not ignorecase)
+        for c in string.ascii_uppercase:
+            check(c, '[b-d]', (c in 'BCD') and ignorecase)
+            check(c, '[!b-d]', (c not in 'BCD') or not ignorecase)
+        # Upper bound == lower bound.
+        for c in testcases:
+            check(c, '[b-b]', c == 'b')
+        # Special cases.
+        for c in testcases:
+            check(c, '[!-#]', c not in '-#')
+            check(c, '[!--.]', c not in '-.')
+            check(c, '[^-`]', c in '^_`')
+            if not (normsep and c == '/'):
+                check(c, '[[-^]', c in r'[\]^')
+                check(c, r'[\-^]', c in r'\]^')
+            check(c, '[b-]', c in '-b')
+            check(c, '[!b-]', c not in '-b')
+            check(c, '[-b]', c in '-b')
+            check(c, '[!-b]', c not in '-b')
+            check(c, '[-]', c in '-')
+            check(c, '[!-]', c not in '-')
+        # Upper bound is less than lower bound: error in RE.
+        for c in testcases:
+            check(c, '[d-b]', False)
+            check(c, '[!d-b]', True)
+            check(c, '[d-bx-z]', c in 'xyz')
+            check(c, '[!d-bx-z]', c not in 'xyz')
+            check(c, '[d-b^-`]', c in '^_`')
+
+ def test_sep_in_char_set(self):
+ normsep = os.path.normcase('\\') == os.path.normcase('/')
+ check = self.check_match
+ check('/', r'[/]')
+ check('\\', r'[\]')
+ check('/', r'[\]', normsep)
+ check('\\', r'[/]', normsep)
+ check('[/]', r'[/]', False)
+ check(r'[\\]', r'[/]', False)
+ check('\\', r'[\t]')
+ check('/', r'[\t]', normsep)
+ check('t', r'[\t]')
+ check('\t', r'[\t]', False)
+
+ def test_sep_in_range(self):
+ normsep = os.path.normcase('\\') == os.path.normcase('/')
+ check = self.check_match
+ check('a/b', 'a[.-0]b', not normsep)
+ check('a\\b', 'a[.-0]b', False)
+ check('a\\b', 'a[Z-^]b', not normsep)
+ check('a/b', 'a[Z-^]b', False)
+
+ check('a/b', 'a[/-0]b', not normsep)
+ check(r'a\b', 'a[/-0]b', False)
+ check('a[/-0]b', 'a[/-0]b', False)
+ check(r'a[\-0]b', 'a[/-0]b', False)
+
+ check('a/b', 'a[.-/]b')
+ check(r'a\b', 'a[.-/]b', normsep)
+ check('a[.-/]b', 'a[.-/]b', False)
+ check(r'a[.-\]b', 'a[.-/]b', False)
+
+ check(r'a\b', r'a[\-^]b')
+ check('a/b', r'a[\-^]b', normsep)
+ check(r'a[\-^]b', r'a[\-^]b', False)
+ check('a[/-^]b', r'a[\-^]b', False)
+
+ check(r'a\b', r'a[Z-\]b', not normsep)
+ check('a/b', r'a[Z-\]b', False)
+ check(r'a[Z-\]b', r'a[Z-\]b', False)
+ check('a[Z-/]b', r'a[Z-\]b', False)
+
def test_warnings(self):
with warnings.catch_warnings():
warnings.simplefilter('error', Warning)
"f'{'",
"f'x{<'", # See bpo-46762.
"f'x{>'",
+ "f'{i='", # See gh-93418.
])
# But these are just normal strings.
def test_algorithms_available(self):
self.assertTrue(set(hashlib.algorithms_guaranteed).
issubset(hashlib.algorithms_available))
+ # all available algorithms must be loadable, bpo-47101
+ self.assertNotIn("undefined", hashlib.algorithms_available)
+ for name in hashlib.algorithms_available:
+ digest = hashlib.new(name, usedforsecurity=False)
def test_usedforsecurity_true(self):
hashlib.new("sha256", usedforsecurity=True)
@threading_helper.reap_threads
@cpython_only
+ @unittest.skipUnless(__debug__, "Won't work if __debug__ is False")
def test_dump_ur(self):
# See: http://bugs.python.org/issue26543
untagged_resp_dict = {'READ-WRITE': [b'']}
if __name__ == "__main__":
- unittets.main()
+ unittest.main()
assertBadAddress("1.2.3.256", re.escape("256 (> 255)"))
def test_valid_netmask(self):
+ self.assertEqual(str(self.factory(('192.0.2.0', 24))), '192.0.2.0/24')
+ self.assertEqual(str(self.factory(('192.0.2.0', '24'))), '192.0.2.0/24')
+ self.assertEqual(str(self.factory(('192.0.2.0', '255.255.255.0'))),
+ '192.0.2.0/24')
self.assertEqual(str(self.factory('192.0.2.0/255.255.255.0')),
'192.0.2.0/24')
for i in range(0, 33):
def test_valid_netmask(self):
# We only support CIDR for IPv6, because expanded netmasks are not
# standard notation.
+ self.assertEqual(str(self.factory(('2001:db8::', 32))),
+ '2001:db8::/32')
+ self.assertEqual(str(self.factory(('2001:db8::', '32'))),
+ '2001:db8::/32')
self.assertEqual(str(self.factory('2001:db8::/32')), '2001:db8::/32')
for i in range(0, 129):
# Generate and re-parse the CIDR format (trivial).
self.assertEqual(ipaddress.IPv4Interface((3221225985, 24)),
ipaddress.IPv4Interface('192.0.2.1/24'))
+ # Invalid netmask
+ with self.assertRaises(ValueError):
+ ipaddress.IPv4Network(('192.0.2.1', '255.255.255.255.0'))
+
+ # Invalid netmask using factory
+ with self.assertRaises(ValueError):
+ ipaddress.ip_network(('192.0.2.1', '255.255.255.255.0'))
+
# issue #16531: constructing IPv6Network from an (address, mask) tuple
def testIPv6Tuple(self):
# /128
ipaddress.IPv6Network((ip_scoped, 96))
# strict=False and host bits set
+ # Invalid netmask
+ with self.assertRaises(ValueError):
+ ipaddress.IPv6Network(('2001:db8::1', '255.255.255.0'))
+
+ # Invalid netmask using factory
+ with self.assertRaises(ValueError):
+ ipaddress.ip_network(('2001:db8::1', '255.255.255.0'))
+
# issue57
def testAddressIntMath(self):
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') + 255,
-import os
+import opcode
+import re
+import sys
import textwrap
import unittest
-from test.support import os_helper
+from test.support import os_helper, verbose
from test.support.script_helper import assert_python_ok
+Py_DEBUG = hasattr(sys, 'gettotalrefcount')
+
+@unittest.skipUnless(Py_DEBUG, "lltrace requires Py_DEBUG")
class TestLLTrace(unittest.TestCase):
def test_lltrace_does_not_crash_on_subscript_operator(self):
assert_python_ok(os_helper.TESTFN)
+ def run_code(self, code):
+ code = textwrap.dedent(code).strip()
+ with open(os_helper.TESTFN, 'w', encoding='utf-8') as fd:
+ self.addCleanup(os_helper.unlink, os_helper.TESTFN)
+ fd.write(code)
+ status, stdout, stderr = assert_python_ok(os_helper.TESTFN)
+ self.assertEqual(stderr, b"")
+ self.assertEqual(status, 0)
+ result = stdout.decode('utf-8')
+ if verbose:
+ print("\n\n--- code ---")
+ print(code)
+ print("\n--- stdout ---")
+ print(result)
+ print()
+ return result
+
+ def check_op(self, op, stdout, present):
+ op = opcode.opmap[op]
+ regex = re.compile(f': {op}($|, )', re.MULTILINE)
+ if present:
+ self.assertTrue(regex.search(stdout),
+ f'": {op}" not found in: {stdout}')
+ else:
+ self.assertFalse(regex.search(stdout),
+ f'": {op}" found in: {stdout}')
+
+ def check_op_in(self, op, stdout):
+ self.check_op(op, stdout, True)
+
+ def check_op_not_in(self, op, stdout):
+ self.check_op(op, stdout, False)
+
+ def test_lltrace(self):
+ stdout = self.run_code("""
+ def dont_trace_1():
+ a = "a"
+ a = 10 * a
+ def trace_me():
+ for i in range(3):
+ +i
+ def dont_trace_2():
+ x = 42
+ y = -x
+ dont_trace_1()
+ __ltrace__ = 1
+ trace_me()
+ del __ltrace__
+ dont_trace_2()
+ """)
+ self.check_op_in("GET_ITER", stdout)
+ self.check_op_in("FOR_ITER", stdout)
+ self.check_op_in("UNARY_POSITIVE", stdout)
+ self.check_op_in("POP_TOP", stdout)
+
+ # before: dont_trace_1() is not traced
+ self.check_op_not_in("BINARY_MULTIPLY", stdout)
+
+ # after: dont_trace_2() is not traced
+ self.check_op_not_in("UNARY_NEGATIVE", stdout)
+
+
if __name__ == "__main__":
unittest.main()
if sys.platform != 'cygwin':
mode += 't'
temp_fd, temp_filename = tempfile.mkstemp()
- with os.fdopen(temp_fd, mode=mode) as fp:
+ with os.fdopen(temp_fd, mode=mode, encoding="utf-8") as fp:
fp.write(test_data)
self.addCleanup(os.unlink, temp_filename)
return netrc.netrc(temp_filename)
import sys
import sysconfig
import tempfile
+import textwrap
import threading
import time
import types
self.assertEqual(0, handle_delta)
+ def test_stat_unlink_race(self):
+ # bpo-46785: the implementation of os.stat() falls back to reading
+ # the parent directory if CreateFileW() fails with a permission
+ # error. If reading the parent directory fails because the file or
+ # directory are subsequently unlinked, or because the volume or
+ # share are no longer available, then the original permission error
+ # should not be restored.
+ filename = os_helper.TESTFN
+ self.addCleanup(os_helper.unlink, filename)
+ deadline = time.time() + 5
+ command = textwrap.dedent("""\
+ import os
+ import sys
+ import time
+
+ filename = sys.argv[1]
+ deadline = float(sys.argv[2])
+
+ while time.time() < deadline:
+ try:
+ with open(filename, "w") as f:
+ pass
+ except OSError:
+ pass
+ try:
+ os.remove(filename)
+ except OSError:
+ pass
+ """)
+
+ with subprocess.Popen([sys.executable, '-c', command, filename, str(deadline)]) as proc:
+ while time.time() < deadline:
+ try:
+ os.stat(filename)
+ except FileNotFoundError as e:
+ assert e.winerror == 2 # ERROR_FILE_NOT_FOUND
+ try:
+ proc.wait(1)
+ except subprocess.TimeoutExpired:
+ proc.terminate()
+
+
@os_helper.skip_unless_symlink
class NonLocalSymlinkTests(unittest.TestCase):
self.assertEqual(par[0], P('/a/b'))
self.assertEqual(par[1], P('/a'))
self.assertEqual(par[2], P('/'))
+ self.assertEqual(par[-1], P('/'))
+ self.assertEqual(par[-2], P('/a'))
+ self.assertEqual(par[-3], P('/a/b'))
self.assertEqual(par[0:1], (P('/a/b'),))
self.assertEqual(par[:2], (P('/a/b'), P('/a')))
self.assertEqual(par[:-1], (P('/a/b'), P('/a')))
self.assertEqual(par[::-1], (P('/'), P('/a'), P('/a/b')))
self.assertEqual(list(par), [P('/a/b'), P('/a'), P('/')])
with self.assertRaises(IndexError):
+ par[-4]
+ with self.assertRaises(IndexError):
par[3]
def test_drive_common(self):
mask = posix.sched_getaffinity(0)
self.assertIsInstance(mask, set)
self.assertGreaterEqual(len(mask), 1)
- self.assertRaises(OSError, posix.sched_getaffinity, -1)
+ if not sys.platform.startswith("freebsd"):
+ # bpo-47205: does not raise OSError on FreeBSD
+ self.assertRaises(OSError, posix.sched_getaffinity, -1)
for cpu in mask:
self.assertIsInstance(cpu, int)
self.assertGreaterEqual(cpu, 0)
self.assertRaises(ValueError, posix.sched_setaffinity, 0, [-10])
self.assertRaises(ValueError, posix.sched_setaffinity, 0, map(int, "0X"))
self.assertRaises(OverflowError, posix.sched_setaffinity, 0, [1<<128])
- self.assertRaises(OSError, posix.sched_setaffinity, -1, mask)
+ if not sys.platform.startswith("freebsd"):
+ # bpo-47205: does not raise OSError on FreeBSD
+ self.assertRaises(OSError, posix.sched_setaffinity, -1, mask)
def test_rtld_constants(self):
# check presence of major RTLD_* constants
re.compile(r'(?P<a>x)(?P=a)(?(a)y)')
re.compile(r'(?P<a1>x)(?P=a1)(?(a1)y)')
re.compile(r'(?P<a1>x)\1(?(1)y)')
+ re.compile(b'(?P<a1>x)(?P=a1)(?(a1)y)')
+ # New valid identifiers in Python 3
+ re.compile('(?P<µ>x)(?P=µ)(?(µ)y)')
+ re.compile('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)(?P=𝔘𝔫𝔦𝔠𝔬𝔡𝔢)(?(𝔘𝔫𝔦𝔠𝔬𝔡𝔢)y)')
+ # Support > 100 groups.
+ pat = '|'.join('x(?P<a%d>%x)y' % (i, i) for i in range(1, 200 + 1))
+ pat = '(?:%s)(?(200)z|t)' % pat
+ self.assertEqual(re.match(pat, 'xc8yz').span(), (0, 5))
+
+ def test_symbolic_groups_errors(self):
self.checkPatternError(r'(?P<a>)(?P<a>)',
"redefinition of group name 'a' as group 2; "
"was group 1")
self.checkPatternError(r'(?(-1))', "bad character in group name '-1'", 3)
self.checkPatternError(r'(?(1a))', "bad character in group name '1a'", 3)
self.checkPatternError(r'(?(a.))', "bad character in group name 'a.'", 3)
- # New valid/invalid identifiers in Python 3
- re.compile('(?P<µ>x)(?P=µ)(?(µ)y)')
- re.compile('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)(?P=𝔘𝔫𝔦𝔠𝔬𝔡𝔢)(?(𝔘𝔫𝔦𝔠𝔬𝔡𝔢)y)')
self.checkPatternError('(?P<©>x)', "bad character in group name '©'", 4)
+ self.checkPatternError('(?P=©)', "bad character in group name '©'", 4)
+ self.checkPatternError('(?(©)y)', "bad character in group name '©'", 3)
+
+ def test_symbolic_refs(self):
+ self.assertEqual(re.sub('(?P<a>x)|(?P<b>y)', r'\g<b>', 'xx'), '')
+ self.assertEqual(re.sub('(?P<a>x)|(?P<b>y)', r'\2', 'xx'), '')
+ self.assertEqual(re.sub(b'(?P<a1>x)', br'\g<a1>', b'xx'), b'xx')
+ # New valid identifiers in Python 3
+ self.assertEqual(re.sub('(?P<µ>x)', r'\g<µ>', 'xx'), 'xx')
+ self.assertEqual(re.sub('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)', r'\g<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>', 'xx'), 'xx')
# Support > 100 groups.
pat = '|'.join('x(?P<a%d>%x)y' % (i, i) for i in range(1, 200 + 1))
- pat = '(?:%s)(?(200)z|t)' % pat
- self.assertEqual(re.match(pat, 'xc8yz').span(), (0, 5))
+ self.assertEqual(re.sub(pat, r'\g<200>', 'xc8yzxc8y'), 'c8zc8')
- def test_symbolic_refs(self):
+ def test_symbolic_refs_errors(self):
self.checkTemplateError('(?P<a>x)', r'\g<a', 'xx',
'missing >, unterminated name', 3)
self.checkTemplateError('(?P<a>x)', r'\g<', 'xx',
'invalid group reference 2', 1)
with self.assertRaisesRegex(IndexError, "unknown group name 'ab'"):
re.sub('(?P<a>x)', r'\g<ab>', 'xx')
- self.assertEqual(re.sub('(?P<a>x)|(?P<b>y)', r'\g<b>', 'xx'), '')
- self.assertEqual(re.sub('(?P<a>x)|(?P<b>y)', r'\2', 'xx'), '')
self.checkTemplateError('(?P<a>x)', r'\g<-1>', 'xx',
"bad character in group name '-1'", 3)
- # New valid/invalid identifiers in Python 3
- self.assertEqual(re.sub('(?P<µ>x)', r'\g<µ>', 'xx'), 'xx')
- self.assertEqual(re.sub('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)', r'\g<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>', 'xx'), 'xx')
self.checkTemplateError('(?P<a>x)', r'\g<©>', 'xx',
"bad character in group name '©'", 3)
- # Support > 100 groups.
- pat = '|'.join('x(?P<a%d>%x)y' % (i, i) for i in range(1, 200 + 1))
- self.assertEqual(re.sub(pat, r'\g<200>', 'xc8yzxc8y'), 'c8zc8')
+ self.checkTemplateError('(?P<a>x)', r'\g<㊀>', 'xx',
+ "bad character in group name '㊀'", 3)
+ self.checkTemplateError('(?P<a>x)', r'\g<¹>', 'xx',
+ "bad character in group name '¹'", 3)
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
pat = '(?:%s)(?(200)z)' % pat
self.assertEqual(re.match(pat, 'xc8yz').span(), (0, 5))
- self.checkPatternError(r'(?P<a>)(?(0))', 'bad group number', 10)
+ def test_re_groupref_exists_errors(self):
+ self.checkPatternError(r'(?P<a>)(?(0)a|b)', 'bad group number', 10)
+ self.checkPatternError(r'()(?(-1)a|b)',
+ "bad character in group name '-1'", 5)
+ self.checkPatternError(r'()(?(㊀)a|b)',
+ "bad character in group name '㊀'", 5)
+ self.checkPatternError(r'()(?(¹)a|b)',
+ "bad character in group name '¹'", 5)
+ self.checkPatternError(r'()(?(1',
+ "missing ), unterminated name", 5)
+ self.checkPatternError(r'()(?(1)a',
+ "missing ), unterminated subpattern", 2)
self.checkPatternError(r'()(?(1)a|b',
'missing ), unterminated subpattern', 2)
+ self.checkPatternError(r'()(?(1)a|b|c',
+ 'conditional backref with more than '
+ 'two branches', 10)
self.checkPatternError(r'()(?(1)a|b|c)',
'conditional backref with more than '
'two branches', 10)
+ self.checkPatternError(r'()(?(2)a)',
+ "invalid group reference 2", 5)
def test_re_groupref_overflow(self):
from sre_constants import MAXGROUPS
"undefined character name 'SPAM'", 0)
self.checkPatternError(r'[\N{SPAM}]',
"undefined character name 'SPAM'", 1)
+ self.checkPatternError(r'\N{KEYCAP NUMBER SIGN}',
+ "undefined character name 'KEYCAP NUMBER SIGN'", 0)
+ self.checkPatternError(r'[\N{KEYCAP NUMBER SIGN}]',
+ "undefined character name 'KEYCAP NUMBER SIGN'", 1)
self.checkPatternError(br'\N{LESS-THAN SIGN}', r'bad escape \N', 0)
self.checkPatternError(br'[\N{LESS-THAN SIGN}]', r'bad escape \N', 1)
self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
- assert '\u212a'.lower() == 'k' # 'K'
+ # Two different characters have the same lowercase.
+ assert 'K'.lower() == '\u212a'.lower() == 'k' # 'K'
self.assertTrue(re.match(r'K', '\u212a', re.I))
self.assertTrue(re.match(r'k', '\u212a', re.I))
self.assertTrue(re.match(r'\u212a', 'K', re.I))
self.assertTrue(re.match(r'\u212a', 'k', re.I))
- assert '\u017f'.upper() == 'S' # 'ſ'
+
+ # Two different characters have the same uppercase.
+ assert 's'.upper() == '\u017f'.upper() == 'S' # 'ſ'
self.assertTrue(re.match(r'S', '\u017f', re.I))
self.assertTrue(re.match(r's', '\u017f', re.I))
self.assertTrue(re.match(r'\u017f', 'S', re.I))
self.assertTrue(re.match(r'\u017f', 's', re.I))
+
+ # Two different characters have the same uppercase. Unicode 9.0+.
+ assert '\u0432'.upper() == '\u1c80'.upper() == '\u0412' # 'в', 'ᲀ', 'В'
+ self.assertTrue(re.match(r'\u0412', '\u0432', re.I))
+ self.assertTrue(re.match(r'\u0412', '\u1c80', re.I))
+ self.assertTrue(re.match(r'\u0432', '\u0412', re.I))
+ self.assertTrue(re.match(r'\u0432', '\u1c80', re.I))
+ self.assertTrue(re.match(r'\u1c80', '\u0412', re.I))
+ self.assertTrue(re.match(r'\u1c80', '\u0432', re.I))
+
+ # Two different characters have the same multicharacter uppercase.
assert '\ufb05'.upper() == '\ufb06'.upper() == 'ST' # 'ſt', 'st'
self.assertTrue(re.match(r'\ufb05', '\ufb06', re.I))
self.assertTrue(re.match(r'\ufb06', '\ufb05', re.I))
self.assertTrue(re.match(br'[19a]', b'a', re.I))
self.assertTrue(re.match(br'[19a]', b'A', re.I))
self.assertTrue(re.match(br'[19A]', b'a', re.I))
- assert '\u212a'.lower() == 'k' # 'K'
+
+ # Two different characters have the same lowercase.
+ assert 'K'.lower() == '\u212a'.lower() == 'k' # 'K'
self.assertTrue(re.match(r'[19K]', '\u212a', re.I))
self.assertTrue(re.match(r'[19k]', '\u212a', re.I))
self.assertTrue(re.match(r'[19\u212a]', 'K', re.I))
self.assertTrue(re.match(r'[19\u212a]', 'k', re.I))
- assert '\u017f'.upper() == 'S' # 'ſ'
+
+ # Two different characters have the same uppercase.
+ assert 's'.upper() == '\u017f'.upper() == 'S' # 'ſ'
self.assertTrue(re.match(r'[19S]', '\u017f', re.I))
self.assertTrue(re.match(r'[19s]', '\u017f', re.I))
self.assertTrue(re.match(r'[19\u017f]', 'S', re.I))
self.assertTrue(re.match(r'[19\u017f]', 's', re.I))
+
+ # Two different characters have the same uppercase. Unicode 9.0+.
+ assert '\u0432'.upper() == '\u1c80'.upper() == '\u0412' # 'в', 'ᲀ', 'В'
+ self.assertTrue(re.match(r'[19\u0412]', '\u0432', re.I))
+ self.assertTrue(re.match(r'[19\u0412]', '\u1c80', re.I))
+ self.assertTrue(re.match(r'[19\u0432]', '\u0412', re.I))
+ self.assertTrue(re.match(r'[19\u0432]', '\u1c80', re.I))
+ self.assertTrue(re.match(r'[19\u1c80]', '\u0412', re.I))
+ self.assertTrue(re.match(r'[19\u1c80]', '\u0432', re.I))
+
+ # Two different characters have the same multicharacter uppercase.
assert '\ufb05'.upper() == '\ufb06'.upper() == 'ST' # 'ſt', 'st'
self.assertTrue(re.match(r'[19\ufb05]', '\ufb06', re.I))
self.assertTrue(re.match(r'[19\ufb06]', '\ufb05', re.I))
self.assertTrue(re.match(r'[\U00010400-\U00010427]', '\U00010428', re.I))
self.assertTrue(re.match(r'[\U00010400-\U00010427]', '\U00010400', re.I))
- assert '\u212a'.lower() == 'k' # 'K'
+ # Two different characters have the same lowercase.
+ assert 'K'.lower() == '\u212a'.lower() == 'k' # 'K'
self.assertTrue(re.match(r'[J-M]', '\u212a', re.I))
self.assertTrue(re.match(r'[j-m]', '\u212a', re.I))
self.assertTrue(re.match(r'[\u2129-\u212b]', 'K', re.I))
self.assertTrue(re.match(r'[\u2129-\u212b]', 'k', re.I))
- assert '\u017f'.upper() == 'S' # 'ſ'
+
+ # Two different characters have the same uppercase.
+ assert 's'.upper() == '\u017f'.upper() == 'S' # 'ſ'
self.assertTrue(re.match(r'[R-T]', '\u017f', re.I))
self.assertTrue(re.match(r'[r-t]', '\u017f', re.I))
self.assertTrue(re.match(r'[\u017e-\u0180]', 'S', re.I))
self.assertTrue(re.match(r'[\u017e-\u0180]', 's', re.I))
+
+ # Two different characters have the same uppercase. Unicode 9.0+.
+ assert '\u0432'.upper() == '\u1c80'.upper() == '\u0412' # 'в', 'ᲀ', 'В'
+ self.assertTrue(re.match(r'[\u0411-\u0413]', '\u0432', re.I))
+ self.assertTrue(re.match(r'[\u0411-\u0413]', '\u1c80', re.I))
+ self.assertTrue(re.match(r'[\u0431-\u0433]', '\u0412', re.I))
+ self.assertTrue(re.match(r'[\u0431-\u0433]', '\u1c80', re.I))
+ self.assertTrue(re.match(r'[\u1c80-\u1c82]', '\u0412', re.I))
+ self.assertTrue(re.match(r'[\u1c80-\u1c82]', '\u0432', re.I))
+
+ # Two different characters have the same multicharacter uppercase.
assert '\ufb05'.upper() == '\ufb06'.upper() == 'ST' # 'ſt', 'st'
self.assertTrue(re.match(r'[\ufb04-\ufb05]', '\ufb06', re.I))
self.assertTrue(re.match(r'[\ufb06-\ufb07]', '\ufb05', re.I))
self.assertIsNone(re.match(r'(?i:(?-i:a)b)', 'Ab'))
self.assertTrue(re.match(r'(?i:(?-i:a)b)', 'aB'))
- self.assertTrue(re.match(r'(?x: a) b', 'a b'))
- self.assertIsNone(re.match(r'(?x: a) b', ' a b'))
- self.assertTrue(re.match(r'(?-x: a) b', ' ab', re.VERBOSE))
- self.assertIsNone(re.match(r'(?-x: a) b', 'ab', re.VERBOSE))
-
self.assertTrue(re.match(r'\w(?a:\W)\w', '\xe0\xe0\xe0'))
self.assertTrue(re.match(r'(?a:\W(?u:\w)\W)', '\xe0\xe0\xe0'))
self.assertTrue(re.match(r'\W(?u:\w)\W', '\xe0\xe0\xe0', re.ASCII))
self.checkPatternError(r'(?i+', 'missing -, : or )', 3)
self.checkPatternError(r'(?iz', 'unknown flag', 3)
+ def test_ignore_spaces(self):
+ for space in " \t\n\r\v\f":
+ self.assertTrue(re.fullmatch(space + 'a', 'a', re.VERBOSE))
+ for space in b" ", b"\t", b"\n", b"\r", b"\v", b"\f":
+ self.assertTrue(re.fullmatch(space + b'a', b'a', re.VERBOSE))
+ self.assertTrue(re.fullmatch('(?x) a', 'a'))
+ self.assertTrue(re.fullmatch(' (?x) a', 'a', re.VERBOSE))
+ self.assertTrue(re.fullmatch('(?x) (?x) a', 'a'))
+ self.assertTrue(re.fullmatch(' a(?x: b) c', ' ab c'))
+ self.assertTrue(re.fullmatch(' a(?-x: b) c', 'a bc', re.VERBOSE))
+ self.assertTrue(re.fullmatch('(?x) a(?-x: b) c', 'a bc'))
+ self.assertTrue(re.fullmatch('(?x) a| b', 'a'))
+ self.assertTrue(re.fullmatch('(?x) a| b', 'b'))
+
+ def test_comments(self):
+ self.assertTrue(re.fullmatch('#x\na', 'a', re.VERBOSE))
+ self.assertTrue(re.fullmatch(b'#x\na', b'a', re.VERBOSE))
+ self.assertTrue(re.fullmatch('(?x)#x\na', 'a'))
+ self.assertTrue(re.fullmatch('#x\n(?x)#y\na', 'a', re.VERBOSE))
+ self.assertTrue(re.fullmatch('(?x)#x\n(?x)#y\na', 'a'))
+ self.assertTrue(re.fullmatch('#x\na(?x:#y\nb)#z\nc', '#x\nab#z\nc'))
+ self.assertTrue(re.fullmatch('#x\na(?-x:#y\nb)#z\nc', 'a#y\nbc',
+ re.VERBOSE))
+ self.assertTrue(re.fullmatch('(?x)#x\na(?-x:#y\nb)#z\nc', 'a#y\nbc'))
+ self.assertTrue(re.fullmatch('(?x)#x\na|#y\nb', 'a'))
+ self.assertTrue(re.fullmatch('(?x)#x\na|#y\nb', 'b'))
+
def test_bug_6509(self):
# Replacement strings of both types must parse properly.
# all strings
def test_unicode_guard_env(self):
guard = os.environ.get(setup.UNICODE_GUARD_ENV)
self.assertIsNotNone(guard, f"{setup.UNICODE_GUARD_ENV} not set")
- if guard != "\N{SMILING FACE WITH SUNGLASSES}":
+ if guard.isascii():
# Skip to signify that the env var value was changed by the user;
# possibly to something ASCII to work around Unicode issues.
self.skipTest("Modified guard")
"runpy.run_path(%r)\n") % dummy_dir
script_name = self._make_test_script(script_dir, mod_name, source)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
- msg = "recursion depth exceeded"
- self.assertRaisesRegex(RecursionError, msg, run_path, zip_name)
+ self.assertRaises(RecursionError, run_path, zip_name)
def test_encoding(self):
with temp_dir() as script_dir:
self.assertEqual(read_file(src_file), 'foo')
@unittest.skipIf(MACOS or SOLARIS or _winapi, 'On MACOS, Solaris and Windows the errors are not confusing (though different)')
+ # gh-92670: The test uses a trailing slash to force the OS to consider
+ # the path as a directory, but on AIX the trailing slash has no effect
+ # and is considered as a file.
+ @unittest.skipIf(AIX, 'Not valid on AIX, see gh-92670')
def test_copyfile_nonexistent_dir(self):
# Issue 43219
src_dir = self.mkdtemp()
msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
f"{stacks}```")
+ @unittest.skipIf(not sysconfig.get_config_var("HAVE_VFORK"),
+ "vfork() not enabled by configure.")
+ def test__use_vfork(self):
+ # Attempts code coverage within _posixsubprocess.c on the code that
+ # probes the subprocess module for the existence and value of this
+ # attribute in 3.10.5.
+ self.assertTrue(subprocess._USE_VFORK) # The default value regardless.
+ with mock.patch.object(subprocess, "_USE_VFORK", False):
+ self.assertEqual(self.run_python("pass").returncode, 0,
+ msg="False _USE_VFORK failed")
+
+ class RaisingBool:
+ def __bool__(self):
+ raise RuntimeError("force PyObject_IsTrue to return -1")
+
+ with mock.patch.object(subprocess, "_USE_VFORK", RaisingBool()):
+ self.assertEqual(self.run_python("pass").returncode, 0,
+ msg="odd bool()-error _USE_VFORK failed")
+ del subprocess._USE_VFORK
+ self.assertEqual(self.run_python("pass").returncode, 0,
+ msg="lack of a _USE_VFORK attribute failed")
+
def _get_test_grp_name():
for name_group in ('staff', 'nogroup', 'grp', 'nobody', 'nfsnobody'):
>>> try:
... something()
+ ... except:
+ ... pass
+ Traceback (most recent call last):
+ IndentationError: expected an indented block after 'except' statement on line 3
+
+ >>> try:
+ ... something()
... except A:
... pass
Traceback (most recent call last):
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
- expected = self.write_unraisable_exc(
- A.B.X(), "msg", "obj");
+ expected = self.write_unraisable_exc(
+ A.B.X(), "msg", "obj");
report = stderr.getvalue()
testName = 'test_original_unraisablehook_exception_qualname'
self.assertIn(f"{testName}.<locals>.A.B.X", report)
self.compare_events(doit_async.__code__.co_firstlineno,
tracer.events, events)
+ def test_async_for_backwards_jump_has_no_line(self):
+ async def arange(n):
+ for i in range(n):
+ yield i
+ async def f():
+ async for i in arange(3):
+ if i > 100:
+ break # should never be traced
+
+ tracer = self.make_tracer()
+ coro = f()
+ try:
+ sys.settrace(tracer.trace)
+ coro.send(None)
+ except Exception:
+ pass
+ finally:
+ sys.settrace(None)
+
+ events = [
+ (0, 'call'),
+ (1, 'line'),
+ (-3, 'call'),
+ (-2, 'line'),
+ (-1, 'line'),
+ (-1, 'return'),
+ (1, 'exception'),
+ (2, 'line'),
+ (1, 'line'),
+ (-1, 'call'),
+ (-2, 'line'),
+ (-1, 'line'),
+ (-1, 'return'),
+ (1, 'exception'),
+ (2, 'line'),
+ (1, 'line'),
+ (-1, 'call'),
+ (-2, 'line'),
+ (-1, 'line'),
+ (-1, 'return'),
+ (1, 'exception'),
+ (2, 'line'),
+ (1, 'line'),
+ (-1, 'call'),
+ (-2, 'line'),
+ (-2, 'return'),
+ (1, 'exception'),
+ (1, 'return'),
+ ]
+ self.compare_events(f.__code__.co_firstlineno,
+ tracer.events, events)
+
def test_21_repeated_pass(self):
def func():
pass
yield 3
next(gen())
output.append(5)
+
+ @jump_test(2, 3, [1, 3])
+ def test_jump_forward_over_listcomp(output):
+ output.append(1)
+ x = [i for i in range(10)]
+ output.append(3)
+
+ # checking for segfaults.
+ # See https://github.com/python/cpython/issues/92311
+ @jump_test(3, 1, [])
+ def test_jump_backward_over_listcomp(output):
+ a = 1
+ x = [i for i in range(10)]
+ c = 3
+
+ @jump_test(8, 2, [2, 7, 2])
+ def test_jump_backward_over_listcomp_v2(output):
+ flag = False
+ output.append(2)
+ if flag:
+ return
+ x = [i for i in range(5)]
+ flag = 6
+ output.append(7)
+ output.append(8)
+
+ @async_jump_test(2, 3, [1, 3])
+ async def test_jump_forward_over_async_listcomp(output):
+ output.append(1)
+ x = [i async for i in asynciter(range(10))]
+ output.append(3)
+
+ @async_jump_test(3, 1, [])
+ async def test_jump_backward_over_async_listcomp(output):
+ a = 1
+ x = [i async for i in asynciter(range(10))]
+ c = 3
+
+ @async_jump_test(8, 2, [2, 7, 2])
+ async def test_jump_backward_over_async_listcomp_v2(output):
+ flag = False
+ output.append(2)
+ if flag:
+ return
+ x = [i async for i in asynciter(range(5))]
+ flag = 6
+ output.append(7)
+ output.append(8)
if __name__ == "__main__":
{'a': typing.Optional[int], 'b': int}
)
+ def test_non_generic_subscript(self):
+ # For backward compatibility, subscription works
+ # on arbitrary TypedDict types.
+ class TD(TypedDict):
+ a: T
+ A = TD[int]
+ self.assertEqual(A.__origin__, TD)
+ self.assertEqual(A.__parameters__, ())
+ self.assertEqual(A.__args__, (int,))
+ a = A(a = 1)
+ self.assertIs(type(a), dict)
+ self.assertEqual(a, {'a': 1})
+
class IOTests(BaseTestCase):
pass
self.assertEqual(handler.headers_received["Range"], "bytes=20-39")
+ def test_sending_headers_camel(self):
+ handler = self.start_server()
+ req = urllib.request.Request("http://localhost:%s/" % handler.port,
+ headers={"X-SoMe-hEader": "foobar"})
+ with urllib.request.urlopen(req):
+ pass
+ self.assertIn("X-Some-Header", handler.headers_received.keys())
+ self.assertNotIn("X-SoMe-hEader", handler.headers_received.keys())
+
def test_basic(self):
handler = self.start_server()
with urllib.request.urlopen("http://localhost:%s" % handler.port) as open_url:
from test import support
from test.support import os_helper
from test.support import socket_helper
+from test.support import ResourceDenied
from test.test_urllib2 import sanepathname2url
import os
import html
import io
import itertools
-import locale
import operator
import os
import pickle
return newtest
return decorator
+def convlinesep(data):
+ return data.replace(b'\n', os.linesep.encode())
+
class ModuleTest(unittest.TestCase):
def test_sanity(self):
def test_tostring_xml_declaration_unicode_encoding(self):
elem = ET.XML('<body><tag/></body>')
- preferredencoding = locale.getpreferredencoding()
self.assertEqual(
- f"<?xml version='1.0' encoding='{preferredencoding}'?>\n<body><tag /></body>",
- ET.tostring(elem, encoding='unicode', xml_declaration=True)
+ ET.tostring(elem, encoding='unicode', xml_declaration=True),
+ "<?xml version='1.0' encoding='utf-8'?>\n<body><tag /></body>"
)
def test_tostring_xml_declaration_cases(self):
elem = ET.XML('<body><tag>ø</tag></body>')
- preferredencoding = locale.getpreferredencoding()
TESTCASES = [
# (expected_retval, encoding, xml_declaration)
# ... xml_declaration = None
b"<body><tag>ø</tag></body>", 'US-ASCII', True),
(b"<?xml version='1.0' encoding='ISO-8859-1'?>\n"
b"<body><tag>\xf8</tag></body>", 'ISO-8859-1', True),
- (f"<?xml version='1.0' encoding='{preferredencoding}'?>\n"
+ ("<?xml version='1.0' encoding='utf-8'?>\n"
"<body><tag>ø</tag></body>", 'unicode', True),
]
b"<?xml version='1.0' encoding='us-ascii'?>\n<body><tag /></body>"
)
- preferredencoding = locale.getpreferredencoding()
stringlist = ET.tostringlist(elem, encoding='unicode', xml_declaration=True)
self.assertEqual(
''.join(stringlist),
- f"<?xml version='1.0' encoding='{preferredencoding}'?>\n<body><tag /></body>"
+ "<?xml version='1.0' encoding='utf-8'?>\n<body><tag /></body>"
)
self.assertRegex(stringlist[0], r"^<\?xml version='1.0' encoding='.+'?>")
self.assertEqual(['<body', '>', '<tag', ' />', '</body>'], stringlist[1:])
def test_write_to_filename(self):
self.addCleanup(os_helper.unlink, TESTFN)
- tree = ET.ElementTree(ET.XML('''<site />'''))
+ tree = ET.ElementTree(ET.XML('''<site>\xf8</site>'''))
tree.write(TESTFN)
with open(TESTFN, 'rb') as f:
- self.assertEqual(f.read(), b'''<site />''')
+ self.assertEqual(f.read(), b'''<site>ø</site>''')
+
+ def test_write_to_filename_with_encoding(self):
+ self.addCleanup(os_helper.unlink, TESTFN)
+ tree = ET.ElementTree(ET.XML('''<site>\xf8</site>'''))
+ tree.write(TESTFN, encoding='utf-8')
+ with open(TESTFN, 'rb') as f:
+ self.assertEqual(f.read(), b'''<site>\xc3\xb8</site>''')
+
+ tree.write(TESTFN, encoding='ISO-8859-1')
+ with open(TESTFN, 'rb') as f:
+ self.assertEqual(f.read(), convlinesep(
+ b'''<?xml version='1.0' encoding='ISO-8859-1'?>\n'''
+ b'''<site>\xf8</site>'''))
+
+ def test_write_to_filename_as_unicode(self):
+ self.addCleanup(os_helper.unlink, TESTFN)
+ with open(TESTFN, 'w') as f:
+ encoding = f.encoding
+ os_helper.unlink(TESTFN)
+
+ tree = ET.ElementTree(ET.XML('''<site>\xf8</site>'''))
+ tree.write(TESTFN, encoding='unicode')
+ with open(TESTFN, 'rb') as f:
+ data = f.read()
+ expected = "<site>\xf8</site>".encode(encoding, 'xmlcharrefreplace')
+ if encoding.lower() in ('utf-8', 'ascii'):
+ self.assertEqual(data, expected)
+ else:
+ self.assertIn(b"<?xml version='1.0' encoding=", data)
+ self.assertIn(expected, data)
def test_write_to_text_file(self):
self.addCleanup(os_helper.unlink, TESTFN)
- tree = ET.ElementTree(ET.XML('''<site />'''))
+ tree = ET.ElementTree(ET.XML('''<site>\xf8</site>'''))
with open(TESTFN, 'w', encoding='utf-8') as f:
tree.write(f, encoding='unicode')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
- self.assertEqual(f.read(), b'''<site />''')
+ self.assertEqual(f.read(), b'''<site>\xc3\xb8</site>''')
+
+ with open(TESTFN, 'w', encoding='ascii', errors='xmlcharrefreplace') as f:
+ tree.write(f, encoding='unicode')
+ self.assertFalse(f.closed)
+ with open(TESTFN, 'rb') as f:
+ self.assertEqual(f.read(), convlinesep(
+ b'''<?xml version='1.0' encoding='ascii'?>\n'''
+ b'''<site>ø</site>'''))
+
+ with open(TESTFN, 'w', encoding='ISO-8859-1') as f:
+ tree.write(f, encoding='unicode')
+ self.assertFalse(f.closed)
+ with open(TESTFN, 'rb') as f:
+ self.assertEqual(f.read(), convlinesep(
+ b'''<?xml version='1.0' encoding='ISO-8859-1'?>\n'''
+ b'''<site>\xf8</site>'''))
def test_write_to_binary_file(self):
self.addCleanup(os_helper.unlink, TESTFN)
- tree = ET.ElementTree(ET.XML('''<site />'''))
+ tree = ET.ElementTree(ET.XML('''<site>\xf8</site>'''))
with open(TESTFN, 'wb') as f:
tree.write(f)
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
- self.assertEqual(f.read(), b'''<site />''')
+ self.assertEqual(f.read(), b'''<site>ø</site>''')
+
+ def test_write_to_binary_file_with_encoding(self):
+ self.addCleanup(os_helper.unlink, TESTFN)
+ tree = ET.ElementTree(ET.XML('''<site>\xf8</site>'''))
+ with open(TESTFN, 'wb') as f:
+ tree.write(f, encoding='utf-8')
+ self.assertFalse(f.closed)
+ with open(TESTFN, 'rb') as f:
+ self.assertEqual(f.read(), b'''<site>\xc3\xb8</site>''')
+
+ with open(TESTFN, 'wb') as f:
+ tree.write(f, encoding='ISO-8859-1')
+ self.assertFalse(f.closed)
+ with open(TESTFN, 'rb') as f:
+ self.assertEqual(f.read(),
+ b'''<?xml version='1.0' encoding='ISO-8859-1'?>\n'''
+ b'''<site>\xf8</site>''')
def test_write_to_binary_file_with_bom(self):
self.addCleanup(os_helper.unlink, TESTFN)
- tree = ET.ElementTree(ET.XML('''<site />'''))
+ tree = ET.ElementTree(ET.XML('''<site>\xf8</site>'''))
# test BOM writing to buffered file
with open(TESTFN, 'wb') as f:
tree.write(f, encoding='utf-16')
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
- '''<site />'''.encode("utf-16"))
+ '''<site>\xf8</site>'''.encode("utf-16"))
# test BOM writing to non-buffered file
with open(TESTFN, 'wb', buffering=0) as f:
tree.write(f, encoding='utf-16')
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
- '''<site />'''.encode("utf-16"))
+ '''<site>\xf8</site>'''.encode("utf-16"))
def test_read_from_stringio(self):
tree = ET.ElementTree()
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_stringio(self):
- tree = ET.ElementTree(ET.XML('''<site />'''))
+ tree = ET.ElementTree(ET.XML('''<site>\xf8</site>'''))
stream = io.StringIO()
tree.write(stream, encoding='unicode')
- self.assertEqual(stream.getvalue(), '''<site />''')
+ self.assertEqual(stream.getvalue(), '''<site>\xf8</site>''')
def test_read_from_bytesio(self):
tree = ET.ElementTree()
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_bytesio(self):
- tree = ET.ElementTree(ET.XML('''<site />'''))
+ tree = ET.ElementTree(ET.XML('''<site>\xf8</site>'''))
raw = io.BytesIO()
tree.write(raw)
- self.assertEqual(raw.getvalue(), b'''<site />''')
+ self.assertEqual(raw.getvalue(), b'''<site>ø</site>''')
class dummy:
pass
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_user_text_writer(self):
- tree = ET.ElementTree(ET.XML('''<site />'''))
+ tree = ET.ElementTree(ET.XML('''<site>\xf8</site>'''))
stream = io.StringIO()
writer = self.dummy()
writer.write = stream.write
tree.write(writer, encoding='unicode')
- self.assertEqual(stream.getvalue(), '''<site />''')
+ self.assertEqual(stream.getvalue(), '''<site>\xf8</site>''')
def test_read_from_user_binary_reader(self):
raw = io.BytesIO(b'''<?xml version="1.0"?><site></site>''')
tree = ET.ElementTree()
def test_write_to_user_binary_writer(self):
- tree = ET.ElementTree(ET.XML('''<site />'''))
+ tree = ET.ElementTree(ET.XML('''<site>\xf8</site>'''))
raw = io.BytesIO()
writer = self.dummy()
writer.write = raw.write
tree.write(writer)
- self.assertEqual(raw.getvalue(), b'''<site />''')
+ self.assertEqual(raw.getvalue(), b'''<site>ø</site>''')
def test_write_to_user_binary_writer_with_bom(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
fp.write("short file")
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN)
+ def test_negative_central_directory_offset_raises_BadZipFile(self):
+ # Zip file containing an empty EOCD record
+ buffer = bytearray(b'PK\x05\x06' + b'\0'*18)
+
+ # Set the size of the central directory bytes to become 1,
+ # causing the central directory offset to become negative
+ for dirsize in 1, 2**32-1:
+ buffer[12:16] = struct.pack('<L', dirsize)
+ f = io.BytesIO(buffer)
+ self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, f)
+
def test_closed_zip_raises_ValueError(self):
"""Verify that testzip() doesn't swallow inappropriate exceptions."""
data = io.BytesIO()
"""
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
- all_waiters = self._waiters
- waiters_to_notify = _deque(_islice(all_waiters, n))
- if not waiters_to_notify:
- return
- for waiter in waiters_to_notify:
- waiter.release()
+ waiters = self._waiters
+ while waiters and n > 0:
+ waiter = waiters[0]
+ try:
+ waiter.release()
+ except RuntimeError:
+ # gh-92530: The previous call of notify() released the lock,
+ # but was interrupted before removing it from the queue.
+ # It can happen if a signal handler raises an exception,
+ # like CTRL+C which raises KeyboardInterrupt.
+ pass
+ else:
+ n -= 1
try:
- all_waiters.remove(waiter)
+ waiters.remove(waiter)
except ValueError:
pass
the bitmap if None is given.
Under Windows, the DEFAULT parameter can be used to set the icon
- for the widget and any descendents that don't have an icon set
+ for the widget and any descendants that don't have an icon set
explicitly. DEFAULT can be the relative path to a .ico file
(example: root.iconbitmap(default='myicon.ico') ). See Tk
documentation for more information."""
_default_root = None
def readprofile(self, baseName, className):
- """Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into
- the Tcl Interpreter and calls exec on the contents of BASENAME.py and
- CLASSNAME.py if such a file exists in the home directory."""
+ """Internal function. It reads .BASENAME.tcl and .CLASSNAME.tcl into
+ the Tcl Interpreter and calls exec on the contents of .BASENAME.py and
+ .CLASSNAME.py if such a file exists in the home directory."""
import os
if 'HOME' in os.environ: home = os.environ['HOME']
else: home = os.curdir
# shutdown asyncgens
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
+ # Prevent our executor environment from leaking to future tests.
+ loop.run_until_complete(loop.shutdown_default_executor())
asyncio.set_event_loop(None)
loop.close()
# Returned as Unicode but problems if not converted to ASCII
proxyServer = str(winreg.QueryValueEx(internetSettings,
'ProxyServer')[0])
- if '=' in proxyServer:
- # Per-protocol settings
- for p in proxyServer.split(';'):
- protocol, address = p.split('=', 1)
- # See if address has a type:// prefix
- if not re.match('(?:[^/:]+)://', address):
- address = '%s://%s' % (protocol, address)
- proxies[protocol] = address
- else:
- # Use one setting for all protocols
- if proxyServer[:5] == 'http:':
- proxies['http'] = proxyServer
- else:
- proxies['http'] = 'http://%s' % proxyServer
- proxies['https'] = 'https://%s' % proxyServer
- proxies['ftp'] = 'ftp://%s' % proxyServer
+ if '=' not in proxyServer and ';' not in proxyServer:
+ # Use one setting for all protocols.
+ proxyServer = 'http={0};https={0};ftp={0}'.format(proxyServer)
+ for p in proxyServer.split(';'):
+ protocol, address = p.split('=', 1)
+ # See if address has a type:// prefix
+ if not re.match('(?:[^/:]+)://', address):
+ # Add type:// prefix to address without specifying type
+ if protocol in ('http', 'https', 'ftp'):
+ # The default proxy type of Windows is HTTP
+ address = 'http://' + address
+ elif protocol == 'socks':
+ address = 'socks://' + address
+ proxies[protocol] = address
+ # Use SOCKS proxy for HTTP(S) protocols
+ if proxies.get('socks'):
+ # The default SOCKS proxy type of Windows is SOCKS4
+ address = re.sub(r'^socks://', 'socks4://', proxies['socks'])
+ proxies['http'] = proxies.get('http') or address
+ proxies['https'] = proxies.get('https') or address
internetSettings.Close()
except (OSError, ValueError, TypeError):
# Either registry key not found etc, or the value in an
def prepare_predicate(next, token):
# FIXME: replace with real parser!!! refs:
- # http://effbot.org/zone/simple-iterator-parser.htm
# http://javascript.crockford.com/tdop/tdop.html
signature = []
predicate = []
encoding = "utf-8"
else:
encoding = "us-ascii"
- enc_lower = encoding.lower()
- with _get_writer(file_or_filename, enc_lower) as write:
+ with _get_writer(file_or_filename, encoding) as (write, declared_encoding):
if method == "xml" and (xml_declaration or
(xml_declaration is None and
- enc_lower not in ("utf-8", "us-ascii", "unicode"))):
- declared_encoding = encoding
- if enc_lower == "unicode":
- # Retrieve the default encoding for the xml declaration
- import locale
- declared_encoding = locale.getpreferredencoding()
+ declared_encoding.lower() not in ("utf-8", "us-ascii"))):
write("<?xml version='1.0' encoding='%s'?>\n" % (
declared_encoding,))
if method == "text":
write = file_or_filename.write
except AttributeError:
# file_or_filename is a file name
- if encoding == "unicode":
- file = open(file_or_filename, "w")
+ if encoding.lower() == "unicode":
+ file = open(file_or_filename, "w",
+ errors="xmlcharrefreplace")
else:
file = open(file_or_filename, "w", encoding=encoding,
errors="xmlcharrefreplace")
with file:
- yield file.write
+ yield file.write, file.encoding
else:
# file_or_filename is a file-like object
# encoding determines if it is a text or binary writer
- if encoding == "unicode":
+ if encoding.lower() == "unicode":
# use a text writer as is
- yield write
+ yield write, getattr(file_or_filename, "encoding", None) or "utf-8"
else:
# wrap a binary writer with TextIOWrapper
with contextlib.ExitStack() as stack:
# Keep the original file open when the TextIOWrapper is
# destroyed
stack.callback(file.detach)
- yield file.write
+ yield file.write, encoding
def _namespaces(elem, default_namespace=None):
# identify namespaces used in this tree
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
+ if self.start_dir < 0:
+ raise BadZipFile("Bad offset for central directory")
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
Xuanji Li
Zekun Li
Zheao Li
+Eli Libman
Dan Lidral-Porter
Robert van Liere
Ross Light
Piet van Oostrum
Tomas Oppelstrup
Jason Orendorff
+Yan "yyyyyyyan" Orestes
Bastien Orivel
orlnub123
Douglas Orr
Claude Paroz
Heikki Partanen
Harri Pasanen
+Jeremiah Gabriel Pascual
Gaël Pasgrimaud
Feanil Patel
Ashish Nitin Patil
David Steele
Oliver Steele
Greg Stein
+Itai Steinherz
Marek Stepniowski
Baruch Sterin
Chris Stern
Kevin Walzer
Rodrigo Steinmuller Wanderley
Dingyuan Wang
+Jiahua Wang
Ke Wang
Liang-Bo Wang
Greg Ward
Python News
+++++++++++
+What's New in Python 3.10.5 final?
+==================================
+
+*Release date: 2022-06-06*
+
+Core and Builtins
+-----------------
+
+- gh-issue-93418: Fixed an assert where an f-string has an equal sign '='
+ following an expression, but there's no trailing brace. For example,
+ f"{i=".
+
+- gh-issue-91924: Fix ``__ltrace__`` debug feature if the stdout encoding is
+ not UTF-8. Patch by Victor Stinner.
+
+- gh-issue-93061: Backward jumps after ``async for`` loops are no longer
+ given dubious line numbers.
+
+- gh-issue-93065: Fix contextvars HAMT implementation to handle iteration
+ over deep trees.
+
+ The bug was discovered and fixed by Eli Libman. See
+ `MagicStack/immutables#84
+ <https://github.com/MagicStack/immutables/issues/84>`_ for more details.
+
+- gh-issue-92311: Fixed a bug where setting ``frame.f_lineno`` to jump over
+ a list comprehension could misbehave or crash.
+
+- gh-issue-92112: Fix crash triggered by an evil custom ``mro()`` on a
+ metaclass.
+
+- gh-issue-92036: Fix a crash in subinterpreters related to the garbage
+ collector. When a subinterpreter is deleted, untrack all objects tracked
+ by its GC. To prevent a crash in deallocator functions expecting objects
+ to be tracked by the GC, leak a strong reference to these objects on
+ purpose, so they are never deleted and their deallocator functions are not
+ called. Patch by Victor Stinner.
+
+- gh-issue-91421: Fix a potential integer overflow in _Py_DecodeUTF8Ex.
+
+- bpo-47212: Raise :exc:`IndentationError` instead of :exc:`SyntaxError` for
+ a bare ``except`` with no following indent. Improve :exc:`SyntaxError`
+ locations for an un-parenthesized generator used as arguments. Patch by
+ Matthieu Dartiailh.
+
+- bpo-47182: Fix a crash when using a named unicode character like
+ ``"\N{digit nine}"`` after the main interpreter has been initialized a
+ second time.
+
+- bpo-46775: Some Windows system error codes (>= 10000) are now mapped into
+ the correct errno and may now raise a subclass of :exc:`OSError`. Patch by
+ Dong-hee Na.
+
+- bpo-47117: Fix a crash if we fail to decode characters in interactive mode
+ if the tokenizer buffers are uninitialized. Patch by Pablo Galindo.
+
+- bpo-39829: Removed the ``__len__()`` call when initializing a list and
+ moved initializing to ``list_extend``. Patch by Jeremiah Pascual.
+
+- bpo-46962: Classes and functions that unconditionally declared their
+  docstrings ignoring the ``--without-doc-strings`` compilation flag no longer
+ do so.
+
+ The classes affected are :class:`ctypes.UnionType`,
+ :class:`pickle.PickleBuffer`, :class:`testcapi.RecursingInfinitelyError`,
+ and :class:`types.GenericAlias`.
+
+ The functions affected are 24 methods in :mod:`ctypes`.
+
+ Patch by Oleg Iarygin.
+
+- bpo-36819: Fix crashes in built-in encoders with error handlers that
+ return position less or equal than the starting position of non-encodable
+ characters.
+
+Library
+-------
+
+- gh-issue-93156: Accessing the :attr:`pathlib.PurePath.parents` sequence of
+ an absolute path using negative index values produced incorrect results.
+
+- gh-issue-89973: Fix :exc:`re.error` raised in :mod:`fnmatch` if the
+ pattern contains a character range with upper bound lower than lower bound
+ (e.g. ``[c-a]``). Now such ranges are interpreted as empty ranges.
+
+- gh-issue-93010: In a very special case, the email package tried to append
+ the nonexistent ``InvalidHeaderError`` to the defect list. It should have
+ been ``InvalidHeaderDefect``.
+
+- gh-issue-92839: Fixed crash resulting from calling bisect.insort() or
+ bisect.insort_left() with the key argument not equal to None.
+
+- gh-issue-91581: :meth:`~datetime.datetime.utcfromtimestamp` no longer
+ attempts to resolve ``fold`` in the pure Python implementation, since the
+ fold is never 1 in UTC. In addition to being slightly faster in the common
+ case, this also prevents some errors when the timestamp is close to
+ :attr:`datetime.min <datetime.datetime.min>`. Patch by Paul Ganssle.
+
+- gh-issue-92530: Fix an issue that occurred after interrupting
+ :func:`threading.Condition.notify`.
+
+- gh-issue-92049: Forbid pickling constants ``re._constants.SUCCESS`` etc.
+ Previously, pickling did not fail, but the result could not be unpickled.
+
+- bpo-47029: Always close the read end of the pipe used by
+ :class:`multiprocessing.Queue` *after* the last write of buffered data to
+ the write end of the pipe to avoid :exc:`BrokenPipeError` at garbage
+ collection and at :meth:`multiprocessing.Queue.close` calls. Patch by Géry
+ Ogam.
+
+- gh-issue-91401: Provide a fail-safe way to disable :mod:`subprocess` use
+ of ``vfork()`` via a private ``subprocess._USE_VFORK`` attribute. While
+ there is currently no known need for this, if you find a need please only
+ set it to ``False``. File a CPython issue as to why you needed it and link
+ to that from a comment in your code. This attribute is documented as a
+ footnote in 3.11.
+
+- gh-issue-91910: Add missing f prefix to f-strings in error messages from
+ the :mod:`multiprocessing` and :mod:`asyncio` modules.
+
+- gh-issue-91810: :class:`~xml.etree.ElementTree.ElementTree` method
+ :meth:`~xml.etree.ElementTree.ElementTree.write` and function
+ :func:`~xml.etree.ElementTree.tostring` now use the text file's encoding
+ ("UTF-8" if not available) instead of locale encoding in XML declaration
+ when ``encoding="unicode"`` is specified.
+
+- gh-issue-91832: Add ``required`` attribute to :class:`argparse.Action`
+ repr output.
+
+- gh-issue-91734: Fix OSS audio support on Solaris.
+
+- gh-issue-91700: Compilation of regular expression containing a conditional
+ expression ``(?(group)...)`` now raises an appropriate :exc:`re.error` if
+ the group number refers to not defined group. Previously an internal
+ RuntimeError was raised.
+
+- gh-issue-91676: Fix :class:`unittest.IsolatedAsyncioTestCase` to shutdown
+ the per test event loop executor before returning from its ``run`` method
+ so that a not yet stopped or garbage collected executor state does not
+ persist beyond the test.
+
+- gh-issue-90568: Parsing ``\N`` escapes of Unicode Named Character
+ Sequences in a :mod:`regular expression <re>` raises now :exc:`re.error`
+ instead of ``TypeError``.
+
+- gh-issue-91595: Fix the comparison of character and integer inside
+ :func:`Tools.gdb.libpython.write_repr`. Patch by Yu Liu.
+
+- gh-issue-90622: Worker processes for
+ :class:`concurrent.futures.ProcessPoolExecutor` are no longer spawned on
+ demand (a feature added in 3.9) when the multiprocessing context start
+ method is ``"fork"`` as that can lead to deadlocks in the child processes
+ due to a fork happening while threads are running.
+
+- gh-issue-91575: Update case-insensitive matching in the :mod:`re` module
+ to the latest Unicode version.
+
+- gh-issue-91581: Remove an unhandled error case in the C implementation of
+ calls to :meth:`datetime.fromtimestamp <datetime.datetime.fromtimestamp>`
+ with no time zone (i.e. getting a local time from an epoch timestamp).
+ This should have no user-facing effect other than giving a possibly more
+ accurate error message when called with timestamps that fall on
+ 10000-01-01 in the local time. Patch by Paul Ganssle.
+
+- bpo-47260: Fix ``os.closerange()`` potentially being a no-op in a Linux
+ seccomp sandbox.
+
+- bpo-39064: :class:`zipfile.ZipFile` now raises :exc:`zipfile.BadZipFile`
+ instead of ``ValueError`` when reading a corrupt zip file in which the
+ central directory offset is negative.
+
+- bpo-47151: When subprocess tries to use vfork, it now falls back to fork
+ if vfork returns an error. This allows use in situations where vfork isn't
+ allowed by the OS kernel.
+
+- bpo-27929: Fix :meth:`asyncio.loop.sock_connect` to only resolve names for
+ :const:`socket.AF_INET` or :const:`socket.AF_INET6` families. Resolution
+ may not make sense for other families, like :const:`socket.AF_BLUETOOTH`
+ and :const:`socket.AF_UNIX`.
+
+- bpo-43323: Fix errors in the :mod:`email` module if the charset itself
+ contains undecodable/unencodable characters.
+
+- bpo-47101: :const:`hashlib.algorithms_available` now lists only algorithms
+ that are provided by activated crypto providers on OpenSSL 3.0. Legacy
+ algorithms are not listed unless the legacy provider has been loaded into
+ the default OSSL context.
+
+- bpo-46787: Fix :class:`concurrent.futures.ProcessPoolExecutor` exception
+  memory leak.
+
+- bpo-45393: Fix the formatting for ``await x`` and ``not x`` in the
+ operator precedence table when using the :func:`help` system.
+
+- bpo-46415: Fix ipaddress.ip_{address,interface,network} raising TypeError
+ instead of ValueError if given invalid tuple as address parameter.
+
+- bpo-28249: Set :attr:`doctest.DocTest.lineno` to ``None`` when object does
+ not have :attr:`__doc__`.
+
+- bpo-45138: Fix a regression in the :mod:`sqlite3` trace callback where
+ bound parameters were not expanded in the passed statement string. The
+ regression was introduced in Python 3.10 by :issue:`40318`. Patch by
+ Erlend E. Aasland.
+
+- bpo-44493: Add missing terminating NUL in sockaddr_un's length.
+
+ This was potentially observable when using non-abstract AF_UNIX datagram
+ sockets to processes written in another programming language.
+
+- bpo-42627: Fix incorrect parsing of Windows registry proxy settings.
+
+- bpo-36073: Raise :exc:`~sqlite3.ProgrammingError` instead of segfaulting
+ on recursive usage of cursors in :mod:`sqlite3` converters. Patch by
+ Sergey Fedoseev.
+
+Documentation
+-------------
+
+- gh-issue-86438: Clarify that :option:`-W` and :envvar:`PYTHONWARNINGS` are
+ matched literally and case-insensitively, rather than as regular
+ expressions, in :mod:`warnings`.
+
+- gh-issue-92240: Added release dates for "What's New in Python 3.X" for
+  3.0, 3.1, 3.2, 3.8 and 3.10.
+
+- gh-issue-91888: Add a new ``gh`` role to the documentation to link to
+ GitHub issues.
+
+- gh-issue-91783: Document security issues concerning the use of the
+  function :func:`shutil.unpack_archive`.
+
+- gh-issue-91547: Remove "Undocumented modules" page.
+
+- bpo-44347: Clarify the meaning of *dirs_exist_ok*, a kwarg of
+ :func:`shutil.copytree`.
+
+- bpo-38668: Update the introduction to documentation for :mod:`os.path` to
+ remove warnings that became irrelevant after the implementations of
+ :pep:`383` and :pep:`529`.
+
+- bpo-47138: Pin Jinja to a version compatible with Sphinx version 3.2.1.
+
+- bpo-46962: All docstrings in code snippets are now wrapped into
+ :func:`PyDoc_STR` to follow the guideline of `PEP 7's Documentation
+ Strings paragraph
+ <https://www.python.org/dev/peps/pep-0007/#documentation-strings>`_. Patch
+ by Oleg Iarygin.
+
+- bpo-26792: Improve the docstrings of :func:`runpy.run_module` and
+ :func:`runpy.run_path`. Original patch by Andrew Brezovsky.
+
+- bpo-40838: Document that :func:`inspect.getdoc`,
+ :func:`inspect.getmodule`, and :func:`inspect.getsourcefile` might return
+ ``None``.
+
+- bpo-45790: Adjust inaccurate phrasing in
+ :doc:`../extending/newtypes_tutorial` about the ``ob_base`` field and the
+ macros used to access its contents.
+
+- bpo-42340: Document that in some circumstances :exc:`KeyboardInterrupt`
+ may cause the code to enter an inconsistent state. Provided a sample
+ workaround to avoid it if needed.
+
+- bpo-41233: Link the errnos referenced in ``Doc/library/exceptions.rst`` to
+ their respective section in ``Doc/library/errno.rst``, and vice versa.
+ Previously this was only done for EINTR and InterruptedError. Patch by Yan
+ "yyyyyyyan" Orestes.
+
+- bpo-38056: Overhaul the :ref:`error-handlers` documentation in
+ :mod:`codecs`.
+
+- bpo-13553: Document tkinter.Tk args.
+
+Tests
+-----
+
+- gh-issue-92886: Fixing tests that fail when running with optimizations
+ (``-O``) in ``test_imaplib.py``.
+
+- gh-issue-92670: Skip
+ ``test_shutil.TestCopy.test_copyfile_nonexistent_dir`` test on AIX as the
+ test uses a trailing slash to force the OS consider the path as a
+ directory, but on AIX the trailing slash has no effect and is considered
+ as a file.
+
+- gh-issue-91904: Fix initialization of
+ :envvar:`PYTHONREGRTEST_UNICODE_GUARD` which prevented running regression
+ tests on non-UTF-8 locale.
+
+- gh-issue-91607: Fix ``test_concurrent_futures`` to test the correct
+ multiprocessing start method context in several cases where the test logic
+ mixed this up.
+
+- bpo-47205: Skip test for :func:`~os.sched_getaffinity` and
+ :func:`~os.sched_setaffinity` error case on FreeBSD.
+
+- bpo-47104: Rewrite :func:`asyncio.to_thread` tests to use
+ :class:`unittest.IsolatedAsyncioTestCase`.
+
+- bpo-29890: Add tests for :class:`ipaddress.IPv4Interface` and
+ :class:`ipaddress.IPv6Interface` construction with tuple arguments.
+ Original patch and tests by louisom.
+
+Build
+-----
+
+- bpo-47103: Windows ``PGInstrument`` builds now copy a required DLL into
+ the output directory, making it easier to run the profile stage of a PGO
+ build.
+
+Windows
+-------
+
+- gh-issue-92984: Explicitly disable incremental linking for non-Debug
+ builds
+
+- bpo-47194: Update ``zlib`` to v1.2.12 to resolve CVE-2018-25032.
+
+- bpo-46785: Fix race condition between :func:`os.stat` and unlinking a file
+ on Windows, by using errors codes returned by ``FindFirstFileW()`` when
+ appropriate in ``win32_xstat_impl``.
+
+- bpo-40859: Update Windows build to use xz-5.2.5
+
+Tools/Demos
+-----------
+
+- gh-issue-91583: Fix regression in the code generated by Argument Clinic
+ for functions with the ``defining_class`` parameter.
+
+
What's New in Python 3.10.4 final?
==================================
Patch by Kyungmin Lee.
- bpo-42135: Fix typo: ``importlib.find_loader`` is really slated for
- removal in Python 3.12 not 3.10, like the others in GH-25169.
+ removal in Python 3.12 not 3.10, like the others in PR 25169.
Patch by Hugo van Kemenade.
discovery. Flagged use of dict result from ``entry_points()`` as
deprecated.
-- bpo-47383: The ``P.args`` and ``P.kwargs`` attributes of
- :class:`typing.ParamSpec` are now instances of the new classes
- :class:`typing.ParamSpecArgs` and :class:`typing.ParamSpecKwargs`, which
- enables a more useful ``repr()``. Patch by Jelle Zijlstra.
+- The ``P.args`` and ``P.kwargs`` attributes of :class:`typing.ParamSpec`
+ are now instances of the new classes :class:`typing.ParamSpecArgs` and
+ :class:`typing.ParamSpecKwargs`, which enables a more useful ``repr()``.
+ Patch by Jelle Zijlstra.
- bpo-43731: Add an ``encoding`` parameter :func:`logging.fileConfig()`.
- bpo-43752: Fix :mod:`sqlite3` regression for zero-sized blobs with
converters, where ``b""`` was returned instead of ``None``. The regression
- was introduced by GH-24723. Patch by Erlend E. Aasland.
+ was introduced by PR 24723. Patch by Erlend E. Aasland.
- bpo-43655: :mod:`tkinter` dialog windows are now recognized as dialogs by
window managers on macOS and X Window.
``SQLITE_NOMEM``, :exc:`MemoryError` is now raised. Patch by Erlend E.
Aasland.
-- bpo-43368: Fix a regression introduced in GH-24562, where an empty
+- bpo-43368: Fix a regression introduced in PR 24562, where an empty
bytestring was fetched as ``None`` instead of ``b''`` in :mod:`sqlite3`.
Patch by Mariusz Felisiak.
by Erlend E. Aasland.
- bpo-40956: Fix segfault in :meth:`sqlite3.Connection.backup` if no
- argument was provided. The regression was introduced by GH-23838. Patch by
+ argument was provided. The regression was introduced by PR 23838. Patch by
Erlend E. Aasland.
- bpo-43172: The readline module now passes its tests when built directly
- bpo-38417: Added support for setting the umask in the child process to the
subprocess module on POSIX systems.
-- bpo-38449: Revert GH-15522, which introduces a regression in
+- bpo-38449: Revert PR 15522, which introduces a regression in
:meth:`mimetypes.guess_type` due to improper handling of filenames as
urls.
the decoding. Based on patch by c-fos.
- bpo-33604: Remove HMAC default to md5 marked for removal in 3.8 (removal
- originally planned in 3.6, bump to 3.8 in gh-7062).
+ originally planned in 3.6, bump to 3.8 in PR 7062).
- bpo-33582: Emit a deprecation warning for inspect.formatargspec
when getting the file size. Fixed hang of all threads with inaccessible
NFS server. Patch by Nir Soffer.
-- bpo-321010: Add :attr:`sys.flags.dev_mode` flag
+- bpo-32101: Add :attr:`sys.flags.dev_mode` flag
- bpo-32154: The ``asyncio.windows_utils.socketpair()`` function has been
removed: use directly :func:`socket.socketpair` which is available on all
*Release date: 2017-06-17*
+Security
+--------
+
+- bpo-29591: Update expat copy from 2.1.1 to 2.2.0 to get fixes of
+ CVE-2016-0718 and CVE-2016-4472. See
+ https://sourceforge.net/p/expat/bugs/537/ for more information.
+
Core and Builtins
-----------------
- bpo-30605: re.compile() no longer raises a BytesWarning when compiling a
bytes instance with misplaced inline modifier. Patch by Roy Williams.
-Security
---------
-
-- bpo-29591: Update expat copy from 2.1.1 to 2.2.0 to get fixes of
- CVE-2016-0718 and CVE-2016-4472. See
- https://sourceforge.net/p/expat/bugs/537/ for more information.
-
-Library
--------
-
- bpo-24484: Avoid race condition in multiprocessing cleanup (#2159)
- bpo-28994: The traceback no longer displayed for SystemExit raised in a
*Release date: 2016-07-11*
+Security
+--------
+
+- bpo-27278: Fix os.urandom() implementation using getrandom() on Linux.
+  Truncate size to INT_MAX and loop until we have collected enough random
+  bytes, instead of directly casting a Py_ssize_t to int.
+
+- bpo-22636: Avoid shell injection problems with ctypes.util.find_library().
+
Core and Builtins
-----------------
let the new chained one through. This avoids the :pep:`479` bug described
in issue25782.
-Security
---------
-
-- bpo-27278: Fix os.urandom() implementation using getrandom() on Linux.
- Truncate size to INT_MAX and loop until we collected enough random bytes,
- instead of casting a directly Py_ssize_t to int.
-
-Library
--------
-
- bpo-16864: sqlite3.Cursor.lastrowid now supports REPLACE statement.
Initial patch by Alex LordThorsen.
- bpo-8637: Honor a pager set by the env var MANPAGER (in preference to one
set by the env var PAGER).
-Security
---------
-
-- bpo-22636: Avoid shell injection problems with ctypes.util.find_library().
-
-Library
--------
-
- bpo-16182: Fix various functions in the "readline" module to use the
locale encoding, and fix get_begidx() and get_endidx() to return code
point indexes.
*Release date: 2016-06-13*
+Security
+--------
+
+- bpo-26556: Update expat to 2.1.1, fixes CVE-2015-1283.
+
+- Fix TLS stripping vulnerability in smtplib, CVE-2016-0772. Reported by
+ Team Oststrom.
+
+- bpo-26839: On Linux, :func:`os.urandom` now calls ``getrandom()`` with
+ ``GRND_NONBLOCK`` to fall back on reading ``/dev/urandom`` if the urandom
+ entropy pool is not initialized yet. Patch written by Colm Buckley.
+
Core and Builtins
-----------------
- bpo-20508: Improve exception message of IPv{4,6}Network.__getitem__. Patch
by Gareth Rees.
-Security
---------
-
-- bpo-26556: Update expat to 2.1.1, fixes CVE-2015-1283.
-
-- Fix TLS stripping vulnerability in smtplib, CVE-2016-0772. Reported by
- Team Oststrom.
-
-Library
--------
-
- bpo-21386: Implement missing IPv4Address.is_global property. It was
documented since 07a5610bae9d. Initial patch by Roger Luethi.
- bpo-21313: Fix the "platform" module to tolerate when sys.version contains
truncated build information.
-Security
---------
-
-- bpo-26839: On Linux, :func:`os.urandom` now calls ``getrandom()`` with
- ``GRND_NONBLOCK`` to fall back on reading ``/dev/urandom`` if the urandom
- entropy pool is not initialized yet. Patch written by Colm Buckley.
-
-Library
--------
-
- bpo-23883: Added missing APIs to __all__ to match the documented APIs for
the following modules: cgi, mailbox, mimetypes, plistlib and smtpd.
Patches by Jacek Kołodziej.
launcher ``py.exe`` no longer prefers an installed Python 2 version over
Python 3 by default when used interactively.
+- bpo-17500: Remove unused and outdated icons. (See also:
+ https://github.com/python/pythondotorg/issues/945)
+
Build
-----
- bpo-26930: Update OS X 10.5+ 32-bit-only installer to build and link with
OpenSSL 1.0.2h.
-Windows
--------
-
-- bpo-17500: Remove unused and outdated icons. (See also:
- https://github.com/python/pythondotorg/issues/945)
-
C API
-----
*Release date: 2016-05-16*
+Security
+--------
+
+- bpo-26657: Fix directory traversal vulnerability with http.server on
+ Windows. This fixes a regression that was introduced in 3.3.4rc1 and
+ 3.4.0rc1. Based on patch by Philipp Hagemeister.
+
+- bpo-26313: ssl.py _load_windows_store_certs fails if windows cert store is
+ empty. Patch by Baji.
+
+- bpo-25939: On Windows open the cert store readonly in
+ ssl.enum_certificates.
+
Core and Builtins
-----------------
- bpo-24838: tarfile's ustar and gnu formats now correctly calculate name
and link field limits for multibyte character encodings like utf-8.
-Security
---------
-
-- bpo-26657: Fix directory traversal vulnerability with http.server on
- Windows. This fixes a regression that was introduced in 3.3.4rc1 and
- 3.4.0rc1. Based on patch by Philipp Hagemeister.
-
-Library
--------
-
- bpo-26717: Stop encoding Latin-1-ized WSGI paths with UTF-8. Patch by
Anthony Sottile.
:class:`warnings.WarningMessage`. Add warnings._showwarnmsg() which uses
tracemalloc to get the traceback where source object was allocated.
-Security
---------
-
-- bpo-26313: ssl.py _load_windows_store_certs fails if windows cert store is
- empty. Patch by Baji.
-
-Library
--------
-
- bpo-26569: Fix :func:`pyclbr.readmodule` and :func:`pyclbr.readmodule_ex`
to support importing packages.
trigger the handle_error() method, and will now to stop a single-threaded
server.
-Security
---------
-
-- bpo-25939: On Windows open the cert store readonly in
- ssl.enum_certificates.
-
-Library
--------
-
- bpo-25995: os.walk() no longer uses FDs proportional to the tree depth.
- bpo-25994: Added the close() method and the support of the context manager
*Release date: 2017-01-02*
+Security
+--------
+
+- bpo-27278: Fix os.urandom() implementation using getrandom() on Linux.
+  Truncate size to INT_MAX and loop until we have collected enough random
+  bytes, instead of directly casting a Py_ssize_t to int.
+
+- bpo-22636: Avoid shell injection problems with ctypes.util.find_library().
+
Core and Builtins
-----------------
let the new chained one through. This avoids the :pep:`479` bug described
in issue25782.
-Security
---------
-
-- bpo-27278: Fix os.urandom() implementation using getrandom() on Linux.
- Truncate size to INT_MAX and loop until we collected enough random bytes,
- instead of casting a directly Py_ssize_t to int.
-
-Library
--------
-
- bpo-26386: Fixed ttk.TreeView selection operations with item id's
containing spaces.
-Security
---------
-
-- bpo-22636: Avoid shell injection problems with ctypes.util.find_library().
-
-Library
--------
-
- bpo-16182: Fix various functions in the "readline" module to use the
locale encoding, and fix get_begidx() and get_endidx() to return code
point indexes.
*Release date: 2016-06-12*
+Security
+--------
+
+- bpo-26556: Update expat to 2.1.1, fixes CVE-2015-1283.
+
+- Fix TLS stripping vulnerability in smtplib, CVE-2016-0772. Reported by
+  Team Oststrom.
+
+- bpo-26839: On Linux, :func:`os.urandom` now calls ``getrandom()`` with
+ ``GRND_NONBLOCK`` to fall back on reading ``/dev/urandom`` if the urandom
+ entropy pool is not initialized yet. Patch written by Colm Buckley.
+
+- bpo-26657: Fix directory traversal vulnerability with http.server on
+ Windows. This fixes a regression that was introduced in 3.3.4rc1 and
+ 3.4.0rc1. Based on patch by Philipp Hagemeister.
+
+- bpo-26313: ssl.py _load_windows_store_certs fails if windows cert store is
+ empty. Patch by Baji.
+
+- bpo-25939: On Windows open the cert store readonly in
+ ssl.enum_certificates.
+
Core and Builtins
-----------------
iterator should be returned directly. Doing the former will trigger a
PendingDeprecationWarning.
-Security
---------
-
-- bpo-26556: Update expat to 2.1.1, fixes CVE-2015-1283.
-
-- Fix TLS stripping vulnerability in smtplib, CVE-2016-0772. Reported by
- Team Oststrom
-
Library
-------
- bpo-21313: Fix the "platform" module to tolerate when sys.version contains
truncated build information.
-Security
---------
-
-- bpo-26839: On Linux, :func:`os.urandom` now calls ``getrandom()`` with
- ``GRND_NONBLOCK`` to fall back on reading ``/dev/urandom`` if the urandom
- entropy pool is not initialized yet. Patch written by Colm Buckley.
-
-Library
--------
-
- bpo-27164: In the zlib module, allow decompressing raw Deflate streams
with a predefined zdict. Based on patch by Xiang Zhang.
- bpo-24838: tarfile's ustar and gnu formats now correctly calculate name
and link field limits for multibyte character encodings like utf-8.
-Security
---------
-
-- bpo-26657: Fix directory traversal vulnerability with http.server on
- Windows. This fixes a regression that was introduced in 3.3.4rc1 and
- 3.4.0rc1. Based on patch by Philipp Hagemeister.
-
-Library
--------
-
- bpo-26717: Stop encoding Latin-1-ized WSGI paths with UTF-8. Patch by
Anthony Sottile.
- bpo-26560: Avoid potential ValueError in BaseHandler.start_response.
Initial patch by Peter Inglesby.
-Security
---------
-
-- bpo-26313: ssl.py _load_windows_store_certs fails if windows cert store is
- empty. Patch by Baji.
-
-Library
--------
-
- bpo-26569: Fix :func:`pyclbr.readmodule` and :func:`pyclbr.readmodule_ex`
to support importing packages.
the connected socket) when verify_request() returns false. Patch by Aviv
Palivoda.
-Security
---------
-
-- bpo-25939: On Windows open the cert store readonly in
- ssl.enum_certificates.
-
-Library
--------
-
- bpo-25995: os.walk() no longer uses FDs proportional to the tree depth.
- bpo-26117: The os.scandir() iterator now closes file descriptor not only
- bpo-26065: Excludes venv from library when generating embeddable distro.
+- bpo-17500: Remove unused and outdated icons. (See also:
+ https://github.com/python/pythondotorg/issues/945)
+
Tools/Demos
-----------
- bpo-26316: Fix variable name typo in Argument Clinic.
-Windows
--------
-
-- bpo-17500: Remove unused and outdated icons. (See also:
- https://github.com/python/pythondotorg/issues/945)
-
What's New in Python 3.5.1 final?
=================================
.B \-x
]
[
-[
.B \-X
.I option
]
+[
.B \-?
]
.br
more verbose than the default if the code is correct: new warnings are
only emitted when an issue is detected. Effect of the developer mode:
* Add default warning filter, as -W default
- * Install debug hooks on memory allocators: see the PyMem_SetupDebugHooks() C function
+ * Install debug hooks on memory allocators: see the PyMem_SetupDebugHooks()
+ C function
* Enable the faulthandler module to dump the Python traceback on a crash
* Enable asyncio debug mode
* Set the dev_mode attribute of sys.flags to True
otherwise activate automatically). See PYTHONUTF8 for more details
-X pycache_prefix=PATH: enable writing .pyc files to a parallel tree rooted at the
- given directory instead of to the code tree.
+ given directory instead of to the code tree.
+
+ -X warn_default_encoding: enable opt-in EncodingWarning for 'encoding=None'
+
.TP
.B \-x
Skip the first line of the source. This is intended for a DOS
index = internal_bisect_right(a, x, lo, hi, key);
} else {
key_x = PyObject_CallOneArg(key, x);
- if (x == NULL) {
+ if (key_x == NULL) {
return NULL;
}
index = internal_bisect_right(a, key_x, lo, hi, key);
index = internal_bisect_left(a, x, lo, hi, key);
} else {
key_x = PyObject_CallOneArg(key, x);
- if (x == NULL) {
+ if (key_x == NULL) {
return NULL;
}
index = internal_bisect_left(a, key_x, lo, hi, key);
0, /* tp_as_buffer */
/* XXX should participate in GC? */
Py_TPFLAGS_DEFAULT, /* tp_flags */
- "deletes a key from a dictionary", /* tp_doc */
+ PyDoc_STR("deletes a key from a dictionary"), /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
return StructUnionType_new(type, args, kwds, 0);
}
-static const char from_address_doc[] =
-"C.from_address(integer) -> C instance\naccess a C instance at the specified address";
+PyDoc_STRVAR(from_address_doc,
+"C.from_address(integer) -> C instance\naccess a C instance at the specified address");
static PyObject *
CDataType_from_address(PyObject *type, PyObject *value)
return PyCData_AtAddress(type, buf);
}
-static const char from_buffer_doc[] =
-"C.from_buffer(object, offset=0) -> C instance\ncreate a C instance from a writeable buffer";
+PyDoc_STRVAR(from_buffer_doc,
+"C.from_buffer(object, offset=0) -> C instance\ncreate a C instance from a writeable buffer");
static int
KeepRef(CDataObject *target, Py_ssize_t index, PyObject *keep);
return result;
}
-static const char from_buffer_copy_doc[] =
-"C.from_buffer_copy(object, offset=0) -> C instance\ncreate a C instance from a readable buffer";
+PyDoc_STRVAR(from_buffer_copy_doc,
+"C.from_buffer_copy(object, offset=0) -> C instance\ncreate a C instance from a readable buffer");
static PyObject *
GenericPyCData_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
return result;
}
-static const char in_dll_doc[] =
-"C.in_dll(dll, name) -> C instance\naccess a C instance in a dll";
+PyDoc_STRVAR(in_dll_doc,
+"C.in_dll(dll, name) -> C instance\naccess a C instance in a dll");
static PyObject *
CDataType_in_dll(PyObject *type, PyObject *args)
return PyCData_AtAddress(type, address);
}
-static const char from_param_doc[] =
-"Convert a Python object into a function call parameter.";
+PyDoc_STRVAR(from_param_doc,
+"Convert a Python object into a function call parameter.");
static PyObject *
CDataType_from_param(PyObject *type, PyObject *value)
PyCStructType_setattro, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
- "metatype for the CData Objects", /* tp_doc */
+ PyDoc_STR("metatype for the CData Objects"), /* tp_doc */
(traverseproc)CDataType_traverse, /* tp_traverse */
(inquiry)CDataType_clear, /* tp_clear */
0, /* tp_richcompare */
UnionType_setattro, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
- "metatype for the CData Objects", /* tp_doc */
+ PyDoc_STR("metatype for the CData Objects"), /* tp_doc */
(traverseproc)CDataType_traverse, /* tp_traverse */
(inquiry)CDataType_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
- "metatype for the Pointer Objects", /* tp_doc */
+ PyDoc_STR("metatype for the Pointer Objects"), /* tp_doc */
(traverseproc)CDataType_traverse, /* tp_traverse */
(inquiry)CDataType_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "metatype for the Array Objects", /* tp_doc */
+ PyDoc_STR("metatype for the Array Objects"), /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "metatype for the PyCSimpleType Objects", /* tp_doc */
+ PyDoc_STR("metatype for the PyCSimpleType Objects"), /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
- "metatype for C function pointers", /* tp_doc */
+ PyDoc_STR("metatype for C function pointers"), /* tp_doc */
(traverseproc)CDataType_traverse, /* tp_traverse */
(inquiry)CDataType_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_setattro */
&PyCData_as_buffer, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "XXX to be provided", /* tp_doc */
+ PyDoc_STR("XXX to be provided"), /* tp_doc */
(traverseproc)PyCData_traverse, /* tp_traverse */
(inquiry)PyCData_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_setattro */
&PyCData_as_buffer, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "Function Pointer", /* tp_doc */
+ PyDoc_STR("Function Pointer"), /* tp_doc */
(traverseproc)PyCFuncPtr_traverse, /* tp_traverse */
(inquiry)PyCFuncPtr_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_setattro */
&PyCData_as_buffer, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "Structure base class", /* tp_doc */
+ PyDoc_STR("Structure base class"), /* tp_doc */
(traverseproc)PyCData_traverse, /* tp_traverse */
(inquiry)PyCData_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_setattro */
&PyCData_as_buffer, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "Union base class", /* tp_doc */
+ PyDoc_STR("Union base class"), /* tp_doc */
(traverseproc)PyCData_traverse, /* tp_traverse */
(inquiry)PyCData_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_setattro */
&PyCData_as_buffer, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "XXX to be provided", /* tp_doc */
+ PyDoc_STR("XXX to be provided"), /* tp_doc */
(traverseproc)PyCData_traverse, /* tp_traverse */
(inquiry)PyCData_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_setattro */
&PyCData_as_buffer, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "XXX to be provided", /* tp_doc */
+ PyDoc_STR("XXX to be provided"), /* tp_doc */
(traverseproc)PyCData_traverse, /* tp_traverse */
(inquiry)PyCData_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_setattro */
&PyCData_as_buffer, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "XXX to be provided", /* tp_doc */
+ PyDoc_STR("XXX to be provided"), /* tp_doc */
(traverseproc)PyCData_traverse, /* tp_traverse */
(inquiry)PyCData_clear, /* tp_clear */
0, /* tp_richcompare */
* Module initialization.
*/
-static const char module_docs[] =
-"Create and manipulate C compatible data types in Python.";
+PyDoc_STRVAR(_ctypes__doc__,
+"Create and manipulate C compatible data types in Python.");
#ifdef MS_WIN32
-static const char comerror_doc[] = "Raised when a COM method call failed.";
+PyDoc_STRVAR(comerror_doc, "Raised when a COM method call failed.");
int
comerror_init(PyObject *self, PyObject *args, PyObject *kwds)
static struct PyModuleDef _ctypesmodule = {
PyModuleDef_HEAD_INIT,
.m_name = "_ctypes",
- .m_doc = module_docs,
+ .m_doc = _ctypes__doc__,
.m_size = -1,
.m_methods = _ctypes_module_methods,
};
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
- "CThunkObject", /* tp_doc */
+ PyDoc_STR("CThunkObject"), /* tp_doc */
CThunkObject_traverse, /* tp_traverse */
CThunkObject_clear, /* tp_clear */
0, /* tp_richcompare */
#ifdef MS_WIN32
-static const char format_error_doc[] =
+PyDoc_STRVAR(format_error_doc,
"FormatError([integer]) -> string\n\
\n\
Convert a win32 error code into a string. If the error code is not\n\
-given, the return value of a call to GetLastError() is used.\n";
+given, the return value of a call to GetLastError() is used.\n");
static PyObject *format_error(PyObject *self, PyObject *args)
{
PyObject *result;
return result;
}
-static const char load_library_doc[] =
+PyDoc_STRVAR(load_library_doc,
"LoadLibrary(name, load_flags) -> handle\n\
\n\
Load an executable (usually a DLL), and return a handle to it.\n\
The handle may be used to locate exported functions in this\n\
module. load_flags are as defined for LoadLibraryEx in the\n\
-Windows API.\n";
+Windows API.\n");
static PyObject *load_library(PyObject *self, PyObject *args)
{
PyObject *nameobj;
#endif
}
-static const char free_library_doc[] =
+PyDoc_STRVAR(free_library_doc,
"FreeLibrary(handle) -> void\n\
\n\
-Free the handle of an executable previously loaded by LoadLibrary.\n";
+Free the handle of an executable previously loaded by LoadLibrary.\n");
static PyObject *free_library(PyObject *self, PyObject *args)
{
void *hMod;
Py_RETURN_NONE;
}
-static const char copy_com_pointer_doc[] =
-"CopyComPointer(src, dst) -> HRESULT value\n";
+PyDoc_STRVAR(copy_com_pointer_doc,
+"CopyComPointer(src, dst) -> HRESULT value\n");
static PyObject *
copy_com_pointer(PyObject *self, PyObject *args)
/*****************************************************************
* functions
*/
-static const char sizeof_doc[] =
+PyDoc_STRVAR(sizeof_doc,
"sizeof(C type) -> integer\n"
"sizeof(C instance) -> integer\n"
-"Return the size in bytes of a C instance";
+"Return the size in bytes of a C instance");
static PyObject *
sizeof_func(PyObject *self, PyObject *obj)
return NULL;
}
-static const char alignment_doc[] =
+PyDoc_STRVAR(alignment_doc,
"alignment(C type) -> integer\n"
"alignment(C instance) -> integer\n"
-"Return the alignment requirements of a C instance";
+"Return the alignment requirements of a C instance");
static PyObject *
align_func(PyObject *self, PyObject *obj)
return NULL;
}
-static const char byref_doc[] =
+PyDoc_STRVAR(byref_doc,
"byref(C instance[, offset=0]) -> byref-object\n"
"Return a pointer lookalike to a C instance, only usable\n"
-"as function argument";
+"as function argument");
/*
* We must return something which can be converted to a parameter,
return (PyObject *)parg;
}
-static const char addressof_doc[] =
+PyDoc_STRVAR(addressof_doc,
"addressof(C instance) -> integer\n"
-"Return the address of the C instance internal buffer";
+"Return the address of the C instance internal buffer");
static PyObject *
addressof(PyObject *self, PyObject *obj)
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
- "Structure/Union member", /* tp_doc */
+ PyDoc_STR("Structure/Union member"), /* tp_doc */
(traverseproc)PyCField_traverse, /* tp_traverse */
(inquiry)PyCField_clear, /* tp_clear */
0, /* tp_richcompare */
result_seconds = utc_to_seconds(year, month, day,
hour, minute, second);
+ if (result_seconds == -1 && PyErr_Occurred()) {
+ return NULL;
+ }
+
/* Probe max_fold_seconds to detect a fold. */
probe_seconds = local(epoch + timet - max_fold_seconds);
if (probe_seconds == -1)
/* A callback function to pass to OpenSSL's OBJ_NAME_do_all(...) */
static void
+#if OPENSSL_VERSION_NUMBER >= 0x30000000L
+_openssl_hash_name_mapper(EVP_MD *md, void *arg)
+#else
_openssl_hash_name_mapper(const EVP_MD *md, const char *from,
const char *to, void *arg)
+#endif
{
_InternalNameMapperState *state = (_InternalNameMapperState *)arg;
PyObject *py_name;
assert(state != NULL);
- if (md == NULL)
+ // ignore all undefined providers
+ if ((md == NULL) || (EVP_MD_nid(md) == NID_undef)) {
return;
+ }
py_name = py_digest_name(md);
if (py_name == NULL) {
return -1;
}
+#if OPENSSL_VERSION_NUMBER >= 0x30000000L
+ // get algorithms from all activated providers in default context
+ EVP_MD_do_all_provided(NULL, &_openssl_hash_name_mapper, &state);
+#else
EVP_MD_do_all(&_openssl_hash_name_mapper, &state);
+#endif
if (state.error) {
Py_DECREF(state.set);
assert(preexec_fn == Py_None);
pid = vfork();
+ if (pid == -1) {
+ /* If vfork() fails, fall back to using fork(). When it isn't
+ * allowed in a process by the kernel, vfork can return -1
+ * with errno EINVAL. https://bugs.python.org/issue47151. */
+ pid = fork();
+ }
} else
#endif
{
#ifdef VFORK_USABLE
/* Use vfork() only if it's safe. See the comment above child_exec(). */
sigset_t old_sigs;
- if (preexec_fn == Py_None &&
- !call_setuid && !call_setgid && !call_setgroups) {
+ int allow_vfork;
+ if (preexec_fn == Py_None) {
+ allow_vfork = 1; /* 3.10.0 behavior */
+ PyObject *subprocess_module = PyImport_ImportModule("subprocess");
+ if (subprocess_module != NULL) {
+ PyObject *allow_vfork_obj = PyObject_GetAttrString(
+ subprocess_module, "_USE_VFORK");
+ Py_DECREF(subprocess_module);
+ if (allow_vfork_obj != NULL) {
+ allow_vfork = PyObject_IsTrue(allow_vfork_obj);
+ Py_DECREF(allow_vfork_obj);
+ if (allow_vfork < 0) {
+ PyErr_Clear(); /* Bad _USE_VFORK attribute. */
+ allow_vfork = 1; /* 3.10.0 behavior */
+ }
+ } else {
+ PyErr_Clear(); /* No _USE_VFORK attribute. */
+ }
+ } else {
+ PyErr_Clear(); /* no subprocess module? suspicious; don't care. */
+ }
+ } else {
+ allow_vfork = 0;
+ }
+ if (allow_vfork && !call_setuid && !call_setgid && !call_setgroups) {
/* Block all signals to ensure that no signal handlers are run in the
* child process while it shares memory with us. Note that signals
* used internally by C libraries won't be blocked by
* may change in future releases. Callback implementations should return zero
* to ensure future compatibility.
*/
-static int _trace_callback(unsigned int type, void* user_arg, void* prepared_statement, void* statement_string)
+static int
+_trace_callback(unsigned int type, void *callable, void *stmt, void *sql)
#else
-static void _trace_callback(void* user_arg, const char* statement_string)
+static void
+_trace_callback(void *callable, const char *sql)
#endif
{
- PyObject *py_statement = NULL;
- PyObject *ret = NULL;
-
- PyGILState_STATE gilstate;
-
#ifdef HAVE_TRACE_V2
if (type != SQLITE_TRACE_STMT) {
return 0;
}
#endif
- gilstate = PyGILState_Ensure();
- py_statement = PyUnicode_DecodeUTF8(statement_string,
- strlen(statement_string), "replace");
+ PyGILState_STATE gilstate = PyGILState_Ensure();
+
+ PyObject *py_statement = NULL;
+#ifdef HAVE_TRACE_V2
+ const char *expanded_sql = sqlite3_expanded_sql((sqlite3_stmt *)stmt);
+ if (expanded_sql == NULL) {
+ sqlite3 *db = sqlite3_db_handle((sqlite3_stmt *)stmt);
+ if (sqlite3_errcode(db) == SQLITE_NOMEM) {
+ (void)PyErr_NoMemory();
+ goto exit;
+ }
+
+ PyErr_SetString(pysqlite_DataError,
+ "Expanded SQL string exceeds the maximum string length");
+ if (_pysqlite_enable_callback_tracebacks) {
+ PyErr_Print();
+ } else {
+ PyErr_Clear();
+ }
+
+ // Fall back to unexpanded sql
+ py_statement = PyUnicode_FromString((const char *)sql);
+ }
+ else {
+ py_statement = PyUnicode_FromString(expanded_sql);
+ sqlite3_free((void *)expanded_sql);
+ }
+#else
+ if (sql == NULL) {
+ PyErr_SetString(pysqlite_DataError,
+ "Expanded SQL string exceeds the maximum string length");
+ if (_pysqlite_enable_callback_tracebacks) {
+ PyErr_Print();
+ } else {
+ PyErr_Clear();
+ }
+ goto exit;
+ }
+ py_statement = PyUnicode_FromString(sql);
+#endif
if (py_statement) {
- ret = PyObject_CallOneArg((PyObject*)user_arg, py_statement);
+ PyObject *ret = PyObject_CallOneArg((PyObject *)callable, py_statement);
Py_DECREF(py_statement);
+ Py_XDECREF(ret);
}
-
- if (ret) {
- Py_DECREF(ret);
- } else {
+ if (PyErr_Occurred()) {
if (_pysqlite_enable_callback_tracebacks) {
PyErr_Print();
} else {
}
}
+exit:
PyGILState_Release(gilstate);
#ifdef HAVE_TRACE_V2
return 0;
#include "util.h"
#include "clinic/cursor.c.h"
+static inline int
+check_cursor_locked(pysqlite_Cursor *cur)
+{
+ if (cur->locked) {
+ PyErr_SetString(pysqlite_ProgrammingError,
+ "Recursive use of cursors not allowed.");
+ return 0;
+ }
+ return 1;
+}
+
/*[clinic input]
module _sqlite3
class _sqlite3.Cursor "pysqlite_Cursor *" "pysqlite_CursorType"
pysqlite_Connection *connection)
/*[clinic end generated code: output=ac59dce49a809ca8 input=a8a4f75ac90999b2]*/
{
+ if (!check_cursor_locked(self)) {
+ return -1;
+ }
+
Py_INCREF(connection);
Py_XSETREF(self->connection, connection);
Py_CLEAR(self->statement);
return 0;
}
- if (cur->locked) {
- PyErr_SetString(pysqlite_ProgrammingError, "Recursive use of cursors not allowed.");
- return 0;
- }
-
- return pysqlite_check_thread(cur->connection) && pysqlite_check_connection(cur->connection);
+ return (pysqlite_check_thread(cur->connection)
+ && pysqlite_check_connection(cur->connection)
+ && check_cursor_locked(cur));
}
static PyObject *
if (self->statement) {
rc = pysqlite_step(self->statement->st, self->connection);
if (PyErr_Occurred()) {
- (void)pysqlite_statement_reset(self->statement);
- Py_DECREF(next_row);
- return NULL;
+ goto error;
}
if (rc != SQLITE_DONE && rc != SQLITE_ROW) {
- (void)pysqlite_statement_reset(self->statement);
- Py_DECREF(next_row);
_pysqlite_seterror(self->connection->db, NULL);
- return NULL;
+ goto error;
}
if (rc == SQLITE_ROW) {
+ self->locked = 1; // GH-80254: Prevent recursive use of cursors.
self->next_row = _pysqlite_fetch_one_row(self);
+ self->locked = 0;
if (self->next_row == NULL) {
- (void)pysqlite_statement_reset(self->statement);
- return NULL;
+ goto error;
}
}
}
return next_row;
+
+error:
+ (void)pysqlite_statement_reset(self->statement);
+ Py_DECREF(next_row);
+ return NULL;
}
/*[clinic input]
pysqlite_cursor_close_impl(pysqlite_Cursor *self)
/*[clinic end generated code: output=b6055e4ec6fe63b6 input=08b36552dbb9a986]*/
{
+ if (!check_cursor_locked(self)) {
+ return NULL;
+ }
+
if (!self->connection) {
PyErr_SetString(pysqlite_ProgrammingError,
"Base Cursor.__init__ not called.");
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "Instantiating this exception starts infinite recursion.", /* tp_doc */
+ PyDoc_STR("Instantiating this exception starts infinite recursion."), /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
goto exit;
}
b_obj = PyUnicode_FromString(b);
- if (a_obj == NULL) {
+ if (b_obj == NULL) {
goto exit;
}
Py_ssize_t result = _Py_UTF8_Edit_Cost(a_obj, b_obj, -1);
static PyObject *
_curses_panel_panel_bottom(PyCursesPanelObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":bottom", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "bottom() takes no arguments");
+ return NULL;
}
- return_value = _curses_panel_panel_bottom_impl(self, cls);
-
-exit:
- return return_value;
+ return _curses_panel_panel_bottom_impl(self, cls);
}
PyDoc_STRVAR(_curses_panel_panel_hide__doc__,
static PyObject *
_curses_panel_panel_hide(PyCursesPanelObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":hide", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "hide() takes no arguments");
+ return NULL;
}
- return_value = _curses_panel_panel_hide_impl(self, cls);
-
-exit:
- return return_value;
+ return _curses_panel_panel_hide_impl(self, cls);
}
PyDoc_STRVAR(_curses_panel_panel_show__doc__,
static PyObject *
_curses_panel_panel_show(PyCursesPanelObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":show", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "show() takes no arguments");
+ return NULL;
}
- return_value = _curses_panel_panel_show_impl(self, cls);
-
-exit:
- return return_value;
+ return _curses_panel_panel_show_impl(self, cls);
}
PyDoc_STRVAR(_curses_panel_panel_top__doc__,
static PyObject *
_curses_panel_panel_top(PyCursesPanelObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":top", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "top() takes no arguments");
+ return NULL;
}
- return_value = _curses_panel_panel_top_impl(self, cls);
-
-exit:
- return return_value;
+ return _curses_panel_panel_top_impl(self, cls);
}
PyDoc_STRVAR(_curses_panel_panel_above__doc__,
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", "", NULL};
- static _PyArg_Parser _parser = {"ii:move", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "move", 0};
+ PyObject *argsbuf[2];
int y;
int x;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &y, &x)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 2, 2, 0, argsbuf);
+ if (!args) {
+ goto exit;
+ }
+ y = _PyLong_AsInt(args[0]);
+ if (y == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ x = _PyLong_AsInt(args[1]);
+ if (x == -1 && PyErr_Occurred()) {
goto exit;
}
return_value = _curses_panel_panel_move_impl(self, cls, y, x);
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", NULL};
- static _PyArg_Parser _parser = {"O!:replace", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "replace", 0};
+ PyObject *argsbuf[1];
PyCursesWindowObject *win;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &PyCursesWindow_Type, &win)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf);
+ if (!args) {
+ goto exit;
+ }
+ if (!PyObject_TypeCheck(args[0], &PyCursesWindow_Type)) {
+ _PyArg_BadArgument("replace", "argument 1", (&PyCursesWindow_Type)->tp_name, args[0]);
goto exit;
}
+ win = (PyCursesWindowObject *)args[0];
return_value = _curses_panel_panel_replace_impl(self, cls, win);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", NULL};
- static _PyArg_Parser _parser = {"O:set_userptr", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "set_userptr", 0};
+ PyObject *argsbuf[1];
PyObject *obj;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &obj)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ obj = args[0];
return_value = _curses_panel_panel_set_userptr_impl(self, cls, obj);
exit:
static PyObject *
_curses_panel_panel_userptr(PyCursesPanelObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":userptr", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "userptr() takes no arguments");
+ return NULL;
}
- return_value = _curses_panel_panel_userptr_impl(self, cls);
-
-exit:
- return return_value;
+ return _curses_panel_panel_userptr_impl(self, cls);
}
PyDoc_STRVAR(_curses_panel_bottom_panel__doc__,
{
return _curses_panel_update_panels_impl(module);
}
-/*[clinic end generated code: output=3081ef24e5560cb0 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=c552457e8067bb0a input=a9049054013a1b77]*/
static PyObject *
_dbm_dbm_keys(dbmobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":keys", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "keys() takes no arguments");
+ return NULL;
}
- return_value = _dbm_dbm_keys_impl(self, cls);
-
-exit:
- return return_value;
+ return _dbm_dbm_keys_impl(self, cls);
}
PyDoc_STRVAR(_dbm_dbm_get__doc__,
exit:
return return_value;
}
-/*[clinic end generated code: output=6947b1115df66f7c input=a9049054013a1b77]*/
+/*[clinic end generated code: output=a4599b89ce338b08 input=a9049054013a1b77]*/
static PyObject *
_gdbm_gdbm_keys(gdbmobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":keys", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "keys() takes no arguments");
+ return NULL;
}
- return_value = _gdbm_gdbm_keys_impl(self, cls);
-
-exit:
- return return_value;
+ return _gdbm_gdbm_keys_impl(self, cls);
}
PyDoc_STRVAR(_gdbm_gdbm_firstkey__doc__,
static PyObject *
_gdbm_gdbm_firstkey(gdbmobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":firstkey", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "firstkey() takes no arguments");
+ return NULL;
}
- return_value = _gdbm_gdbm_firstkey_impl(self, cls);
-
-exit:
- return return_value;
+ return _gdbm_gdbm_firstkey_impl(self, cls);
}
PyDoc_STRVAR(_gdbm_gdbm_nextkey__doc__,
static PyObject *
_gdbm_gdbm_reorganize(gdbmobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":reorganize", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "reorganize() takes no arguments");
+ return NULL;
}
- return_value = _gdbm_gdbm_reorganize_impl(self, cls);
-
-exit:
- return return_value;
+ return _gdbm_gdbm_reorganize_impl(self, cls);
}
PyDoc_STRVAR(_gdbm_gdbm_sync__doc__,
static PyObject *
_gdbm_gdbm_sync(gdbmobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":sync", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "sync() takes no arguments");
+ return NULL;
}
- return_value = _gdbm_gdbm_sync_impl(self, cls);
-
-exit:
- return return_value;
+ return _gdbm_gdbm_sync_impl(self, cls);
}
PyDoc_STRVAR(dbmopen__doc__,
exit:
return return_value;
}
-/*[clinic end generated code: output=3b88446433e43d96 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=125f5bc685304744 input=a9049054013a1b77]*/
static PyObject *
_lsprof_Profiler_getstats(ProfilerObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":getstats", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "getstats() takes no arguments");
+ return NULL;
}
- return_value = _lsprof_Profiler_getstats_impl(self, cls);
-
-exit:
- return return_value;
+ return _lsprof_Profiler_getstats_impl(self, cls);
}
-/*[clinic end generated code: output=b4727cfebecdd22d input=a9049054013a1b77]*/
+/*[clinic end generated code: output=57c7b6b0b8666429 input=a9049054013a1b77]*/
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"block", "timeout", NULL};
- static _PyArg_Parser _parser = {"|pO:get", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "get", 0};
+ PyObject *argsbuf[2];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 0;
int block = 1;
PyObject *timeout = Py_None;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &block, &timeout)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 2, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ if (!noptargs) {
+ goto skip_optional_pos;
+ }
+ if (args[0]) {
+ block = PyObject_IsTrue(args[0]);
+ if (block < 0) {
+ goto exit;
+ }
+ if (!--noptargs) {
+ goto skip_optional_pos;
+ }
+ }
+ timeout = args[1];
+skip_optional_pos:
return_value = _queue_SimpleQueue_get_impl(self, cls, block, timeout);
exit:
static PyObject *
_queue_SimpleQueue_get_nowait(simplequeueobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":get_nowait", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "get_nowait() takes no arguments");
+ return NULL;
}
- return_value = _queue_SimpleQueue_get_nowait_impl(self, cls);
-
-exit:
- return return_value;
+ return _queue_SimpleQueue_get_nowait_impl(self, cls);
}
PyDoc_STRVAR(_queue_SimpleQueue_empty__doc__,
exit:
return return_value;
}
-/*[clinic end generated code: output=ce56b46fac150909 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=a9d567e8a64e6170 input=a9049054013a1b77]*/
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"string", "pos", "endpos", NULL};
- static _PyArg_Parser _parser = {"O|nn:match", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "match", 0};
+ PyObject *argsbuf[3];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 1;
PyObject *string;
Py_ssize_t pos = 0;
Py_ssize_t endpos = PY_SSIZE_T_MAX;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &string, &pos, &endpos)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 3, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ string = args[0];
+ if (!noptargs) {
+ goto skip_optional_pos;
+ }
+ if (args[1]) {
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[1]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ pos = ival;
+ }
+ if (!--noptargs) {
+ goto skip_optional_pos;
+ }
+ }
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[2]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ endpos = ival;
+ }
+skip_optional_pos:
return_value = _sre_SRE_Pattern_match_impl(self, cls, string, pos, endpos);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"string", "pos", "endpos", NULL};
- static _PyArg_Parser _parser = {"O|nn:fullmatch", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "fullmatch", 0};
+ PyObject *argsbuf[3];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 1;
PyObject *string;
Py_ssize_t pos = 0;
Py_ssize_t endpos = PY_SSIZE_T_MAX;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &string, &pos, &endpos)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 3, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ string = args[0];
+ if (!noptargs) {
+ goto skip_optional_pos;
+ }
+ if (args[1]) {
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[1]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ pos = ival;
+ }
+ if (!--noptargs) {
+ goto skip_optional_pos;
+ }
+ }
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[2]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ endpos = ival;
+ }
+skip_optional_pos:
return_value = _sre_SRE_Pattern_fullmatch_impl(self, cls, string, pos, endpos);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"string", "pos", "endpos", NULL};
- static _PyArg_Parser _parser = {"O|nn:search", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "search", 0};
+ PyObject *argsbuf[3];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 1;
PyObject *string;
Py_ssize_t pos = 0;
Py_ssize_t endpos = PY_SSIZE_T_MAX;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &string, &pos, &endpos)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 3, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ string = args[0];
+ if (!noptargs) {
+ goto skip_optional_pos;
+ }
+ if (args[1]) {
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[1]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ pos = ival;
+ }
+ if (!--noptargs) {
+ goto skip_optional_pos;
+ }
+ }
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[2]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ endpos = ival;
+ }
+skip_optional_pos:
return_value = _sre_SRE_Pattern_search_impl(self, cls, string, pos, endpos);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"string", "pos", "endpos", NULL};
- static _PyArg_Parser _parser = {"O|nn:finditer", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "finditer", 0};
+ PyObject *argsbuf[3];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 1;
PyObject *string;
Py_ssize_t pos = 0;
Py_ssize_t endpos = PY_SSIZE_T_MAX;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &string, &pos, &endpos)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 3, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ string = args[0];
+ if (!noptargs) {
+ goto skip_optional_pos;
+ }
+ if (args[1]) {
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[1]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ pos = ival;
+ }
+ if (!--noptargs) {
+ goto skip_optional_pos;
+ }
+ }
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[2]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ endpos = ival;
+ }
+skip_optional_pos:
return_value = _sre_SRE_Pattern_finditer_impl(self, cls, string, pos, endpos);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"string", "pos", "endpos", NULL};
- static _PyArg_Parser _parser = {"O|nn:scanner", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "scanner", 0};
+ PyObject *argsbuf[3];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 1;
PyObject *string;
Py_ssize_t pos = 0;
Py_ssize_t endpos = PY_SSIZE_T_MAX;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &string, &pos, &endpos)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 3, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ string = args[0];
+ if (!noptargs) {
+ goto skip_optional_pos;
+ }
+ if (args[1]) {
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[1]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ pos = ival;
+ }
+ if (!--noptargs) {
+ goto skip_optional_pos;
+ }
+ }
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[2]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ endpos = ival;
+ }
+skip_optional_pos:
return_value = _sre_SRE_Pattern_scanner_impl(self, cls, string, pos, endpos);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"repl", "string", "count", NULL};
- static _PyArg_Parser _parser = {"OO|n:sub", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "sub", 0};
+ PyObject *argsbuf[3];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 2;
PyObject *repl;
PyObject *string;
Py_ssize_t count = 0;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &repl, &string, &count)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 2, 3, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ repl = args[0];
+ string = args[1];
+ if (!noptargs) {
+ goto skip_optional_pos;
+ }
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[2]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ count = ival;
+ }
+skip_optional_pos:
return_value = _sre_SRE_Pattern_sub_impl(self, cls, repl, string, count);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"repl", "string", "count", NULL};
- static _PyArg_Parser _parser = {"OO|n:subn", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "subn", 0};
+ PyObject *argsbuf[3];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 2;
PyObject *repl;
PyObject *string;
Py_ssize_t count = 0;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &repl, &string, &count)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 2, 3, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ repl = args[0];
+ string = args[1];
+ if (!noptargs) {
+ goto skip_optional_pos;
+ }
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[2]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ count = ival;
+ }
+skip_optional_pos:
return_value = _sre_SRE_Pattern_subn_impl(self, cls, repl, string, count);
exit:
static PyObject *
_sre_SRE_Scanner_match(ScannerObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":match", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "match() takes no arguments");
+ return NULL;
}
- return_value = _sre_SRE_Scanner_match_impl(self, cls);
-
-exit:
- return return_value;
+ return _sre_SRE_Scanner_match_impl(self, cls);
}
PyDoc_STRVAR(_sre_SRE_Scanner_search__doc__,
static PyObject *
_sre_SRE_Scanner_search(ScannerObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":search", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "search() takes no arguments");
+ return NULL;
}
- return_value = _sre_SRE_Scanner_search_impl(self, cls);
-
-exit:
- return return_value;
+ return _sre_SRE_Scanner_search_impl(self, cls);
}
-/*[clinic end generated code: output=518f7bb775c1184f input=a9049054013a1b77]*/
+/*[clinic end generated code: output=ead5eb818b7771f8 input=a9049054013a1b77]*/
static PyObject *
_testmultiphase_StateAccessType_get_defining_module(StateAccessTypeObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":get_defining_module", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "get_defining_module() takes no arguments");
+ return NULL;
}
- return_value = _testmultiphase_StateAccessType_get_defining_module_impl(self, cls);
-
-exit:
- return return_value;
+ return _testmultiphase_StateAccessType_get_defining_module_impl(self, cls);
}
PyDoc_STRVAR(_testmultiphase_StateAccessType_getmodulebydef_bad_def__doc__,
static PyObject *
_testmultiphase_StateAccessType_getmodulebydef_bad_def(StateAccessTypeObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":getmodulebydef_bad_def", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "getmodulebydef_bad_def() takes no arguments");
+ return NULL;
}
- return_value = _testmultiphase_StateAccessType_getmodulebydef_bad_def_impl(self, cls);
-
-exit:
- return return_value;
+ return _testmultiphase_StateAccessType_getmodulebydef_bad_def_impl(self, cls);
}
PyDoc_STRVAR(_testmultiphase_StateAccessType_increment_count_clinic__doc__,
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"n", "twice", NULL};
- static _PyArg_Parser _parser = {"|i$p:increment_count_clinic", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "increment_count_clinic", 0};
+ PyObject *argsbuf[2];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 0;
int n = 1;
int twice = 0;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &n, &twice)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 1, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ if (!noptargs) {
+ goto skip_optional_pos;
+ }
+ if (args[0]) {
+ n = _PyLong_AsInt(args[0]);
+ if (n == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ if (!--noptargs) {
+ goto skip_optional_pos;
+ }
+ }
+skip_optional_pos:
+ if (!noptargs) {
+ goto skip_optional_kwonly;
+ }
+ twice = PyObject_IsTrue(args[1]);
+ if (twice < 0) {
+ goto exit;
+ }
+skip_optional_kwonly:
return_value = _testmultiphase_StateAccessType_increment_count_clinic_impl(self, cls, n, twice);
exit:
static PyObject *
_testmultiphase_StateAccessType_get_count(StateAccessTypeObject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":get_count", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "get_count() takes no arguments");
+ return NULL;
}
- return_value = _testmultiphase_StateAccessType_get_count_impl(self, cls);
-
-exit:
- return return_value;
+ return _testmultiphase_StateAccessType_get_count_impl(self, cls);
}
-/*[clinic end generated code: output=eb1b8c2ee6290be3 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=ec5029d1275cbf94 input=a9049054013a1b77]*/
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", NULL};
- static _PyArg_Parser _parser = {"O:extend", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "extend", 0};
+ PyObject *argsbuf[1];
PyObject *bb;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &bb)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ bb = args[0];
return_value = array_array_extend_impl(self, cls, bb);
exit:
#define ARRAY_ARRAYITERATOR___SETSTATE___METHODDEF \
{"__setstate__", (PyCFunction)array_arrayiterator___setstate__, METH_O, array_arrayiterator___setstate____doc__},
-/*[clinic end generated code: output=f130a994f98f1227 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=eb727e087d64f017 input=a9049054013a1b77]*/
static PyObject *
MD5Type_copy(MD5object *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":copy", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "copy() takes no arguments");
+ return NULL;
}
- return_value = MD5Type_copy_impl(self, cls);
-
-exit:
- return return_value;
+ return MD5Type_copy_impl(self, cls);
}
PyDoc_STRVAR(MD5Type_digest__doc__,
exit:
return return_value;
}
-/*[clinic end generated code: output=53ff7f22dbaaea36 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=3061297a669c645c input=a9049054013a1b77]*/
os_DirEntry_is_symlink(DirEntry *self, PyTypeObject *defining_class, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":is_symlink", _keywords, 0};
int _return_value;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "is_symlink() takes no arguments");
goto exit;
}
_return_value = os_DirEntry_is_symlink_impl(self, defining_class);
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"follow_symlinks", NULL};
- static _PyArg_Parser _parser = {"|$p:stat", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "stat", 0};
+ PyObject *argsbuf[1];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 0;
int follow_symlinks = 1;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &follow_symlinks)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 0, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ if (!noptargs) {
+ goto skip_optional_kwonly;
+ }
+ follow_symlinks = PyObject_IsTrue(args[0]);
+ if (follow_symlinks < 0) {
+ goto exit;
+ }
+skip_optional_kwonly:
return_value = os_DirEntry_stat_impl(self, defining_class, follow_symlinks);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"follow_symlinks", NULL};
- static _PyArg_Parser _parser = {"|$p:is_dir", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "is_dir", 0};
+ PyObject *argsbuf[1];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 0;
int follow_symlinks = 1;
int _return_value;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &follow_symlinks)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 0, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ if (!noptargs) {
+ goto skip_optional_kwonly;
+ }
+ follow_symlinks = PyObject_IsTrue(args[0]);
+ if (follow_symlinks < 0) {
+ goto exit;
+ }
+skip_optional_kwonly:
_return_value = os_DirEntry_is_dir_impl(self, defining_class, follow_symlinks);
if ((_return_value == -1) && PyErr_Occurred()) {
goto exit;
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"follow_symlinks", NULL};
- static _PyArg_Parser _parser = {"|$p:is_file", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "is_file", 0};
+ PyObject *argsbuf[1];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 0;
int follow_symlinks = 1;
int _return_value;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &follow_symlinks)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 0, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ if (!noptargs) {
+ goto skip_optional_kwonly;
+ }
+ follow_symlinks = PyObject_IsTrue(args[0]);
+ if (follow_symlinks < 0) {
+ goto exit;
+ }
+skip_optional_kwonly:
_return_value = os_DirEntry_is_file_impl(self, defining_class, follow_symlinks);
if ((_return_value == -1) && PyErr_Occurred()) {
goto exit;
#ifndef OS_WAITSTATUS_TO_EXITCODE_METHODDEF
#define OS_WAITSTATUS_TO_EXITCODE_METHODDEF
#endif /* !defined(OS_WAITSTATUS_TO_EXITCODE_METHODDEF) */
-/*[clinic end generated code: output=65a85d7d3f2c487e input=a9049054013a1b77]*/
+/*[clinic end generated code: output=debefcf43738ec66 input=a9049054013a1b77]*/
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", "", NULL};
- static _PyArg_Parser _parser = {"O|i:Parse", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "Parse", 0};
+ PyObject *argsbuf[2];
PyObject *data;
int isfinal = 0;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &data, &isfinal)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 2, 0, argsbuf);
+ if (!args) {
+ goto exit;
+ }
+ data = args[0];
+ if (nargs < 2) {
+ goto skip_optional_posonly;
+ }
+ isfinal = _PyLong_AsInt(args[1]);
+ if (isfinal == -1 && PyErr_Occurred()) {
goto exit;
}
+skip_optional_posonly:
return_value = pyexpat_xmlparser_Parse_impl(self, cls, data, isfinal);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", NULL};
- static _PyArg_Parser _parser = {"O:ParseFile", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "ParseFile", 0};
+ PyObject *argsbuf[1];
PyObject *file;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &file)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ file = args[0];
return_value = pyexpat_xmlparser_ParseFile_impl(self, cls, file);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", "", NULL};
- static _PyArg_Parser _parser = {"z|s:ExternalEntityParserCreate", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "ExternalEntityParserCreate", 0};
+ PyObject *argsbuf[2];
const char *context;
const char *encoding = NULL;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &context, &encoding)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 2, 0, argsbuf);
+ if (!args) {
+ goto exit;
+ }
+ if (args[0] == Py_None) {
+ context = NULL;
+ }
+ else if (PyUnicode_Check(args[0])) {
+ Py_ssize_t context_length;
+ context = PyUnicode_AsUTF8AndSize(args[0], &context_length);
+ if (context == NULL) {
+ goto exit;
+ }
+ if (strlen(context) != (size_t)context_length) {
+ PyErr_SetString(PyExc_ValueError, "embedded null character");
+ goto exit;
+ }
+ }
+ else {
+ _PyArg_BadArgument("ExternalEntityParserCreate", "argument 1", "str or None", args[0]);
+ goto exit;
+ }
+ if (nargs < 2) {
+ goto skip_optional_posonly;
+ }
+ if (!PyUnicode_Check(args[1])) {
+ _PyArg_BadArgument("ExternalEntityParserCreate", "argument 2", "str", args[1]);
goto exit;
}
+ Py_ssize_t encoding_length;
+ encoding = PyUnicode_AsUTF8AndSize(args[1], &encoding_length);
+ if (encoding == NULL) {
+ goto exit;
+ }
+ if (strlen(encoding) != (size_t)encoding_length) {
+ PyErr_SetString(PyExc_ValueError, "embedded null character");
+ goto exit;
+ }
+skip_optional_posonly:
return_value = pyexpat_xmlparser_ExternalEntityParserCreate_impl(self, cls, context, encoding);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", NULL};
- static _PyArg_Parser _parser = {"|p:UseForeignDTD", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "UseForeignDTD", 0};
+ PyObject *argsbuf[1];
int flag = 1;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &flag)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 1, 0, argsbuf);
+ if (!args) {
+ goto exit;
+ }
+ if (nargs < 1) {
+ goto skip_optional_posonly;
+ }
+ flag = PyObject_IsTrue(args[0]);
+ if (flag < 0) {
goto exit;
}
+skip_optional_posonly:
return_value = pyexpat_xmlparser_UseForeignDTD_impl(self, cls, flag);
exit:
#ifndef PYEXPAT_XMLPARSER_USEFOREIGNDTD_METHODDEF
#define PYEXPAT_XMLPARSER_USEFOREIGNDTD_METHODDEF
#endif /* !defined(PYEXPAT_XMLPARSER_USEFOREIGNDTD_METHODDEF) */
-/*[clinic end generated code: output=612b9d6a17a679a7 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=5d60049d385d5d56 input=a9049054013a1b77]*/
static PyObject *
SHA1Type_copy(SHA1object *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":copy", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "copy() takes no arguments");
+ return NULL;
}
- return_value = SHA1Type_copy_impl(self, cls);
-
-exit:
- return return_value;
+ return SHA1Type_copy_impl(self, cls);
}
PyDoc_STRVAR(SHA1Type_digest__doc__,
exit:
return return_value;
}
-/*[clinic end generated code: output=abf1ab2545cea5a2 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=93ced3c8f8fa4f21 input=a9049054013a1b77]*/
static PyObject *
SHA256Type_copy(SHAobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":copy", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "copy() takes no arguments");
+ return NULL;
}
- return_value = SHA256Type_copy_impl(self, cls);
-
-exit:
- return return_value;
+ return SHA256Type_copy_impl(self, cls);
}
PyDoc_STRVAR(SHA256Type_digest__doc__,
exit:
return return_value;
}
-/*[clinic end generated code: output=b7283f75c9d08f30 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=4f9fe3ca546b0c58 input=a9049054013a1b77]*/
static PyObject *
SHA512Type_copy(SHAobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":copy", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "copy() takes no arguments");
+ return NULL;
}
- return_value = SHA512Type_copy_impl(self, cls);
-
-exit:
- return return_value;
+ return SHA512Type_copy_impl(self, cls);
}
PyDoc_STRVAR(SHA512Type_digest__doc__,
exit:
return return_value;
}
-/*[clinic end generated code: output=9ff9f11937fabf35 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=26d2fe27b9673ac2 input=a9049054013a1b77]*/
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", NULL};
- static _PyArg_Parser _parser = {"y*:compress", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "compress", 0};
+ PyObject *argsbuf[1];
Py_buffer data = {NULL, NULL};
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &data)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf);
+ if (!args) {
+ goto exit;
+ }
+ if (PyObject_GetBuffer(args[0], &data, PyBUF_SIMPLE) != 0) {
+ goto exit;
+ }
+ if (!PyBuffer_IsContiguous(&data, 'C')) {
+ _PyArg_BadArgument("compress", "argument 1", "contiguous buffer", args[0]);
goto exit;
}
return_value = zlib_Compress_compress_impl(self, cls, &data);
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", "max_length", NULL};
- static _PyArg_Parser _parser = {"y*|n:decompress", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "decompress", 0};
+ PyObject *argsbuf[2];
+ Py_ssize_t noptargs = nargs + (kwnames ? PyTuple_GET_SIZE(kwnames) : 0) - 1;
Py_buffer data = {NULL, NULL};
Py_ssize_t max_length = 0;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &data, &max_length)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 2, 0, argsbuf);
+ if (!args) {
+ goto exit;
+ }
+ if (PyObject_GetBuffer(args[0], &data, PyBUF_SIMPLE) != 0) {
goto exit;
}
+ if (!PyBuffer_IsContiguous(&data, 'C')) {
+ _PyArg_BadArgument("decompress", "argument 1", "contiguous buffer", args[0]);
+ goto exit;
+ }
+ if (!noptargs) {
+ goto skip_optional_pos;
+ }
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[1]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ max_length = ival;
+ }
+skip_optional_pos:
return_value = zlib_Decompress_decompress_impl(self, cls, &data, max_length);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", NULL};
- static _PyArg_Parser _parser = {"|i:flush", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "flush", 0};
+ PyObject *argsbuf[1];
int mode = Z_FINISH;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &mode)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 1, 0, argsbuf);
+ if (!args) {
+ goto exit;
+ }
+ if (nargs < 1) {
+ goto skip_optional_posonly;
+ }
+ mode = _PyLong_AsInt(args[0]);
+ if (mode == -1 && PyErr_Occurred()) {
goto exit;
}
+skip_optional_posonly:
return_value = zlib_Compress_flush_impl(self, cls, mode);
exit:
static PyObject *
zlib_Compress_copy(compobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":copy", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "copy() takes no arguments");
+ return NULL;
}
- return_value = zlib_Compress_copy_impl(self, cls);
-
-exit:
- return return_value;
+ return zlib_Compress_copy_impl(self, cls);
}
#endif /* defined(HAVE_ZLIB_COPY) */
static PyObject *
zlib_Compress___copy__(compobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":__copy__", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "__copy__() takes no arguments");
+ return NULL;
}
- return_value = zlib_Compress___copy___impl(self, cls);
-
-exit:
- return return_value;
+ return zlib_Compress___copy___impl(self, cls);
}
#endif /* defined(HAVE_ZLIB_COPY) */
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", NULL};
- static _PyArg_Parser _parser = {"O:__deepcopy__", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "__deepcopy__", 0};
+ PyObject *argsbuf[1];
PyObject *memo;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &memo)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ memo = args[0];
return_value = zlib_Compress___deepcopy___impl(self, cls, memo);
exit:
static PyObject *
zlib_Decompress_copy(compobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":copy", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "copy() takes no arguments");
+ return NULL;
}
- return_value = zlib_Decompress_copy_impl(self, cls);
-
-exit:
- return return_value;
+ return zlib_Decompress_copy_impl(self, cls);
}
#endif /* defined(HAVE_ZLIB_COPY) */
static PyObject *
zlib_Decompress___copy__(compobject *self, PyTypeObject *cls, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
- PyObject *return_value = NULL;
- static const char * const _keywords[] = { NULL};
- static _PyArg_Parser _parser = {":__copy__", _keywords, 0};
-
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser
- )) {
- goto exit;
+ if (nargs) {
+ PyErr_SetString(PyExc_TypeError, "__copy__() takes no arguments");
+ return NULL;
}
- return_value = zlib_Decompress___copy___impl(self, cls);
-
-exit:
- return return_value;
+ return zlib_Decompress___copy___impl(self, cls);
}
#endif /* defined(HAVE_ZLIB_COPY) */
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", NULL};
- static _PyArg_Parser _parser = {"O:__deepcopy__", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "__deepcopy__", 0};
+ PyObject *argsbuf[1];
PyObject *memo;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &memo)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ memo = args[0];
return_value = zlib_Decompress___deepcopy___impl(self, cls, memo);
exit:
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"", NULL};
- static _PyArg_Parser _parser = {"|n:flush", _keywords, 0};
+ static _PyArg_Parser _parser = {NULL, _keywords, "flush", 0};
+ PyObject *argsbuf[1];
Py_ssize_t length = DEF_BUF_SIZE;
- if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser,
- &length)) {
+ args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 1, 0, argsbuf);
+ if (!args) {
goto exit;
}
+ if (nargs < 1) {
+ goto skip_optional_posonly;
+ }
+ {
+ Py_ssize_t ival = -1;
+ PyObject *iobj = _PyNumber_Index(args[0]);
+ if (iobj != NULL) {
+ ival = PyLong_AsSsize_t(iobj);
+ Py_DECREF(iobj);
+ }
+ if (ival == -1 && PyErr_Occurred()) {
+ goto exit;
+ }
+ length = ival;
+ }
+skip_optional_posonly:
return_value = zlib_Decompress_flush_impl(self, cls, length);
exit:
#ifndef ZLIB_DECOMPRESS___DEEPCOPY___METHODDEF
#define ZLIB_DECOMPRESS___DEEPCOPY___METHODDEF
#endif /* !defined(ZLIB_DECOMPRESS___DEEPCOPY___METHODDEF) */
-/*[clinic end generated code: output=6736bae59fab268b input=a9049054013a1b77]*/
+/*[clinic end generated code: output=1bda0d996fb51269 input=a9049054013a1b77]*/
for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(list)) {
PyObject *op = FROM_GC(gc);
_PyObject_GC_UNTRACK(op);
+ // gh-92036: If a deallocator function expect the object to be tracked
+ // by the GC (ex: func_dealloc()), it can crash if called on an object
+ // which is no longer tracked by the GC. Leak one strong reference on
+ // purpose so the object is never deleted and its deallocator is not
+ // called.
+ Py_INCREF(op);
}
}
_EXPORT_INT(m, SNDCTL_DSP_GETSPDIF);
#endif
_EXPORT_INT(m, SNDCTL_DSP_GETTRIGGER);
+#ifdef SNDCTL_DSP_MAPINBUF
_EXPORT_INT(m, SNDCTL_DSP_MAPINBUF);
+#endif
+#ifdef SNDCTL_DSP_MAPOUTBUF
_EXPORT_INT(m, SNDCTL_DSP_MAPOUTBUF);
+#endif
_EXPORT_INT(m, SNDCTL_DSP_NONBLOCK);
_EXPORT_INT(m, SNDCTL_DSP_POST);
#ifdef SNDCTL_DSP_PROFILE
/* Try reading the parent directory. */
if (!attributes_from_dir(path, &fileInfo, &tagInfo.ReparseTag)) {
/* Cannot read the parent directory. */
- SetLastError(error);
+ switch (GetLastError()) {
+ case ERROR_FILE_NOT_FOUND: /* File cannot be found */
+ case ERROR_PATH_NOT_FOUND: /* File parent directory cannot be found */
+ case ERROR_NOT_READY: /* Drive exists but unavailable */
+ case ERROR_BAD_NET_NAME: /* Remote drive unavailable */
+ break;
+ /* Restore the error from CreateFileW(). */
+ default:
+ SetLastError(error);
+ }
+
return -1;
}
if (fileInfo.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) {
"AF_UNIX path too long");
goto unix_out;
}
+
+ *len_ret = path.len + offsetof(struct sockaddr_un, sun_path);
}
else
#endif /* linux */
goto unix_out;
}
addr->sun_path[path.len] = 0;
+
+ /* including the tailing NUL */
+ *len_ret = path.len + offsetof(struct sockaddr_un, sun_path) + 1;
}
addr->sun_family = s->sock_family;
memcpy(addr->sun_path, path.buf, path.len);
- *len_ret = path.len + offsetof(struct sockaddr_un, sun_path);
+
retval = 1;
unix_out:
PyBuffer_Release(&path);
"split($self, /, sep=None, maxsplit=-1)\n"
"--\n"
"\n"
-"Return a list of the words in the string, using sep as the delimiter string.\n"
+"Return a list of the substrings in the string, using sep as the separator string.\n"
"\n"
" sep\n"
-" The delimiter according which to split the string.\n"
-" None (the default value) means split according to any whitespace,\n"
-" and discard empty strings from the result.\n"
+" The separator used to split the string.\n"
+"\n"
+" When set to None (the default value), will split on any whitespace\n"
+" character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard\n"
+" empty strings from the result.\n"
" maxsplit\n"
-" Maximum number of splits to do.\n"
-" -1 (the default value) means no limit.");
+" Maximum number of splits (starting from the left).\n"
+" -1 (the default value) means no limit.\n"
+"\n"
+"Note, str.split() is mainly useful for data that has been intentionally\n"
+"delimited. With natural text that includes punctuation, consider using\n"
+"the regular expression module.");
#define UNICODE_SPLIT_METHODDEF \
{"split", (PyCFunction)(void(*)(void))unicode_split, METH_FASTCALL|METH_KEYWORDS, unicode_split__doc__},
"rsplit($self, /, sep=None, maxsplit=-1)\n"
"--\n"
"\n"
-"Return a list of the words in the string, using sep as the delimiter string.\n"
+"Return a list of the substrings in the string, using sep as the separator string.\n"
"\n"
" sep\n"
-" The delimiter according which to split the string.\n"
-" None (the default value) means split according to any whitespace,\n"
-" and discard empty strings from the result.\n"
+" The separator used to split the string.\n"
+"\n"
+" When set to None (the default value), will split on any whitespace\n"
+" character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard\n"
+" empty strings from the result.\n"
" maxsplit\n"
-" Maximum number of splits to do.\n"
+" Maximum number of splits (starting from the left).\n"
" -1 (the default value) means no limit.\n"
"\n"
-"Splits are done starting at the end of the string and working to the front.");
+"Splitting starts at the end of the string and works to the front.");
#define UNICODE_RSPLIT_METHODDEF \
{"rsplit", (PyCFunction)(void(*)(void))unicode_rsplit, METH_FASTCALL|METH_KEYWORDS, unicode_rsplit__doc__},
exit:
return return_value;
}
-/*[clinic end generated code: output=f10cf85d3935b3b7 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=c494bed46209961d input=a9049054013a1b77]*/
winerrcode = PyLong_AsLong(*winerror);
if (winerrcode == -1 && PyErr_Occurred())
return -1;
- /* Set errno to the corresponding POSIX errno (overriding
- first argument). Windows Socket error codes (>= 10000)
- have the same value as their POSIX counterparts.
- */
- if (winerrcode < 10000)
- errcode = winerror_to_errno(winerrcode);
- else
- errcode = winerrcode;
+ errcode = winerror_to_errno(winerrcode);
*myerrno = PyLong_FromLong(errcode);
if (!*myerrno)
return -1;
{"min_exp", "DBL_MIN_EXP -- minimum int e such that radix**(e-1) "
"is a normalized float"},
{"min_10_exp", "DBL_MIN_10_EXP -- minimum int e such that 10**e is "
- "a normalized"},
+ "a normalized float"},
{"dig", "DBL_DIG -- maximum number of decimal digits that "
"can be faithfully represented in a float"},
{"mant_dig", "DBL_MANT_DIG -- mantissa digits"},
break;
case GET_ITER:
case GET_AITER:
- block_stack = push_block(block_stack, Loop);
+ // For-loops get a Loop block, but comprehensions do not.
+ if (_Py_OPCODE(code[i + 1]) != CALL_FUNCTION) {
+ block_stack = push_block(block_stack, Loop);
+ }
blocks[i+1] = block_stack;
break;
case FOR_ITER:
return newargs;
}
+PyDoc_STRVAR(genericalias__doc__,
+"Represent a PEP 585 generic type\n"
+"\n"
+"E.g. for t = list[int], t.__origin__ is list and t.__args__ is (int,).");
+
static PyObject *
ga_getitem(PyObject *self, PyObject *item)
{
// TODO:
// - argument clinic?
-// - __doc__?
// - cache?
PyTypeObject Py_GenericAliasType = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
.tp_name = "types.GenericAlias",
- .tp_doc = "Represent a PEP 585 generic type\n"
- "\n"
- "E.g. for t = list[int], t.__origin__ is list and t.__args__ is (int,).",
+ .tp_doc = genericalias__doc__,
.tp_basicsize = sizeof(gaobject),
.tp_dealloc = ga_dealloc,
.tp_repr = ga_repr,
PyDoc_STRVAR(throw_doc,
-"throw(typ[,val[,tb]]) -> raise exception in generator,\n\
-return next yielded value or raise StopIteration.");
+"throw(value)\n\
+throw(type[,value[,tb]])\n\
+\n\
+Raise exception in generator, return next yielded value or raise\n\
+StopIteration.");
static PyObject *
_gen_throw(PyGenObject *gen, int close_on_genexit,
return next iterated value or raise StopIteration.");
PyDoc_STRVAR(coro_throw_doc,
-"throw(typ[,val[,tb]]) -> raise exception in coroutine,\n\
-return next iterated value or raise StopIteration.");
+"throw(value)\n\
+throw(type[,value[,traceback]])\n\
+\n\
+Raise exception in coroutine, return next iterated value or raise\n\
+StopIteration.");
PyDoc_STRVAR(coro_close_doc,
"close() -> raise GeneratorExit inside coroutine.");
PyObject *it; /* iter(v) */
Py_ssize_t m; /* size of self */
Py_ssize_t n; /* guess for size of iterable */
- Py_ssize_t mn; /* m + n */
Py_ssize_t i;
PyObject *(*iternext)(PyObject *);
/* It should not be possible to allocate a list large enough to cause
an overflow on any relevant platform */
assert(m < PY_SSIZE_T_MAX - n);
- if (list_resize(self, m + n) < 0) {
+ if (self->ob_item == NULL) {
+ if (list_preallocate_exact(self, n) < 0) {
+ return NULL;
+ }
+ Py_SET_SIZE(self, n);
+ }
+ else if (list_resize(self, m + n) < 0) {
Py_DECREF(iterable);
return NULL;
}
* eventually run out of memory during the loop.
*/
}
+ else if (self->ob_item == NULL) {
+ if (n && list_preallocate_exact(self, n) < 0)
+ goto error;
+ }
else {
- mn = m + n;
/* Make room. */
- if (list_resize(self, mn) < 0)
+ if (list_resize(self, m + n) < 0)
goto error;
/* Make the list sane again. */
Py_SET_SIZE(self, m);
merge_freemem(MergeState *ms)
{
assert(ms != NULL);
- if (ms->a.keys != ms->temparray)
+ if (ms->a.keys != ms->temparray) {
PyMem_Free(ms->a.keys);
+ ms->a.keys = NULL;
+ }
}
/* Ensure enough temp memory for 'need' array slots is available.
(void)_list_clear(self);
}
if (iterable != NULL) {
- if (_PyObject_HasLen(iterable)) {
- Py_ssize_t iter_len = PyObject_Size(iterable);
- if (iter_len == -1) {
- if (!PyErr_ExceptionMatches(PyExc_TypeError)) {
- return -1;
- }
- PyErr_Clear();
- }
- if (iter_len > 0 && self->ob_item == NULL
- && list_preallocate_exact(self, iter_len)) {
- return -1;
- }
- }
PyObject *rv = list_extend(self, iterable);
if (rv == NULL)
return -1;
PyTypeObject PyPickleBuffer_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
.tp_name = "pickle.PickleBuffer",
- .tp_doc = "Wrapper for potentially out-of-band buffers",
+ .tp_doc = PyDoc_STR("Wrapper for potentially out-of-band buffers"),
.tp_basicsize = sizeof(PyPickleBufferObject),
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
.tp_new = picklebuf_new,
if (!rep)
goto error;
- /* subtract preallocated bytes */
- writer->min_size -= max_char_size * (newpos - startpos);
+ if (newpos < startpos) {
+ writer->overallocate = 1;
+ p = _PyBytesWriter_Prepare(writer, p,
+ max_char_size * (startpos - newpos));
+ if (p == NULL)
+ goto error;
+ }
+ else {
+ /* subtract preallocated bytes */
+ writer->min_size -= max_char_size * (newpos - startpos);
+ /* Only overallocate the buffer if it's not the last write */
+ writer->overallocate = (newpos < size);
+ }
if (PyBytes_Check(rep)) {
p = _PyBytesWriter_WriteBytes(writer, p,
/* fast search/count implementation, based on a mix between boyer-
moore and horspool, with a few more bells and whistles on the top.
- for some more background, see: http://effbot.org/zone/stringlib.htm */
+ for some more background, see:
+ https://web.archive.org/web/20201107074620/http://effbot.org/zone/stringlib.htm */
/* note: fastsearch may access s[n], which isn't a problem when using
Python's ordinary string types, but may cause problems if you're
Py_ssize_t i, n;
int custom = !Py_IS_TYPE(type, &PyType_Type);
int unbound;
- PyObject *mro_meth = NULL;
- PyObject *type_mro_meth = NULL;
if (custom) {
+ PyObject *mro_meth, *type_mro_meth;
mro_meth = lookup_maybe_method(
(PyObject *)type, &PyId_mro, &unbound);
- if (mro_meth == NULL)
+ if (mro_meth == NULL) {
goto clear;
+ }
type_mro_meth = lookup_maybe_method(
(PyObject *)&PyType_Type, &PyId_mro, &unbound);
- if (type_mro_meth == NULL)
+ if (type_mro_meth == NULL) {
+ Py_DECREF(mro_meth);
goto clear;
- if (mro_meth != type_mro_meth)
+ }
+ int custom_mro = (mro_meth != type_mro_meth);
+ Py_DECREF(mro_meth);
+ Py_DECREF(type_mro_meth);
+ if (custom_mro) {
goto clear;
- Py_XDECREF(mro_meth);
- Py_XDECREF(type_mro_meth);
+ }
}
n = PyTuple_GET_SIZE(bases);
for (i = 0; i < n; i++) {
}
return;
clear:
- Py_XDECREF(mro_meth);
- Py_XDECREF(type_mro_meth);
type->tp_flags &= ~Py_TPFLAGS_VALID_VERSION_TAG;
type->tp_version_tag = 0; /* 0 is not a valid version tag */
}
/* Note: size will always be longer than the resulting Unicode
character count */
- if (PY_SSIZE_T_MAX / (Py_ssize_t)sizeof(wchar_t) < (size + 1)) {
+ if (PY_SSIZE_T_MAX / (Py_ssize_t)sizeof(wchar_t) - 1 < size) {
return -1;
}
pos = 0;
while (pos < len) {
- Py_ssize_t repsize, moreunits;
+ Py_ssize_t newpos, repsize, moreunits;
if (kind == PyUnicode_2BYTE_KIND) {
pos += ucs2lib_utf32_encode((const Py_UCS2 *)data + pos, len - pos,
rep = unicode_encode_call_errorhandler(
errors, &errorHandler,
encoding, "surrogates not allowed",
- str, &exc, pos, pos + 1, &pos);
+ str, &exc, pos, pos + 1, &newpos);
if (!rep)
goto error;
repsize = PyBytes_GET_SIZE(rep);
if (repsize & 3) {
raise_encode_exception(&exc, encoding,
- str, pos - 1, pos,
+ str, pos, pos + 1,
"surrogates not allowed");
goto error;
}
moreunits = repsize = PyUnicode_GET_LENGTH(rep);
if (!PyUnicode_IS_ASCII(rep)) {
raise_encode_exception(&exc, encoding,
- str, pos - 1, pos,
+ str, pos, pos + 1,
"surrogates not allowed");
goto error;
}
}
+ moreunits += pos - newpos;
+ pos = newpos;
/* four bytes are reserved for each surrogate */
- if (moreunits > 1) {
+ if (moreunits > 0) {
Py_ssize_t outpos = out - (uint32_t*) PyBytes_AS_STRING(v);
if (moreunits >= (PY_SSIZE_T_MAX - PyBytes_GET_SIZE(v)) / 4) {
/* integer overflow */
PyErr_NoMemory();
goto error;
}
- if (_PyBytes_Resize(&v, PyBytes_GET_SIZE(v) + 4 * (moreunits - 1)) < 0)
+ if (_PyBytes_Resize(&v, PyBytes_GET_SIZE(v) + 4 * moreunits) < 0)
goto error;
out = (uint32_t*) PyBytes_AS_STRING(v) + outpos;
}
if (PyBytes_Check(rep)) {
memcpy(out, PyBytes_AS_STRING(rep), repsize);
- out += moreunits;
+ out += repsize / 4;
} else /* rep is unicode */ {
assert(PyUnicode_KIND(rep) == PyUnicode_1BYTE_KIND);
ucs1lib_utf32_encode(PyUnicode_1BYTE_DATA(rep), repsize,
pos = 0;
while (pos < len) {
- Py_ssize_t repsize, moreunits;
+ Py_ssize_t newpos, repsize, moreunits;
if (kind == PyUnicode_2BYTE_KIND) {
pos += ucs2lib_utf16_encode((const Py_UCS2 *)data + pos, len - pos,
rep = unicode_encode_call_errorhandler(
errors, &errorHandler,
encoding, "surrogates not allowed",
- str, &exc, pos, pos + 1, &pos);
+ str, &exc, pos, pos + 1, &newpos);
if (!rep)
goto error;
repsize = PyBytes_GET_SIZE(rep);
if (repsize & 1) {
raise_encode_exception(&exc, encoding,
- str, pos - 1, pos,
+ str, pos, pos + 1,
"surrogates not allowed");
goto error;
}
moreunits = repsize = PyUnicode_GET_LENGTH(rep);
if (!PyUnicode_IS_ASCII(rep)) {
raise_encode_exception(&exc, encoding,
- str, pos - 1, pos,
+ str, pos, pos + 1,
"surrogates not allowed");
goto error;
}
}
+ moreunits += pos - newpos;
+ pos = newpos;
/* two bytes are reserved for each surrogate */
- if (moreunits > 1) {
+ if (moreunits > 0) {
Py_ssize_t outpos = out - (unsigned short*) PyBytes_AS_STRING(v);
if (moreunits >= (PY_SSIZE_T_MAX - PyBytes_GET_SIZE(v)) / 2) {
/* integer overflow */
PyErr_NoMemory();
goto error;
}
- if (_PyBytes_Resize(&v, PyBytes_GET_SIZE(v) + 2 * (moreunits - 1)) < 0)
+ if (_PyBytes_Resize(&v, PyBytes_GET_SIZE(v) + 2 * moreunits) < 0)
goto error;
out = (unsigned short*) PyBytes_AS_STRING(v) + outpos;
}
if (PyBytes_Check(rep)) {
memcpy(out, PyBytes_AS_STRING(rep), repsize);
- out += moreunits;
+ out += repsize / 2;
} else /* rep is unicode */ {
assert(PyUnicode_KIND(rep) == PyUnicode_1BYTE_KIND);
ucs1lib_utf16_encode(PyUnicode_1BYTE_DATA(rep), repsize,
if (rep == NULL)
goto onError;
- /* subtract preallocated bytes */
- writer.min_size -= newpos - collstart;
+ if (newpos < collstart) {
+ writer.overallocate = 1;
+ str = _PyBytesWriter_Prepare(&writer, str,
+ collstart - newpos);
+ if (str == NULL)
+ goto onError;
+ }
+ else {
+ /* subtract preallocated bytes */
+ writer.min_size -= newpos - collstart;
+ /* Only overallocate the buffer if it's not the last write */
+ writer.overallocate = (newpos < size);
+ }
if (PyBytes_Check(rep)) {
/* Directly copy bytes result to output. */
pos, pos + 1, &newpos);
if (rep == NULL)
goto error;
- pos = newpos;
+ Py_ssize_t morebytes = pos - newpos;
if (PyBytes_Check(rep)) {
outsize = PyBytes_GET_SIZE(rep);
- if (outsize != 1) {
+ morebytes += outsize;
+ if (morebytes > 0) {
Py_ssize_t offset = out - PyBytes_AS_STRING(*outbytes);
- newoutsize = PyBytes_GET_SIZE(*outbytes) + (outsize - 1);
+ newoutsize = PyBytes_GET_SIZE(*outbytes) + morebytes;
if (_PyBytes_Resize(outbytes, newoutsize) < 0) {
Py_DECREF(rep);
goto error;
}
outsize = PyUnicode_GET_LENGTH(rep);
- if (outsize != 1) {
+ morebytes += outsize;
+ if (morebytes > 0) {
Py_ssize_t offset = out - PyBytes_AS_STRING(*outbytes);
- newoutsize = PyBytes_GET_SIZE(*outbytes) + (outsize - 1);
+ newoutsize = PyBytes_GET_SIZE(*outbytes) + morebytes;
if (_PyBytes_Resize(outbytes, newoutsize) < 0) {
Py_DECREF(rep);
goto error;
out++;
}
}
+ pos = newpos;
Py_DECREF(rep);
}
/* write a NUL byte */
str.split as unicode_split
sep: object = None
- The delimiter according which to split the string.
- None (the default value) means split according to any whitespace,
- and discard empty strings from the result.
+ The separator used to split the string.
+
+ When set to None (the default value), will split on any whitespace
+ character (including \\n \\r \\t \\f and spaces) and will discard
+ empty strings from the result.
maxsplit: Py_ssize_t = -1
- Maximum number of splits to do.
+ Maximum number of splits (starting from the left).
-1 (the default value) means no limit.
-Return a list of the words in the string, using sep as the delimiter string.
+Return a list of the substrings in the string, using sep as the separator string.
+
+Note, str.split() is mainly useful for data that has been intentionally
+delimited. With natural text that includes punctuation, consider using
+the regular expression module.
+
[clinic start generated code]*/
static PyObject *
unicode_split_impl(PyObject *self, PyObject *sep, Py_ssize_t maxsplit)
-/*[clinic end generated code: output=3a65b1db356948dc input=606e750488a82359]*/
+/*[clinic end generated code: output=3a65b1db356948dc input=906d953b44efc43b]*/
{
if (sep == Py_None)
return split(self, NULL, maxsplit);
/*[clinic input]
str.rsplit as unicode_rsplit = str.split
-Return a list of the words in the string, using sep as the delimiter string.
+Return a list of the substrings in the string, using sep as the separator string.
-Splits are done starting at the end of the string and working to the front.
+Splitting starts at the end of the string and works to the front.
[clinic start generated code]*/
static PyObject *
unicode_rsplit_impl(PyObject *self, PyObject *sep, Py_ssize_t maxsplit)
-/*[clinic end generated code: output=c2b815c63bcabffc input=12ad4bf57dd35f15]*/
+/*[clinic end generated code: output=c2b815c63bcabffc input=ea78406060fce33c]*/
{
if (sep == Py_None)
return rsplit(self, NULL, maxsplit);
if (_Py_IsMainInterpreter(interp)) {
// _PyUnicode_ClearInterned() must be called before _PyUnicode_Fini()
assert(interned == NULL);
+ // bpo-47182: force a unicodedata CAPI capsule re-import on
+ // subsequent initialization of main interpreter.
+ ucnhash_capi = NULL;
}
_PyUnicode_FiniEncodings(&state->fs_codec);
PyTypeObject _PyUnion_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
.tp_name = "types.UnionType",
- .tp_doc = "Represent a PEP 604 union type\n"
+ .tp_doc = PyDoc_STR("Represent a PEP 604 union type\n"
"\n"
- "E.g. for int | str",
+ "E.g. for int | str"),
.tp_basicsize = sizeof(unionobject),
.tp_dealloc = unionobject_dealloc,
.tp_alloc = PyType_GenericAlloc,
\r
1) The script location; the current directory without script.\r
2) The PYTHONPATH variable, if set.\r
- 3) For Win32 platforms (NT/95), paths specified in the Registry.\r
+ 3) Paths specified in the Registry.\r
4) Default directories lib, lib/win, lib/test, lib/tkinter;\r
these are searched relative to the environment variable\r
PYTHONHOME, if set, or relative to the executable and its\r
or the current directory (not useful).\r
5) The directory containing the executable.\r
\r
-The best installation strategy is to put the Python executable (and\r
-DLL, for Win32 platforms) in some convenient directory such as\r
+The best installation strategy is to put the Python executable and\r
+DLL in some convenient directory such as\r
C:/python, and copy all library files and subdirectories (using XCOPY)\r
to C:/python/lib. Then you don't need to set PYTHONPATH. Otherwise,\r
set the environment variable PYTHONPATH to your Python search path.\r
if NOT "%IncludeTkinterSrc%"=="false" set libraries=%libraries% tcl-core-8.6.12.0\r
if NOT "%IncludeTkinterSrc%"=="false" set libraries=%libraries% tk-8.6.12.0\r
if NOT "%IncludeTkinterSrc%"=="false" set libraries=%libraries% tix-8.4.3.6\r
-set libraries=%libraries% xz-5.2.2\r
-set libraries=%libraries% zlib-1.2.11\r
+set libraries=%libraries% xz-5.2.5\r
+set libraries=%libraries% zlib-1.2.12\r
\r
for %%e in (%libraries%) do (\r
if exist "%EXTERNALS_DIR%\%%e" (\r
<ItemDefinitionGroup>\r
<ClCompile>\r
<PreprocessorDefinitions>WIN32;HAVE_CONFIG_H;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
- <AdditionalIncludeDirectories>$(lzmaDir)windows;$(lzmaDir)src/liblzma/common;$(lzmaDir)src/common;$(lzmaDir)src/liblzma/api;$(lzmaDir)src/liblzma/check;$(lzmaDir)src/liblzma/delta;$(lzmaDir)src/liblzma/lz;$(lzmaDir)src/liblzma/lzma;$(lzmaDir)src/liblzma/rangecoder;$(lzmaDir)src/liblzma/simple;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <AdditionalIncludeDirectories>$(lzmaDir)windows/vs2019;$(lzmaDir)src/liblzma/common;$(lzmaDir)src/common;$(lzmaDir)src/liblzma/api;$(lzmaDir)src/liblzma/check;$(lzmaDir)src/liblzma/delta;$(lzmaDir)src/liblzma/lz;$(lzmaDir)src/liblzma/lzma;$(lzmaDir)src/liblzma/rangecoder;$(lzmaDir)src/liblzma/simple;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<DisableSpecificWarnings>4028;4113;4133;4244;4267;4996;%(DisableSpecificWarnings)</DisableSpecificWarnings>\r
</ClCompile>\r
</ItemDefinitionGroup>\r
<ClInclude Include="$(lzmaDir)src\liblzma\simple\simple_decoder.h" />\r
<ClInclude Include="$(lzmaDir)src\liblzma\simple\simple_encoder.h" />\r
<ClInclude Include="$(lzmaDir)src\liblzma\simple\simple_private.h" />\r
- <ClInclude Include="$(lzmaDir)windows\config.h" />\r
+ <ClInclude Include="$(lzmaDir)windows\vs2019\config.h" />\r
</ItemGroup>\r
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
<ImportGroup Label="ExtensionTargets">\r
<ClInclude Include="$(lzmaDir)src\liblzma\simple\simple_private.h">\r
<Filter>Header Files</Filter>\r
</ClInclude>\r
- <ClInclude Include="$(lzmaDir)windows\config.h">\r
+ <ClInclude Include="$(lzmaDir)windows\vs2019\config.h">\r
<Filter>Header Files</Filter>\r
</ClInclude>\r
</ItemGroup>\r
<SupportSigning Condition="'$(SupportSigning)' == ''">true</SupportSigning>\r
<SupportSigning Condition="'$(Configuration)' == 'Debug'">false</SupportSigning>\r
<SupportSigning Condition="'$(ConfigurationType)' == 'StaticLibrary'">false</SupportSigning>\r
+ <LinkIncremental Condition="$(Configuration) != 'Debug'">false</LinkIncremental>\r
</PropertyGroup>\r
\r
<PropertyGroup>\r
<ExternalsDir Condition="!HasTrailingSlash($(ExternalsDir))">$(ExternalsDir)\</ExternalsDir>\r
<sqlite3Dir>$(ExternalsDir)sqlite-3.37.2.0\</sqlite3Dir>\r
<bz2Dir>$(ExternalsDir)bzip2-1.0.8\</bz2Dir>\r
- <lzmaDir>$(ExternalsDir)xz-5.2.2\</lzmaDir>\r
+ <lzmaDir>$(ExternalsDir)xz-5.2.5\</lzmaDir>\r
<libffiDir>$(ExternalsDir)libffi-3.3.0\</libffiDir>\r
<libffiOutDir>$(ExternalsDir)libffi-3.3.0\$(ArchName)\</libffiOutDir>\r
<libffiIncludeDir>$(libffiOutDir)include</libffiIncludeDir>\r
<opensslOutDir>$(ExternalsDir)openssl-bin-1.1.1n\$(ArchName)\</opensslOutDir>\r
<opensslIncludeDir>$(opensslOutDir)include</opensslIncludeDir>\r
<nasmDir>$(ExternalsDir)\nasm-2.11.06\</nasmDir>\r
- <zlibDir>$(ExternalsDir)\zlib-1.2.11\</zlibDir>\r
+ <zlibDir>$(ExternalsDir)\zlib-1.2.12\</zlibDir>\r
\r
<!-- Suffix for all binaries when building for debug -->\r
<PyDebugExt Condition="'$(PyDebugExt)' == '' and $(Configuration) == 'Debug'">_d</PyDebugExt>\r
</Target>\r
<Target Name="GeneratePythonBat" AfterTargets="AfterBuild">\r
<PropertyGroup>\r
- <_PGOPath Condition="$(Configuration) == 'PGInstrument' and $(Platform) == 'Win32'">@set PATH=%PATH%%3B$(VCInstallDir)bin</_PGOPath>\r
- <_PGOPath Condition="$(Configuration) == 'PGInstrument' and $(Platform) == 'x64'">@set PATH=%PATH%%3B$(VCInstallDir)bin\amd64</_PGOPath>\r
- <_PGOPath Condition="$(Configuration) == 'PGInstrument' and $(VC_PGO_RunTime_Dir) != ''">@set PATH=%PATH%%3B$(VC_PGO_RunTime_Dir)</_PGOPath>\r
<_Content>@rem This script invokes the most recently built Python with all arguments\r
@rem passed through to the interpreter. This file is generated by the\r
@rem build process and any changes *will* be thrown away by the next\r
@echo Running $(Configuration)^|$(Platform) interpreter...\r
@setlocal\r
@set PYTHONHOME=$(PySourcePath)\r
-$(_PGOPath)\r
@"$(OutDir)python$(PyDebugExt).exe" %*\r
</_Content>\r
<_ExistingContent Condition="Exists('$(PySourcePath)python.bat')">$([System.IO.File]::ReadAllText('$(PySourcePath)python.bat'))</_ExistingContent>\r
</PropertyGroup>\r
<WriteLinesToFile File="$(PySourcePath)python.bat" Lines="$(_Content)" Overwrite="true" Condition="'$(_Content)' != '$(_ExistingContent)'" />\r
</Target>\r
+ <Target Name="CopyPGORT" AfterTargets="Link" Condition="$(Configuration) == 'PGInstrument'">\r
+ <ItemGroup>\r
+ <_PGORT Include="$(VCToolsInstallDir)bin\Hostx86\x86\pgort140.dll" Condition="$(Platform) == 'Win32'" />\r
+ <_PGORT Include="$(VCToolsInstallDir)bin\Hostx64\x64\pgort140.dll" Condition="$(Platform) == 'x64'" />\r
+ <_PGORT Include="$(VCToolsInstallDir)bin\arm64\pgort140.dll" Condition="$(Platform) == 'ARM64'" />\r
+ </ItemGroup>\r
+ <Warning Text="Unable to locate pgort140.dll for $(Platform)." Condition="@(_PGORT) == '' or !Exists(@(_PGORT))" />\r
+ <Copy SourceFiles="@(_PGORT)" DestinationFolder="$(OutDir)">\r
+ <Output TaskParameter="CopiedFiles" ItemName="FileWrites" />\r
+ </Copy>\r
+ </Target>\r
</Project>\r
<ClCompile Include="$(zlibDir)\adler32.c" />\r
<ClCompile Include="$(zlibDir)\compress.c" />\r
<ClCompile Include="$(zlibDir)\crc32.c" />\r
- <ClCompile Include="$(zlibDir)\deflate.c" />\r
+ <ClCompile Include="$(zlibDir)\deflate.c">\r
+ <DisableSpecificWarnings>4244</DisableSpecificWarnings>\r
+ </ClCompile>\r
<ClCompile Include="$(zlibDir)\infback.c" />\r
<ClCompile Include="$(zlibDir)\inffast.c" />\r
<ClCompile Include="$(zlibDir)\inflate.c" />\r
Building Python using Microsoft Visual C++\r
------------------------------------------\r
\r
-This directory is used to build CPython for Microsoft Windows NT version\r
-6.0 or higher (Windows Vista, Windows Server 2008, or later) on 32 and 64\r
+This directory is used to build CPython for Microsoft Windows on 32- and 64-\r
bit platforms. Using this directory requires an installation of\r
-Microsoft Visual Studio 2017 (MSVC 14.1) with the *Python workload* and\r
-its optional *Python native development* component selected. (For\r
-command-line builds, Visual Studio 2015 may also be used.)\r
+Microsoft Visual Studio (MSVC) with the *Python workload* and\r
+its optional *Python native development* component selected.\r
\r
Building from the command line is recommended in order to obtain any\r
external dependencies. To build, simply run the "build.bat" script without\r
Prompt window\r
pylauncher\r
py.exe, the Python Launcher for Windows, see\r
- http://docs.python.org/3/using/windows.html#launcher\r
+ https://docs.python.org/3/using/windows.html#launcher\r
pywlauncher\r
pyw.exe, a variant of py.exe that doesn't open a Command Prompt\r
window\r
_lzma\r
Python wrapper for version 5.2.2 of the liblzma compression library\r
Homepage:\r
- http://tukaani.org/xz/\r
+ https://tukaani.org/xz/\r
_ssl\r
Python wrapper for version 1.1.1k of the OpenSSL secure sockets\r
library, which is downloaded from our binaries repository at\r
https://github.com/python/cpython-bin-deps.\r
\r
Homepage:\r
- http://www.openssl.org/\r
+ https://www.openssl.org/\r
\r
Building OpenSSL requires Perl on your path, and can be performed by\r
running PCbuild\prepare_ssl.bat. This will retrieve the version of\r
_sqlite3\r
Wraps SQLite 3.37.2, which is itself built by sqlite3.vcxproj\r
Homepage:\r
- http://www.sqlite.org/\r
+ https://www.sqlite.org/\r
_tkinter\r
Wraps version 8.6.6 of the Tk windowing system, which is downloaded\r
from our binaries repository at\r
https://github.com/python/cpython-bin-deps.\r
\r
Homepage:\r
- http://www.tcl.tk/\r
+ https://www.tcl.tk/\r
\r
Building Tcl and Tk can be performed by running\r
PCbuild\prepare_tcltk.bat. This will retrieve the version of the\r
PGI python, and finally creates the optimized files.\r
\r
See\r
- http://msdn.microsoft.com/en-us/library/e7k32f4k(VS.140).aspx\r
+ https://docs.microsoft.com/en-us/cpp/build/profile-guided-optimizations\r
for more on this topic.\r
\r
\r
)
{
D(fprintf(stderr, "%*c+ invalid_arguments[%d-%d]: %s succeeded!\n", p->level, ' ', _mark, p->mark, "expression for_if_clauses ',' [args | expression for_if_clauses]"));
- _res = RAISE_SYNTAX_ERROR_KNOWN_RANGE ( a , PyPegen_last_item ( b , comprehension_ty ) -> target , "Generator expression must be parenthesized" );
+ _res = RAISE_SYNTAX_ERROR_KNOWN_RANGE ( a , _PyPegen_get_last_comprehension_item ( PyPegen_last_item ( b , comprehension_ty ) ) , "Generator expression must be parenthesized" );
if (_res == NULL && PyErr_Occurred()) {
p->error_indicator = 1;
p->level--;
)
{
D(fprintf(stderr, "%*c+ invalid_arguments[%d-%d]: %s succeeded!\n", p->level, ' ', _mark, p->mark, "args ',' expression for_if_clauses"));
- _res = RAISE_SYNTAX_ERROR_KNOWN_RANGE ( a , asdl_seq_GET ( b , b -> size - 1 ) -> target , "Generator expression must be parenthesized" );
+ _res = RAISE_SYNTAX_ERROR_KNOWN_RANGE ( a , _PyPegen_get_last_comprehension_item ( PyPegen_last_item ( b , comprehension_ty ) ) , "Generator expression must be parenthesized" );
if (_res == NULL && PyErr_Occurred()) {
p->error_indicator = 1;
p->level--;
)
{
D(fprintf(stderr, "%*c+ invalid_except_stmt_indent[%d-%d]: %s succeeded!\n", p->level, ' ', _mark, p->mark, "'except' ':' NEWLINE !INDENT"));
- _res = RAISE_SYNTAX_ERROR ( "expected an indented block after except statement on line %d" , a -> lineno );
+ _res = RAISE_INDENTATION_ERROR ( "expected an indented block after 'except' statement on line %d" , a -> lineno );
if (_res == NULL && PyErr_Occurred()) {
p->error_indicator = 1;
p->level--;
assert((p->tok->fp == NULL && p->tok->str != NULL) || p->tok->fp == stdin);
char *cur_line = p->tok->fp_interactive ? p->tok->interactive_src_start : p->tok->str;
- assert(cur_line != NULL);
+ if (cur_line == NULL) {
+ assert(p->tok->fp_interactive);
+ // We can reach this point if the tokenizer buffers for interactive source have not been
+ // initialized because we failed to decode the original source with the given locale.
+ return PyUnicode_FromStringAndSize("", 0);
+ }
const char* buf_end = p->tok->fp_interactive ? p->tok->interactive_src_end : p->tok->inp;
Py_ssize_t relative_lineno = p->starting_lineno ? lineno - p->starting_lineno + 1 : lineno;
goto error;
}
- if (p->tok->fp_interactive) {
+ if (p->tok->fp_interactive && p->tok->interactive_src_start != NULL) {
error_line = get_error_line(p, lineno);
}
else if (p->start_rule == Py_file_input) {
}
-static inline expr_ty
+expr_ty
_PyPegen_get_last_comprehension_item(comprehension_ty comprehension) {
if (comprehension->ifs == NULL || asdl_seq_LEN(comprehension->ifs) == 0) {
return comprehension->iter;
}
void *_PyPegen_arguments_parsing_error(Parser *, expr_ty);
+expr_ty _PyPegen_get_last_comprehension_item(comprehension_ty comprehension);
void *_PyPegen_nonparen_genexp_in_call(Parser *p, expr_ty args, asdl_comprehension_seq *comprehensions);
while (Py_ISSPACE(**str)) {
*str += 1;
}
-
+ if (*str >= end) {
+ goto unexpected_end_of_string;
+ }
/* Set *expr_text to the text of the expression. */
*expr_text = PyUnicode_FromStringAndSize(expr_start, *str-expr_start);
if (!*expr_text) {
}
printf("\n");
PyErr_Restore(type, value, traceback);
+ // gh-91924: PyObject_Print() can indirectly set lltrace to 0
+ lltrace = 1;
return 1;
}
#endif
/* Success block for __anext__ */
VISIT(c, expr, s->v.AsyncFor.target);
VISIT_SEQ(c, stmt, s->v.AsyncFor.body);
+ /* Mark jump as artificial */
+ c->u->u_lineno = -1;
ADDOP_JUMP(c, JUMP_ABSOLUTE, start);
compiler_pop_fblock(c, FOR_LOOP, start);
first = Py_MAX(first, 0);
_Py_BEGIN_SUPPRESS_IPH
#ifdef HAVE_CLOSE_RANGE
- if (close_range(first, last, 0) == 0 || errno != ENOSYS) {
- /* Any errors encountered while closing file descriptors are ignored;
- * ENOSYS means no kernel support, though,
- * so we'll fallback to the other methods. */
+ if (close_range(first, last, 0) == 0) {
+ /* close_range() ignores errors when it closes file descriptors.
+ * Possible reasons for an error return are lack of kernel support
+ * or denial of the underlying syscall by a seccomp sandbox on Linux.
+ * Fallback to other methods in case of any error. */
}
else
#endif /* HAVE_CLOSE_RANGE */
return -1;
}
- /* While it's suboptimal to reduce Python's 64 bit hash to
+ /* While it's somewhat suboptimal to reduce Python's 64 bit hash to
32 bits via XOR, it seems that the resulting hash function
is good enough (this is also how Long type is hashed in Java.)
Storing 10, 100, 1000 Python strings results in a relatively
shallow and uniform tree structure.
- Please don't change this hashing algorithm, as there are many
- tests that test some exact tree shape to cover all code paths.
+ Also it's worth noting that it would be possible to adapt the tree
+ structure to 64 bit hashes, but that would increase memory pressure
+ and provide little to no performance benefits for collections with
+ fewer than billions of key/value pairs.
+
+ Important: do not change this hash reducing function. There are many
+ tests that need an exact tree shape to cover all code paths and
+ we do that by specifying concrete values for test data's `__hash__`.
+ If this function is changed most of the regression tests would
+ become useless.
*/
int32_t xored = (int32_t)(hash & 0xffffffffl) ^ (int32_t)(hash >> 32);
return xored == -1 ? -2 : xored;
-d : turn on parser debugging output (for experts only, only works on\n\
debug builds); also PYTHONDEBUG=x\n\
-E : ignore PYTHON* environment variables (such as PYTHONPATH)\n\
--h : print this help message and exit (also --help)\n\
+-h : print this help message and exit (also -? or --help)\n\
";
static const char usage_2[] = "\
-i : inspect interactively after running script; forces a prompt even\n\
also PYTHONWARNINGS=arg\n\
-x : skip first line of source, allowing use of non-Unix forms of #!cmd\n\
-X opt : set implementation-specific option. The following options are available:\n\
-\n\
-X faulthandler: enable faulthandler\n\
-X showrefcount: output the total reference count and number of used\n\
memory blocks when the program finishes or after each statement in the\n\
checks which are too expensive to be enabled by default. Effect of the\n\
developer mode:\n\
* Add default warning filter, as -W default\n\
- * Install debug hooks on memory allocators: see the PyMem_SetupDebugHooks() C function\n\
+ * Install debug hooks on memory allocators: see the PyMem_SetupDebugHooks()\n\
+ C function\n\
* Enable the faulthandler module to dump the Python traceback on a crash\n\
* Enable asyncio debug mode\n\
* Set the dev_mode attribute of sys.flags to True\n\
-X pycache_prefix=PATH: enable writing .pyc files to a parallel tree rooted at the\n\
given directory instead of to the code tree\n\
-X warn_default_encoding: enable opt-in EncodingWarning for 'encoding=None'\n\
-\n\
--check-hash-based-pycs always|default|never:\n\
control how Python invalidates hash-based .pyc files\n\
";
-This is Python version 3.10.4
+This is Python version 3.10.5
=============================
.. image:: https://travis-ci.com/python/cpython.svg?branch=master
assert parameters
assert isinstance(parameters[0].converter, self_converter)
del parameters[0]
+ requires_defining_class = False
+ if parameters and isinstance(parameters[0].converter, defining_class_converter):
+ requires_defining_class = True
+ del parameters[0]
converters = [p.converter for p in parameters]
has_option_groups = parameters and (parameters[0].group or parameters[-1].group)
if not p.is_optional():
min_pos = i
- requires_defining_class = any(
- isinstance(p.converter, defining_class_converter)
- for p in parameters)
-
meth_o = (len(parameters) == 1 and
parameters[0].is_positional_only() and
not converters[0].is_optional() and
return linear_format(output(), parser_declarations=declarations)
if not parameters:
- # no parameters, METH_NOARGS
+ if not requires_defining_class:
+ # no parameters, METH_NOARGS
+ flags = "METH_NOARGS"
- flags = "METH_NOARGS"
+ parser_prototype = normalize_snippet("""
+ static PyObject *
+ {c_basename}({self_type}{self_name}, PyObject *Py_UNUSED(ignored))
+ """)
+ parser_code = []
- parser_prototype = normalize_snippet("""
- static PyObject *
- {c_basename}({self_type}{self_name}, PyObject *Py_UNUSED(ignored))
- """)
- parser_definition = parser_prototype
+ else:
+ assert not new_or_init
- if default_return_converter:
- parser_definition = parser_prototype + '\n' + normalize_snippet("""
- {{
- return {c_basename}_impl({impl_arguments});
+ flags = "METH_METHOD|METH_FASTCALL|METH_KEYWORDS"
+
+ parser_prototype = parser_prototype_def_class
+ return_error = ('return NULL;' if default_return_converter
+ else 'goto exit;')
+ parser_code = [normalize_snippet("""
+ if (nargs) {{
+ PyErr_SetString(PyExc_TypeError, "{name}() takes no arguments");
+ %s
}}
- """)
+ """ % return_error, indent=4)]
+
+ if default_return_converter:
+ parser_definition = '\n'.join([
+ parser_prototype,
+ '{{',
+ *parser_code,
+ ' return {c_basename}_impl({impl_arguments});',
+ '}}'])
else:
- parser_definition = parser_body(parser_prototype)
+ parser_definition = parser_body(parser_prototype, *parser_code)
elif meth_o:
flags = "METH_O"
add_label = None
for i, p in enumerate(parameters):
+ if isinstance(p.converter, defining_class_converter):
+ raise ValueError("defining_class should be the first "
+ "parameter (after self)")
displayname = p.get_displayname(i+1)
parsearg = p.converter.parse_arg(argname_fmt % i, displayname)
if parsearg is None:
def is_stop_line(line):
# make sure to recognize stop line even if it
# doesn't end with EOL (it could be the very end of the file)
- if not line.startswith(stop_line):
+ if line.startswith(stop_line):
+ remainder = line[len(stop_line):]
+ if remainder and not remainder.isspace():
+ fail(f"Garbage after stop line: {remainder!r}")
+ return True
+ else:
+ # gh-92256: don't allow incorrectly formatted stop lines
+ if line.lstrip().startswith(stop_line):
+ fail(f"Whitespace is not allowed before the stop line: {line!r}")
return False
- remainder = line[len(stop_line):]
- return (not remainder) or remainder.isspace()
# consume body of program
while self.input:
out.write('\\r')
# Map non-printable US ASCII to '\xhh' */
- elif ch < ' ' or ch == 0x7F:
+ elif ch < ' ' or ord(ch) == 0x7F:
out.write('\\x')
out.write(hexdigits[(ord(ch) >> 4) & 0x000F])
out.write(hexdigits[ord(ch) & 0x000F])