--- /dev/null
+# Current docs for the syntax of this file are at:
+# https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted.md
+
+name: $(BuildDefinitionName)_$(Date:yyyyMMdd)$(Rev:.rr)
+
+queue:
+ name: Hosted Linux Preview
+
+#variables:
+
+steps:
+- checkout: self
+ clean: true
+ fetchDepth: 5
+
+- script: sudo apt-get update && sudo apt-get install -qy texlive-full
+ displayName: 'Install LaTeX'
+
+- task: UsePythonVersion@0
+ displayName: 'Use Python 3.6 or later'
+ inputs:
+ versionSpec: '>=3.6'
+
+- script: python -m pip install sphinx blurb python-docs-theme
+ displayName: 'Install build dependencies'
+
+- script: make dist PYTHON=python SPHINXBUILD='python -m sphinx' BLURB='python -m blurb'
+ workingDirectory: '$(build.sourcesDirectory)/Doc'
+ displayName: 'Build documentation'
+
+- task: PublishBuildArtifacts@1
+ displayName: 'Publish build'
+ inputs:
+ PathToPublish: '$(build.sourcesDirectory)/Doc/build'
+ ArtifactName: build
+ publishLocation: Container
+
+- task: PublishBuildArtifacts@1
+ displayName: 'Publish dist'
+ inputs:
+ PathToPublish: '$(build.sourcesDirectory)/Doc/dist'
+ ArtifactName: dist
+ publishLocation: Container
--- /dev/null
+# Current docs for the syntax of this file are at:
+# https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted.md
+
+name: $(BuildDefinitionName)_$(Date:yyyyMMdd)$(Rev:.rr)
+
+queue:
+ name: Hosted Linux Preview
+
+trigger:
+ branches:
+ include:
+ - master
+ - 3.7
+ - 3.6
+ paths:
+ include:
+ - Doc/*
+
+#variables:
+
+steps:
+- checkout: self
+ clean: true
+ fetchDepth: 5
+
+- task: UsePythonVersion@0
+ displayName: 'Use Python 3.6 or later'
+ inputs:
+ versionSpec: '>=3.6'
+
+- script: python -m pip install sphinx~=1.6.1 blurb python-docs-theme
+ displayName: 'Install build dependencies'
+
+- script: make check suspicious html PYTHON=python
+ workingDirectory: '$(build.sourcesDirectory)/Doc'
+ displayName: 'Build documentation'
+
+- task: PublishBuildArtifacts@1
+ displayName: 'Publish build'
+ condition: and(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))
+ inputs:
+ PathToPublish: '$(build.sourcesDirectory)/Doc/build'
+ ArtifactName: build
+ publishLocation: Container
--- /dev/null
+# Current docs for the syntax of this file are at:
+# https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted.md
+
+name: $(BuildDefinitionName)_$(Date:yyyyMMdd)$(Rev:.rr)
+
+queue:
+ name: Hosted Linux Preview
+
+trigger:
+ branches:
+ include:
+ - master
+ - 3.7
+ - 3.6
+ paths:
+ exclude:
+ - Doc/*
+ - Tools/*
+
+#variables:
+
+
+steps:
+- checkout: self
+ clean: true
+ fetchDepth: 5
+
+#- template: linux-deps.yml
+
+# See https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted-templates.md
+# For now, we copy/paste the steps
+- script: sudo sh -c 'echo "deb-src http://archive.ubuntu.com/ubuntu/ xenial main" > /etc/apt/sources.list.d/python.list' && sudo apt-get update
+ displayName: 'Update apt-get lists'
+
+- script: >
+ sudo apt-get -yq install
+ build-essential
+ zlib1g-dev
+ libbz2-dev
+ liblzma-dev
+ libncurses5-dev
+ libreadline6-dev
+ libsqlite3-dev
+ libssl-dev
+ libgdbm-dev
+ tk-dev
+ lzma
+ lzma-dev
+ liblzma-dev
+ libffi-dev
+ uuid-dev
+ displayName: 'Install dependencies'
+
+- script: ./configure --with-pydebug
+ displayName: 'Configure CPython (debug)'
+
+- script: make -s -j4
+ displayName: 'Build CPython'
+
+- script: make pythoninfo
+ displayName: 'Display build info'
+
+- script: make buildbottest TESTOPTS="-j4 -uall,-cpu"
+ displayName: 'Tests'
--- /dev/null
+# Current docs for the syntax of this file are at:
+# https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted.md
+
+name: $(BuildDefinitionName)_$(Date:yyyyMMdd)$(Rev:.rr)
+
+queue:
+ name: Hosted Linux Preview
+
+trigger:
+ branches:
+ include:
+ - master
+ - 3.7
+ - 3.6
+ paths:
+ exclude:
+ - Doc/*
+ - Tools/*
+
+#variables:
+
+steps:
+- checkout: self
+ clean: true
+ fetchDepth: 5
+
+#- template: linux-deps.yml
+
+# See https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted-templates.md
+# For now, we copy/paste the steps
+- script: sudo sh -c 'echo "deb-src http://archive.ubuntu.com/ubuntu/ xenial main" > /etc/apt/sources.list.d/python.list' && sudo apt-get update
+ displayName: 'Update apt-get lists'
+
+- script: >
+ sudo apt-get -yq install
+ build-essential
+ zlib1g-dev
+ libbz2-dev
+ liblzma-dev
+ libncurses5-dev
+ libreadline6-dev
+ libsqlite3-dev
+ libssl-dev
+ libgdbm-dev
+ tk-dev
+ lzma
+ lzma-dev
+ liblzma-dev
+ libffi-dev
+ uuid-dev
+ displayName: 'Install dependencies'
+
+
+- script: ./configure --with-pydebug
+ displayName: 'Configure CPython (debug)'
+
+- script: make -s -j4
+ displayName: 'Build CPython'
+
+- script: ./python -m venv venv && ./venv/bin/python -m pip install -U coverage
+ displayName: 'Set up virtual environment'
+
+- script: ./venv/bin/python -m test.pythoninfo
+ displayName: 'Display build info'
+
+- script: ./venv/bin/python -m coverage run --pylib -m test -uall,-cpu -x test_multiprocessing_fork -x test_multiprocessing_forkserver -x test_multiprocessing_spawn -x test_concurrent_futures
+ displayName: 'Tests with coverage'
+
+- script: source ./venv/bin/activate && bash <(curl -s https://codecov.io/bash)
+ displayName: 'Publish code coverage results'
--- /dev/null
+# Note: this file is not currently used, but when template support comes to VSTS it
+# will be referenced from the other scripts.
+
+# Current docs for the syntax of this file are at:
+# https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted.md
+
+#parameters:
+
+steps:
+- script: sudo sh -c 'echo "deb-src http://archive.ubuntu.com/ubuntu/ xenial main" > /etc/apt/sources.list.d/python.list' && sudo apt-get update
+ displayName: 'Update apt-get lists'
+
+- script: >
+ sudo apt-get -yq install
+ build-essential
+ zlib1g-dev
+ libbz2-dev
+ liblzma-dev
+ libncurses5-dev
+ libreadline6-dev
+ libsqlite3-dev
+ libssl-dev
+ libgdbm-dev
+ tk-dev
+ lzma
+ lzma-dev
+ liblzma-dev
+ libffi-dev
+ uuid-dev
+ displayName: 'Install dependencies'
--- /dev/null
+# Current docs for the syntax of this file are at:
+# https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted.md
+
+name: $(BuildDefinitionName)_$(Date:yyyyMMdd)$(Rev:.rr)
+
+queue:
+ name: Hosted Linux Preview
+
+trigger:
+ branches:
+ include:
+ - master
+ - 3.7
+ - 3.6
+ paths:
+ exclude:
+ - Doc/*
+ - Tools/*
+
+#variables:
+
+steps:
+- checkout: self
+ clean: true
+ fetchDepth: 5
+
+#- template: linux-deps.yml
+
+# See https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted-templates.md
+# For now, we copy/paste the steps
+- script: sudo sh -c 'echo "deb-src http://archive.ubuntu.com/ubuntu/ xenial main" > /etc/apt/sources.list.d/python.list' && sudo apt-get update
+ displayName: 'Update apt-get lists'
+
+- script: >
+ sudo apt-get -yq install
+ build-essential
+ zlib1g-dev
+ libbz2-dev
+ liblzma-dev
+ libncurses5-dev
+ libreadline6-dev
+ libsqlite3-dev
+ libssl-dev
+ libgdbm-dev
+ tk-dev
+ lzma
+ lzma-dev
+ liblzma-dev
+ libffi-dev
+ uuid-dev
+ displayName: 'Install dependencies'
+
+
+- script: ./configure --with-pydebug
+ displayName: 'Configure CPython (debug)'
+
+- script: make -s -j4
+ displayName: 'Build CPython'
+
+- script: make pythoninfo
+ displayName: 'Display build info'
+
+# Run patchcheck and fail if anything is discovered
+- script: ./python Tools/scripts/patchcheck.py --travis true
+ displayName: 'Run patchcheck.py'
+
+- script: make buildbottest TESTOPTS="-j4 -uall,-cpu"
+ displayName: 'Tests'
--- /dev/null
+# Current docs for the syntax of this file are at:
+# https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted.md
+
+name: $(BuildDefinitionName)_$(Date:yyyyMMdd)$(Rev:.rr)
+
+queue:
+ name: Hosted macOS Preview
+
+trigger:
+ branches:
+ include:
+ - master
+ - 3.7
+ - 3.6
+ paths:
+ exclude:
+ - Doc/*
+ - Tools/*
+
+#variables:
+
+steps:
+- checkout: self
+ clean: true
+ fetchDepth: 5
+
+- script: ./configure --with-pydebug --with-openssl=/usr/local/opt/openssl
+ displayName: 'Configure CPython (debug)'
+
+- script: make -s -j4
+ displayName: 'Build CPython'
+
+- script: make pythoninfo
+ displayName: 'Display build info'
+
+- script: make buildbottest TESTOPTS="-j4 -uall,-cpu"
+ displayName: 'Tests'
--- /dev/null
+# Current docs for the syntax of this file are at:
+# https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted.md
+
+name: $(BuildDefinitionName)_$(Date:yyyyMMdd)$(Rev:.rr)
+
+queue:
+ name: Hosted macOS Preview
+
+trigger:
+ branches:
+ include:
+ - master
+ - 3.7
+ - 3.6
+ paths:
+ exclude:
+ - Doc/*
+ - Tools/*
+
+#variables:
+
+steps:
+- checkout: self
+ clean: true
+ fetchDepth: 5
+
+- script: ./configure --with-pydebug --with-openssl=/usr/local/opt/openssl
+ displayName: 'Configure CPython (debug)'
+
+- script: make -s -j4
+ displayName: 'Build CPython'
+
+- script: make pythoninfo
+ displayName: 'Display build info'
+
+- script: make buildbottest TESTOPTS="-j4 -uall,-cpu"
+ displayName: 'Tests'
--- /dev/null
+# Current docs for the syntax of this file are at:
+# https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted.md
+
+name: $(BuildDefinitionName)_$(Date:yyyyMMdd)$(Rev:.rr)
+
+queue:
+ name: Hosted VS2017
+ parallel: 2
+ matrix:
+ amd64:
+ buildOpt: -p x64
+ outDirSuffix: amd64
+ win32:
+ buildOpt:
+ outDirSuffix: win32
+
+trigger:
+ branches:
+ include:
+ - master
+ - 3.7
+ - 3.6
+ paths:
+ exclude:
+ - Doc/*
+ - Tools/*
+
+variables:
+ # Relocate build outputs outside of source directory to make cleaning faster
+ Py_IntDir: $(Build.BinariesDirectory)\obj
+ # UNDONE: Do not build to a different directory because of broken tests
+ Py_OutDir: $(Build.SourcesDirectory)\PCbuild
+ EXTERNAL_DIR: $(Build.BinariesDirectory)\externals
+
+steps:
+- checkout: self
+ clean: true
+ fetchDepth: 5
+
+- script: PCbuild\build.bat -e $(buildOpt)
+ displayName: 'Build CPython'
+
+- script: python.bat -m test.pythoninfo
+ displayName: 'Display build info'
+
+- script: PCbuild\rt.bat -q -uall -u-cpu -rwW --slowest --timeout=1200 -j0
+ displayName: 'Tests'
+ env:
+ PREFIX: $(Py_OutDir)\$(outDirSuffix)
--- /dev/null
+# Current docs for the syntax of this file are at:
+# https://github.com/Microsoft/vsts-agent/blob/master/docs/preview/yamlgettingstarted.md
+
+name: $(BuildDefinitionName)_$(Date:yyyyMMdd)$(Rev:.rr)
+
+queue:
+ name: Hosted VS2017
+ parallel: 2
+ matrix:
+ amd64:
+ buildOpt: -p x64
+ outDirSuffix: amd64
+ win32:
+ buildOpt:
+ outDirSuffix: win32
+
+trigger:
+ branches:
+ include:
+ - master
+ - 3.7
+ - 3.6
+ paths:
+ exclude:
+ - Doc/*
+ - Tools/*
+
+variables:
+ # Relocate build outputs outside of source directory to make cleaning faster
+ Py_IntDir: $(Build.BinariesDirectory)\obj
+ # UNDONE: Do not build to a different directory because of broken tests
+ Py_OutDir: $(Build.SourcesDirectory)\PCbuild
+ EXTERNAL_DIR: $(Build.BinariesDirectory)\externals
+
+steps:
+- checkout: self
+ clean: true
+ fetchDepth: 5
+
+- script: PCbuild\build.bat -e $(buildOpt)
+ displayName: 'Build CPython'
+
+- script: python.bat -m test.pythoninfo
+ displayName: 'Display build info'
+
+- script: PCbuild\rt.bat -q -uall -u-cpu -rwW --slowest --timeout=1200 -j0
+ displayName: 'Tests'
+ env:
+ PREFIX: $(Py_OutDir)\$(outDirSuffix)
Previously, :exc:`TypeError` was raised when embedded null code points
were encountered in the Python string.
+ .. deprecated-removed:: 3.3 4.0
+ Part of the old-style :c:type:`Py_UNICODE` API; please migrate to using
+ :c:func:`PyUnicode_AsWideCharString`.
+
``u#`` (:class:`str`) [Py_UNICODE \*, int]
This variant on ``u`` stores into two C variables, the first one a pointer to a
Unicode data buffer, the second one its length. This variant allows
null code points.
+ .. deprecated-removed:: 3.3 4.0
+ Part of the old-style :c:type:`Py_UNICODE` API; please migrate to using
+ :c:func:`PyUnicode_AsWideCharString`.
+
``Z`` (:class:`str` or ``None``) [Py_UNICODE \*]
Like ``u``, but the Python object may also be ``None``, in which case the
:c:type:`Py_UNICODE` pointer is set to *NULL*.
+ .. deprecated-removed:: 3.3 4.0
+ Part of the old-style :c:type:`Py_UNICODE` API; please migrate to using
+ :c:func:`PyUnicode_AsWideCharString`.
+
``Z#`` (:class:`str` or ``None``) [Py_UNICODE \*, int]
Like ``u#``, but the Python object may also be ``None``, in which case the
:c:type:`Py_UNICODE` pointer is set to *NULL*.
+ .. deprecated-removed:: 3.3 4.0
+ Part of the old-style :c:type:`Py_UNICODE` API; please migrate to using
+ :c:func:`PyUnicode_AsWideCharString`.
+
``U`` (:class:`str`) [PyObject \*]
Requires that the Python object is a Unicode object, without attempting
any conversion. Raises :exc:`TypeError` if the object is not a Unicode
``z#`` (:class:`str` or ``None``) [char \*, int]
Same as ``s#``.
- ``u`` (:class:`str`) [Py_UNICODE \*]
- Convert a null-terminated buffer of Unicode (UCS-2 or UCS-4) data to a Python
- Unicode object. If the Unicode buffer pointer is *NULL*, ``None`` is returned.
+ ``u`` (:class:`str`) [wchar_t \*]
+ Convert a null-terminated :c:type:`wchar_t` buffer of Unicode (UTF-16 or UCS-4)
+ data to a Python Unicode object. If the Unicode buffer pointer is *NULL*,
+ ``None`` is returned.
- ``u#`` (:class:`str`) [Py_UNICODE \*, int]
- Convert a Unicode (UCS-2 or UCS-4) data buffer and its length to a Python
+ ``u#`` (:class:`str`) [wchar_t \*, int]
+ Convert a Unicode (UTF-16 or UCS-4) data buffer and its length to a Python
Unicode object. If the Unicode buffer pointer is *NULL*, the length is ignored
and ``None`` is returned.
(*order* is ``'A'``). Return ``0`` otherwise.
+.. c:function:: int PyBuffer_ToContiguous(void *buf, Py_buffer *src, Py_ssize_t len, char order)
+
+ Copy *len* bytes from *src* to its contiguous representation in *buf*.
+ *order* can be ``'C'`` or ``'F'`` (for C-style or Fortran-style ordering).
+ ``0`` is returned on success, ``-1`` on error.
+
+ This function fails if *len* != *src->len*.
+
+
.. c:function:: void PyBuffer_FillContiguousStrides(int ndims, Py_ssize_t *shape, Py_ssize_t *strides, int itemsize, char order)
Fill the *strides* array with byte-strides of a :term:`contiguous` (C-style if
If this function is used as part of a :ref:`getbufferproc <buffer-structs>`,
*exporter* MUST be set to the exporting object and *flags* must be passed
unmodified. Otherwise, *exporter* MUST be NULL.
-
-
-
Refer to :ref:`using-capsules` for more information on using these objects.
+.. versionadded:: 3.1
+
.. c:type:: PyCapsule
regular import mechanism can be used to access C APIs defined in dynamically
loaded modules.
+
.. c:type:: PyCapsule_Destructor
The type of a destructor callback for a capsule. Defined as::
import the module conventionally (using :c:func:`PyImport_ImportModule`).
Return the capsule's internal *pointer* on success. On failure, set an
- exception and return *NULL*. However, if :c:func:`PyCapsule_Import` failed to
- import the module, and *no_block* was true, no exception is set.
+ exception and return *NULL*.
+
.. c:function:: int PyCapsule_IsValid(PyObject *capsule, const char *name)
Return a nonzero value if the object is valid and matches the name passed in.
Return ``0`` otherwise. This function will not fail.
+
.. c:function:: int PyCapsule_SetContext(PyObject *capsule, void *context)
Set the context pointer inside *capsule* to *context*.
Return ``0`` on success. Return nonzero and set an exception on failure.
+
.. c:function:: int PyCapsule_SetDestructor(PyObject *capsule, PyCapsule_Destructor destructor)
Set the destructor inside *capsule* to *destructor*.
Return ``0`` on success. Return nonzero and set an exception on failure.
+
.. c:function:: int PyCapsule_SetName(PyObject *capsule, const char *name)
Set the name inside *capsule* to *name*. If non-*NULL*, the name must
Return ``0`` on success. Return nonzero and set an exception on failure.
+
.. c:function:: int PyCapsule_SetPointer(PyObject *capsule, void *pointer)
Set the void pointer inside *capsule* to *pointer*. The pointer may not be
.. c:function:: TYPE* PyObject_GC_Resize(TYPE, PyVarObject *op, Py_ssize_t newsize)
Resize an object allocated by :c:func:`PyObject_NewVar`. Returns the
- resized object or *NULL* on failure.
+ resized object or *NULL* on failure. *op* must not be tracked by the collector yet.
.. c:function:: void PyObject_GC_Track(PyObject *op)
Mapping Protocol
================
+See also :c:func:`PyObject_GetItem`, :c:func:`PyObject_SetItem` and
+:c:func:`PyObject_DelItem`.
+
.. c:function:: int PyMapping_Check(PyObject *o)
- Return ``1`` if the object provides mapping protocol, and ``0`` otherwise. This
- function always succeeds.
+ Return ``1`` if the object provides mapping protocol or supports slicing,
+ and ``0`` otherwise. Note that it returns ``1`` for Python classes with
+ a :meth:`__getitem__` method since, in the general case, it is impossible
+ to determine what type of keys it supports. This function always
+ succeeds.
.. c:function:: Py_ssize_t PyMapping_Size(PyObject *o)
.. index:: builtin: len
- Returns the number of keys in object *o* on success, and ``-1`` on failure. For
- objects that do not provide mapping protocol, this is equivalent to the Python
- expression ``len(o)``.
+ Returns the number of keys in object *o* on success, and ``-1`` on failure.
+ This is equivalent to the Python expression ``len(o)``.
-.. c:function:: int PyMapping_DelItemString(PyObject *o, const char *key)
+.. c:function:: PyObject* PyMapping_GetItemString(PyObject *o, const char *key)
+
+ Return element of *o* corresponding to the string *key* or *NULL* on failure.
+ This is the equivalent of the Python expression ``o[key]``.
+ See also :c:func:`PyObject_GetItem`.
+
- Remove the mapping for object *key* from the object *o*. Return ``-1`` on
- failure. This is equivalent to the Python statement ``del o[key]``.
+.. c:function:: int PyMapping_SetItemString(PyObject *o, const char *key, PyObject *v)
+
+ Map the string *key* to the value *v* in object *o*. Returns ``-1`` on
+ failure. This is the equivalent of the Python statement ``o[key] = v``.
+ See also :c:func:`PyObject_SetItem`.
.. c:function:: int PyMapping_DelItem(PyObject *o, PyObject *key)
- Remove the mapping for object *key* from the object *o*. Return ``-1`` on
- failure. This is equivalent to the Python statement ``del o[key]``.
+ Remove the mapping for the object *key* from the object *o*. Return ``-1``
+ on failure. This is equivalent to the Python statement ``del o[key]``.
+ This is an alias of :c:func:`PyObject_DelItem`.
-.. c:function:: int PyMapping_HasKeyString(PyObject *o, const char *key)
+.. c:function:: int PyMapping_DelItemString(PyObject *o, const char *key)
- On success, return ``1`` if the mapping object has the key *key* and ``0``
- otherwise. This is equivalent to the Python expression ``key in o``.
- This function always succeeds.
+ Remove the mapping for the string *key* from the object *o*. Return ``-1``
+ on failure. This is equivalent to the Python statement ``del o[key]``.
.. c:function:: int PyMapping_HasKey(PyObject *o, PyObject *key)
- Return ``1`` if the mapping object has the key *key* and ``0`` otherwise. This
- is equivalent to the Python expression ``key in o``. This function always
- succeeds.
+ Return ``1`` if the mapping object has the key *key* and ``0`` otherwise.
+ This is equivalent to the Python expression ``key in o``.
+ This function always succeeds.
+
+
+.. c:function:: int PyMapping_HasKeyString(PyObject *o, const char *key)
+
+ Return ``1`` if the mapping object has the key *key* and ``0`` otherwise.
+ This is equivalent to the Python expression ``key in o``.
+ This function always succeeds.
.. c:function:: PyObject* PyMapping_Keys(PyObject *o)
On success, return a list or tuple of the items in object *o*, where each item
is a tuple containing a key-value pair. On failure, return *NULL*.
-
-
-.. c:function:: PyObject* PyMapping_GetItemString(PyObject *o, const char *key)
-
- Return element of *o* corresponding to the object *key* or *NULL* on failure.
- This is the equivalent of the Python expression ``o[key]``.
-
-
-.. c:function:: int PyMapping_SetItemString(PyObject *o, const char *key, PyObject *v)
-
- Map the object *key* to the value *v* in object *o*. Returns ``-1`` on failure.
- This is the equivalent of the Python statement ``o[key] = v``.
.. c:member:: traverseproc m_traverse
A traversal function to call during GC traversal of the module object, or
- *NULL* if not needed.
+ *NULL* if not needed. This function may be called before module state
+ is allocated (:c:func:`PyModule_GetState()` may return ``NULL``),
+ and before the :c:member:`Py_mod_exec` function is executed.
.. c:member:: inquiry m_clear
A clear function to call during GC clearing of the module object, or
- *NULL* if not needed.
+ *NULL* if not needed. This function may be called before module state
+ is allocated (:c:func:`PyModule_GetState()` may return ``NULL``),
+ and before the :c:member:`Py_mod_exec` function is executed.
.. c:member:: freefunc m_free
A function to call during deallocation of the module object, or *NULL* if
- not needed.
+ not needed. This function may be called before module state
+ is allocated (:c:func:`PyModule_GetState()` may return ``NULL``),
+ and before the :c:member:`Py_mod_exec` function is executed.
Single-phase initialization
...........................
parameters must be non-*NULL*.
-.. c:function:: Py_ssize_t PyObject_Length(PyObject *o)
- Py_ssize_t PyObject_Size(PyObject *o)
+.. c:function:: Py_ssize_t PyObject_Size(PyObject *o)
+ Py_ssize_t PyObject_Length(PyObject *o)
.. index:: builtin: len
.. c:function:: int PyObject_DelItem(PyObject *o, PyObject *key)
- Delete the mapping for *key* from *o*. Returns ``-1`` on failure. This is the
- equivalent of the Python statement ``del o[key]``.
+ Remove the mapping for the object *key* from the object *o*. Return ``-1``
+ on failure. This is equivalent to the Python statement ``del o[key]``.
.. c:function:: PyObject* PyObject_Dir(PyObject *o)
.. c:function:: int PySequence_Check(PyObject *o)
Return ``1`` if the object provides sequence protocol, and ``0`` otherwise.
- This function always succeeds.
+ Note that it returns ``1`` for Python classes with a :meth:`__getitem__`
+ method unless they are :class:`dict` subclasses since, in the general
+ case, it is impossible to determine what type of keys it supports. This
+ function always succeeds.
.. c:function:: Py_ssize_t PySequence_Size(PyObject *o)
.. index:: builtin: tuple
- Return a tuple object with the same contents as the arbitrary sequence *o* or
- *NULL* on failure. If *o* is a tuple, a new reference will be returned,
+ Return a tuple object with the same contents as the sequence or iterable *o*,
+ or *NULL* on failure. If *o* is a tuple, a new reference will be returned,
otherwise a tuple will be constructed with the appropriate contents. This is
equivalent to the Python expression ``tuple(o)``.
.. c:function:: PyObject* PySequence_Fast(PyObject *o, const char *m)
- Return the sequence *o* as a list, unless it is already a tuple or list, in
+ Return the sequence or iterable *o* as a list, unless it is already a tuple or list, in
which case *o* is returned. Use :c:func:`PySequence_Fast_GET_ITEM` to access
the members of the result. Returns *NULL* on failure. If the object is not
- a sequence, raises :exc:`TypeError` with *m* as the message text.
+ a sequence or iterable, raises :exc:`TypeError` with *m* as the message text.
+
+
+.. c:function:: Py_ssize_t PySequence_Fast_GET_SIZE(PyObject *o)
+
+ Returns the length of *o*, assuming that *o* was returned by
+ :c:func:`PySequence_Fast` and that *o* is not *NULL*. The size can also be
+ gotten by calling :c:func:`PySequence_Size` on *o*, but
+ :c:func:`PySequence_Fast_GET_SIZE` is faster because it can assume *o* is a list
+ or tuple.
.. c:function:: PyObject* PySequence_Fast_GET_ITEM(PyObject *o, Py_ssize_t i)
:c:func:`PySequence_GetItem` but without checking that
:c:func:`PySequence_Check` on *o* is true and without adjustment for negative
indices.
-
-
-.. c:function:: Py_ssize_t PySequence_Fast_GET_SIZE(PyObject *o)
-
- Returns the length of *o*, assuming that *o* was returned by
- :c:func:`PySequence_Fast` and that *o* is not *NULL*. The size can also be
- gotten by calling :c:func:`PySequence_Size` on *o*, but
- :c:func:`PySequence_Fast_GET_SIZE` is faster because it can assume *o* is a list
- or tuple.
.. c:member:: lenfunc PyMappingMethods.mp_length
- This function is used by :c:func:`PyMapping_Length` and
+ This function is used by :c:func:`PyMapping_Size` and
:c:func:`PyObject_Size`, and has the same signature. This slot may be set to
*NULL* if the object has no defined length.
.. c:member:: binaryfunc PyMappingMethods.mp_subscript
- This function is used by :c:func:`PyObject_GetItem` and has the same
- signature. This slot must be filled for the :c:func:`PyMapping_Check`
- function to return ``1``, it can be *NULL* otherwise.
+ This function is used by :c:func:`PyObject_GetItem` and
+ :c:func:`PySequence_GetSlice`, and has the same signature as
+ :c:func:`!PyObject_GetItem`. This slot must be filled for the
+ :c:func:`PyMapping_Check` function to return ``1``, it can be *NULL*
+ otherwise.
.. c:member:: objobjargproc PyMappingMethods.mp_ass_subscript
- This function is used by :c:func:`PyObject_SetItem` and
- :c:func:`PyObject_DelItem`. It has the same signature as
- :c:func:`PyObject_SetItem`, but *v* can also be set to *NULL* to delete
+ This function is used by :c:func:`PyObject_SetItem`,
+ :c:func:`PyObject_DelItem`, :c:func:`PyObject_SetSlice` and
+ :c:func:`PyObject_DelSlice`. It has the same signature as
+ :c:func:`!PyObject_SetItem`, but *v* can also be set to *NULL* to delete
an item. If this slot is *NULL*, the object does not support item
assignment and deletion.
.. c:member:: lenfunc PySequenceMethods.sq_length
- This function is used by :c:func:`PySequence_Size` and :c:func:`PyObject_Size`,
- and has the same signature.
+ This function is used by :c:func:`PySequence_Size` and
+ :c:func:`PyObject_Size`, and has the same signature. It is also used for
+ handling negative indices via the :c:member:`~PySequenceMethods.sq_item`
+ and the :c:member:`~PySequenceMethods.sq_ass_item` slots.
.. c:member:: binaryfunc PySequenceMethods.sq_concat
This function is used by :c:func:`PySequence_Concat` and has the same
signature. It is also used by the ``+`` operator, after trying the numeric
- addition via the :c:member:`~PyTypeObject.tp_as_number.nb_add` slot.
+ addition via the :c:member:`~PyNumberMethods.nb_add` slot.
.. c:member:: ssizeargfunc PySequenceMethods.sq_repeat
This function is used by :c:func:`PySequence_Repeat` and has the same
signature. It is also used by the ``*`` operator, after trying numeric
- multiplication via the :c:member:`~PyTypeObject.tp_as_number.nb_multiply`
- slot.
+ multiplication via the :c:member:`~PyNumberMethods.nb_multiply` slot.
.. c:member:: ssizeargfunc PySequenceMethods.sq_item
This function is used by :c:func:`PySequence_GetItem` and has the same
- signature. This slot must be filled for the :c:func:`PySequence_Check`
+ signature. It is also used by :c:func:`PyObject_GetItem`, after trying
+ the subscription via the :c:member:`~PyMappingMethods.mp_subscript` slot.
+ This slot must be filled for the :c:func:`PySequence_Check`
function to return ``1``, it can be *NULL* otherwise.
Negative indexes are handled as follows: if the :attr:`sq_length` slot is
.. c:member:: ssizeobjargproc PySequenceMethods.sq_ass_item
This function is used by :c:func:`PySequence_SetItem` and has the same
- signature. This slot may be left to *NULL* if the object does not support
+ signature. It is also used by :c:func:`PyObject_SetItem` and
+ :c:func:`PyObject_DelItem`, after trying the item assignment and deletion
+ via the :c:member:`~PyMappingMethods.mp_ass_subscript` slot.
+ This slot may be left to *NULL* if the object does not support
item assignment and deletion.
.. c:member:: objobjproc PySequenceMethods.sq_contains
This function may be used by :c:func:`PySequence_Contains` and has the same
signature. This slot may be left to *NULL*, in this case
- :c:func:`PySequence_Contains` simply traverses the sequence until it finds a
- match.
+ :c:func:`!PySequence_Contains` simply traverses the sequence until it
+ finds a match.
.. c:member:: binaryfunc PySequenceMethods.sq_inplace_concat
This function is used by :c:func:`PySequence_InPlaceConcat` and has the same
- signature. It should modify its first operand, and return it.
+ signature. It should modify its first operand, and return it. This slot
+ may be left to *NULL*, in this case :c:func:`!PySequence_InPlaceConcat`
+ will fall back to :c:func:`PySequence_Concat`. It is also used by the
+ augmented assignment ``+=``, after trying numeric inplace addition
+ via the :c:member:`~PyNumberMethods.nb_inplace_add` slot.
.. c:member:: ssizeargfunc PySequenceMethods.sq_inplace_repeat
This function is used by :c:func:`PySequence_InPlaceRepeat` and has the same
- signature. It should modify its first operand, and return it.
-
-.. XXX need to explain precedence between mapping and sequence
-.. XXX explains when to implement the sq_inplace_* slots
+ signature. It should modify its first operand, and return it. This slot
+ may be left to *NULL*, in this case :c:func:`!PySequence_InPlaceRepeat`
+ will fall back to :c:func:`PySequence_Repeat`. It is also used by the
+ augmented assignment ``*=``, after trying numeric inplace multiplication
+ via the :c:member:`~PyNumberMethods.nb_inplace_multiply` slot.
.. _buffer-structs:
Key terms
=========
-* the `Python Packaging Index <https://pypi.python.org/pypi>`__ is a public
+* the `Python Package Index <https://pypi.org>`__ is a public
repository of open source licensed packages made available for use by
other Python users
* the `Python Packaging Authority
| | be built | :class:`distutils.core.Extension` |
+--------------------+--------------------------------+-------------------------------------------------------------+
| *classifiers* | A list of categories for the | a list of strings; valid classifiers are listed on `PyPI |
- | | package | <https://pypi.python.org/pypi?:action=list_classifiers>`_. |
+ | | package | <https://pypi.org/classifiers>`_. |
+--------------------+--------------------------------+-------------------------------------------------------------+
| *distclass* | the :class:`Distribution` | a subclass of |
| | class to use | :class:`distutils.core.Distribution` |
* installers can override anything in :file:`setup.cfg` using the command-line
options to :file:`setup.py`
-The basic syntax of the configuration file is simple::
+The basic syntax of the configuration file is simple:
+
+.. code-block:: ini
[command]
option=value
continuation lines.
You can find out the list of options supported by a particular command with the
-universal :option:`!--help` option, e.g. ::
+universal :option:`!--help` option, e.g.
+
+.. code-block:: shell-session
- > python setup.py --help build_ext
+ $ python setup.py --help build_ext
[...]
Options for 'build_ext' command:
--build-lib (-b) directory for compiled extension modules
have an extension :mod:`pkg.ext`, and you want the compiled extension file
(:file:`ext.so` on Unix, say) to be put in the same source directory as your
pure Python modules :mod:`pkg.mod1` and :mod:`pkg.mod2`. You can always use the
-:option:`!--inplace` option on the command-line to ensure this::
+:option:`!--inplace` option on the command-line to ensure this:
+
+.. code-block:: sh
python setup.py build_ext --inplace
But this requires that you always specify the :command:`build_ext` command
explicitly, and remember to provide :option:`!--inplace`. An easier way is to
"set and forget" this option, by encoding it in :file:`setup.cfg`, the
-configuration file for this distribution::
+configuration file for this distribution:
+
+.. code-block:: ini
[build_ext]
inplace=1
the Distutils (such as the list of files installed). But some of it has to be
supplied as options to :command:`bdist_rpm`, which would be very tedious to do
on the command-line for every run. Hence, here is a snippet from the Distutils'
-own :file:`setup.cfg`::
+own :file:`setup.cfg`:
+
+.. code-block:: ini
[bdist_rpm]
release = 1
module distribution
a collection of Python modules distributed together as a single downloadable
resource and meant to be installed *en masse*. Examples of some well-known
- module distributions are NumPy, SciPy, PIL (the Python Imaging
- Library), or mxBase. (This would be called a *package*, except that term is
+ module distributions are NumPy, SciPy, Pillow,
+ or mxBase. (This would be called a *package*, except that term is
already taken in the Python context: a single module distribution may contain
zero, one, or many Python packages.)
existence of a :file:`.pypirc` file at the location :file:`$HOME/.pypirc`.
If this file exists, the command uses the username, password, and repository
URL configured in the file. The format of a :file:`.pypirc` file is as
-follows::
+follows:
+
+.. code-block:: ini
[distutils]
index-servers =
will be prompted to type it when needed.
If you want to define another server a new section can be created and
-listed in the *index-servers* variable::
+listed in the *index-servers* variable:
+
+.. code-block:: ini
[distutils]
index-servers =
successfully.
-.. _Python Package Index (PyPI): https://pypi.python.org/pypi
+.. _Python Package Index (PyPI): https://pypi.org
(4)
These fields should not be used if your package is to be compatible with Python
versions prior to 2.2.3 or 2.3. The list is available from the `PyPI website
- <https://pypi.python.org/pypi>`_.
+ <https://pypi.org/>`_.
(5)
The ``long_description`` field is used by PyPI when you are
programmatically extract the configuration values that you will want to
combine together. For example:
-.. code-block:: python
+.. code-block:: pycon
>>> import sysconfig
>>> sysconfig.get_config_var('LIBS')
Python fans...) and let's say we want to create a Python interface to the C
library function :c:func:`system` [#]_. This function takes a null-terminated
character string as argument and returns an integer. We want this function to
-be callable from Python as follows::
+be callable from Python as follows:
+
+.. code-block:: pycon
>>> import spam
>>> status = spam.system("ls -l")
and rebuild the interpreter. Luckily, this is very simple on Unix: just place
your file (:file:`spammodule.c` for example) in the :file:`Modules/` directory
of an unpacked source distribution, add a line to the file
-:file:`Modules/Setup.local` describing your file::
+:file:`Modules/Setup.local` describing your file:
+
+.. code-block:: sh
spam spammodule.o
:file:`Setup` file.)
If your module requires additional libraries to link with, these can be listed
-on the line in the configuration file as well, for instance::
+on the line in the configuration file as well, for instance:
+
+.. code-block:: sh
spam spammodule.o -lX11
=============================
This guide only covers the basic tools for creating extensions provided
-as part of this version of CPython. Third party tools like Cython,
-``cffi``, SWIG and Numba offer both simpler and more sophisticated
-approaches to creating C and C++ extensions for Python.
+as part of this version of CPython. Third party tools like
+`Cython <http://cython.org/>`_, `cffi <https://cffi.readthedocs.io>`_,
+`SWIG <http://www.swig.org>`_ and `Numba <https://numba.pydata.org/>`_
+offer both simpler and more sophisticated approaches to creating C and C++
+extensions for Python.
.. seealso::
:numbered:
extending.rst
+ newtypes_tutorial.rst
newtypes.rst
building.rst
windows.rst
.. highlightlang:: c
-
-.. _defining-new-types:
-
-******************
-Defining New Types
-******************
-
-.. sectionauthor:: Michael Hudson <mwh@python.net>
-.. sectionauthor:: Dave Kuhlman <dkuhlman@rexx.com>
-.. sectionauthor:: Jim Fulton <jim@zope.com>
-
-
-As mentioned in the last chapter, Python allows the writer of an extension
-module to define new types that can be manipulated from Python code, much like
-strings and lists in core Python.
-
-This is not hard; the code for all extension types follows a pattern, but there
-are some details that you need to understand before you can get started.
-
-
-.. _dnt-basics:
-
-The Basics
-==========
-
-The Python runtime sees all Python objects as variables of type
-:c:type:`PyObject\*`, which serves as a "base type" for all Python objects.
-:c:type:`PyObject` itself only contains the refcount and a pointer to the
-object's "type object". This is where the action is; the type object determines
-which (C) functions get called when, for instance, an attribute gets looked
-up on an object or it is multiplied by another object. These C functions
-are called "type methods".
-
-So, if you want to define a new object type, you need to create a new type
-object.
-
-This sort of thing can only be explained by example, so here's a minimal, but
-complete, module that defines a new type:
-
-.. literalinclude:: ../includes/noddy.c
-
-
-Now that's quite a bit to take in at once, but hopefully bits will seem familiar
-from the last chapter.
-
-The first bit that will be new is::
-
- typedef struct {
- PyObject_HEAD
- } noddy_NoddyObject;
-
-This is what a Noddy object will contain---in this case, nothing more than what
-every Python object contains---a field called ``ob_base`` of type
-:c:type:`PyObject`. :c:type:`PyObject` in turn, contains an ``ob_refcnt``
-field and a pointer to a type object. These can be accessed using the macros
-:c:macro:`Py_REFCNT` and :c:macro:`Py_TYPE` respectively. These are the fields
-the :c:macro:`PyObject_HEAD` macro brings in. The reason for the macro is to
-standardize the layout and to enable special debugging fields in debug builds.
-
-Note that there is no semicolon after the :c:macro:`PyObject_HEAD` macro;
-one is included in the macro definition. Be wary of adding one by
-accident; it's easy to do from habit, and your compiler might not complain,
-but someone else's probably will! (On Windows, MSVC is known to call this an
-error and refuse to compile the code.)
-
-For contrast, let's take a look at the corresponding definition for standard
-Python floats::
-
- typedef struct {
- PyObject_HEAD
- double ob_fval;
- } PyFloatObject;
-
-Moving on, we come to the crunch --- the type object. ::
-
- static PyTypeObject noddy_NoddyType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "noddy.Noddy", /* tp_name */
- sizeof(noddy_NoddyObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_as_async */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- "Noddy objects", /* tp_doc */
- };
-
-Now if you go and look up the definition of :c:type:`PyTypeObject` in
-:file:`object.h` you'll see that it has many more fields that the definition
-above. The remaining fields will be filled with zeros by the C compiler, and
-it's common practice to not specify them explicitly unless you need them.
-
-This is so important that we're going to pick the top of it apart still
-further::
-
- PyVarObject_HEAD_INIT(NULL, 0)
-
-This line is a bit of a wart; what we'd like to write is::
-
- PyVarObject_HEAD_INIT(&PyType_Type, 0)
-
-as the type of a type object is "type", but this isn't strictly conforming C and
-some compilers complain. Fortunately, this member will be filled in for us by
-:c:func:`PyType_Ready`. ::
-
- "noddy.Noddy", /* tp_name */
-
-The name of our type. This will appear in the default textual representation of
-our objects and in some error messages, for example::
-
- >>> "" + noddy.new_noddy()
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: cannot add type "noddy.Noddy" to string
-
-Note that the name is a dotted name that includes both the module name and the
-name of the type within the module. The module in this case is :mod:`noddy` and
-the type is :class:`Noddy`, so we set the type name to :class:`noddy.Noddy`.
-One side effect of using an undotted name is that the pydoc documentation tool
-will not list the new type in the module documentation. ::
-
- sizeof(noddy_NoddyObject), /* tp_basicsize */
-
-This is so that Python knows how much memory to allocate when you call
-:c:func:`PyObject_New`.
-
-.. note::
-
- If you want your type to be subclassable from Python, and your type has the same
- :c:member:`~PyTypeObject.tp_basicsize` as its base type, you may have problems with multiple
- inheritance. A Python subclass of your type will have to list your type first
- in its :attr:`~class.__bases__`, or else it will not be able to call your type's
- :meth:`__new__` method without getting an error. You can avoid this problem by
- ensuring that your type has a larger value for :c:member:`~PyTypeObject.tp_basicsize` than its
- base type does. Most of the time, this will be true anyway, because either your
- base type will be :class:`object`, or else you will be adding data members to
- your base type, and therefore increasing its size.
-
-::
-
- 0, /* tp_itemsize */
-
-This has to do with variable length objects like lists and strings. Ignore this
-for now.
-
-Skipping a number of type methods that we don't provide, we set the class flags
-to :const:`Py_TPFLAGS_DEFAULT`. ::
-
- Py_TPFLAGS_DEFAULT, /* tp_flags */
-
-All types should include this constant in their flags. It enables all of the
-members defined until at least Python 3.3. If you need further members,
-you will need to OR the corresponding flags.
-
-We provide a doc string for the type in :c:member:`~PyTypeObject.tp_doc`. ::
-
- "Noddy objects", /* tp_doc */
-
-Now we get into the type methods, the things that make your objects different
-from the others. We aren't going to implement any of these in this version of
-the module. We'll expand this example later to have more interesting behavior.
-
-For now, all we want to be able to do is to create new :class:`Noddy` objects.
-To enable object creation, we have to provide a :c:member:`~PyTypeObject.tp_new` implementation.
-In this case, we can just use the default implementation provided by the API
-function :c:func:`PyType_GenericNew`. We'd like to just assign this to the
-:c:member:`~PyTypeObject.tp_new` slot, but we can't, for portability sake, On some platforms or
-compilers, we can't statically initialize a structure member with a function
-defined in another C module, so, instead, we'll assign the :c:member:`~PyTypeObject.tp_new` slot
-in the module initialization function just before calling
-:c:func:`PyType_Ready`::
-
- noddy_NoddyType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&noddy_NoddyType) < 0)
- return;
-
-All the other type methods are *NULL*, so we'll go over them later --- that's
-for a later section!
-
-Everything else in the file should be familiar, except for some code in
-:c:func:`PyInit_noddy`::
-
- if (PyType_Ready(&noddy_NoddyType) < 0)
- return;
-
-This initializes the :class:`Noddy` type, filing in a number of members,
-including :attr:`ob_type` that we initially set to *NULL*. ::
-
- PyModule_AddObject(m, "Noddy", (PyObject *)&noddy_NoddyType);
-
-This adds the type to the module dictionary. This allows us to create
-:class:`Noddy` instances by calling the :class:`Noddy` class::
-
- >>> import noddy
- >>> mynoddy = noddy.Noddy()
-
-That's it! All that remains is to build it; put the above code in a file called
-:file:`noddy.c` and ::
-
- from distutils.core import setup, Extension
- setup(name="noddy", version="1.0",
- ext_modules=[Extension("noddy", ["noddy.c"])])
-
-in a file called :file:`setup.py`; then typing
-
-.. code-block:: shell-session
-
- $ python setup.py build
-
-at a shell should produce a file :file:`noddy.so` in a subdirectory; move to
-that directory and fire up Python --- you should be able to ``import noddy`` and
-play around with Noddy objects.
-
-That wasn't so hard, was it?
-
-Of course, the current Noddy type is pretty uninteresting. It has no data and
-doesn't do anything. It can't even be subclassed.
-
-
-Adding data and methods to the Basic example
---------------------------------------------
-
-Let's extend the basic example to add some data and methods. Let's also make
-the type usable as a base class. We'll create a new module, :mod:`noddy2` that
-adds these capabilities:
-
-.. literalinclude:: ../includes/noddy2.c
-
-
-This version of the module has a number of changes.
-
-We've added an extra include::
-
- #include <structmember.h>
-
-This include provides declarations that we use to handle attributes, as
-described a bit later.
-
-The name of the :class:`Noddy` object structure has been shortened to
-:class:`Noddy`. The type object name has been shortened to :class:`NoddyType`.
-
-The :class:`Noddy` type now has three data attributes, *first*, *last*, and
-*number*. The *first* and *last* variables are Python strings containing first
-and last names. The *number* attribute is an integer.
-
-The object structure is updated accordingly::
-
- typedef struct {
- PyObject_HEAD
- PyObject *first;
- PyObject *last;
- int number;
- } Noddy;
-
-Because we now have data to manage, we have to be more careful about object
-allocation and deallocation. At a minimum, we need a deallocation method::
-
- static void
- Noddy_dealloc(Noddy* self)
- {
- Py_XDECREF(self->first);
- Py_XDECREF(self->last);
- Py_TYPE(self)->tp_free((PyObject*)self);
- }
-
-which is assigned to the :c:member:`~PyTypeObject.tp_dealloc` member::
-
- (destructor)Noddy_dealloc, /*tp_dealloc*/
-
-This method decrements the reference counts of the two Python attributes. We use
-:c:func:`Py_XDECREF` here because the :attr:`first` and :attr:`last` members
-could be *NULL*. It then calls the :c:member:`~PyTypeObject.tp_free` member of the object's type
-to free the object's memory. Note that the object's type might not be
-:class:`NoddyType`, because the object may be an instance of a subclass.
-
-We want to make sure that the first and last names are initialized to empty
-strings, so we provide a new method::
-
- static PyObject *
- Noddy_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
- {
- Noddy *self;
-
- self = (Noddy *)type->tp_alloc(type, 0);
- if (self != NULL) {
- self->first = PyUnicode_FromString("");
- if (self->first == NULL) {
- Py_DECREF(self);
- return NULL;
- }
-
- self->last = PyUnicode_FromString("");
- if (self->last == NULL) {
- Py_DECREF(self);
- return NULL;
- }
-
- self->number = 0;
- }
-
- return (PyObject *)self;
- }
-
-and install it in the :c:member:`~PyTypeObject.tp_new` member::
-
- Noddy_new, /* tp_new */
-
-The new member is responsible for creating (as opposed to initializing) objects
-of the type. It is exposed in Python as the :meth:`__new__` method. See the
-paper titled "Unifying types and classes in Python" for a detailed discussion of
-the :meth:`__new__` method. One reason to implement a new method is to assure
-the initial values of instance variables. In this case, we use the new method
-to make sure that the initial values of the members :attr:`first` and
-:attr:`last` are not *NULL*. If we didn't care whether the initial values were
-*NULL*, we could have used :c:func:`PyType_GenericNew` as our new method, as we
-did before. :c:func:`PyType_GenericNew` initializes all of the instance variable
-members to *NULL*.
-
-The new method is a static method that is passed the type being instantiated and
-any arguments passed when the type was called, and that returns the new object
-created. New methods always accept positional and keyword arguments, but they
-often ignore the arguments, leaving the argument handling to initializer
-methods. Note that if the type supports subclassing, the type passed may not be
-the type being defined. The new method calls the :c:member:`~PyTypeObject.tp_alloc` slot to
-allocate memory. We don't fill the :c:member:`~PyTypeObject.tp_alloc` slot ourselves. Rather
-:c:func:`PyType_Ready` fills it for us by inheriting it from our base class,
-which is :class:`object` by default. Most types use the default allocation.
-
-.. note::
-
- If you are creating a co-operative :c:member:`~PyTypeObject.tp_new` (one that calls a base type's
- :c:member:`~PyTypeObject.tp_new` or :meth:`__new__`), you must *not* try to determine what method
- to call using method resolution order at runtime. Always statically determine
- what type you are going to call, and call its :c:member:`~PyTypeObject.tp_new` directly, or via
- ``type->tp_base->tp_new``. If you do not do this, Python subclasses of your
- type that also inherit from other Python-defined classes may not work correctly.
- (Specifically, you may not be able to create instances of such subclasses
- without getting a :exc:`TypeError`.)
-
-We provide an initialization function::
-
- static int
- Noddy_init(Noddy *self, PyObject *args, PyObject *kwds)
- {
- PyObject *first=NULL, *last=NULL, *tmp;
-
- static char *kwlist[] = {"first", "last", "number", NULL};
-
- if (! PyArg_ParseTupleAndKeywords(args, kwds, "|OOi", kwlist,
- &first, &last,
- &self->number))
- return -1;
-
- if (first) {
- tmp = self->first;
- Py_INCREF(first);
- self->first = first;
- Py_XDECREF(tmp);
- }
-
- if (last) {
- tmp = self->last;
- Py_INCREF(last);
- self->last = last;
- Py_XDECREF(tmp);
- }
-
- return 0;
- }
-
-by filling the :c:member:`~PyTypeObject.tp_init` slot. ::
-
- (initproc)Noddy_init, /* tp_init */
-
-The :c:member:`~PyTypeObject.tp_init` slot is exposed in Python as the :meth:`__init__` method. It
-is used to initialize an object after it's created. Unlike the new method, we
-can't guarantee that the initializer is called. The initializer isn't called
-when unpickling objects and it can be overridden. Our initializer accepts
-arguments to provide initial values for our instance. Initializers always accept
-positional and keyword arguments. Initializers should return either ``0`` on
-success or ``-1`` on error.
-
-Initializers can be called multiple times. Anyone can call the :meth:`__init__`
-method on our objects. For this reason, we have to be extra careful when
-assigning the new values. We might be tempted, for example to assign the
-:attr:`first` member like this::
-
- if (first) {
- Py_XDECREF(self->first);
- Py_INCREF(first);
- self->first = first;
- }
-
-But this would be risky. Our type doesn't restrict the type of the
-:attr:`first` member, so it could be any kind of object. It could have a
-destructor that causes code to be executed that tries to access the
-:attr:`first` member. To be paranoid and protect ourselves against this
-possibility, we almost always reassign members before decrementing their
-reference counts. When don't we have to do this?
-
-* when we absolutely know that the reference count is greater than 1
-
-* when we know that deallocation of the object [#]_ will not cause any calls
- back into our type's code
-
-* when decrementing a reference count in a :c:member:`~PyTypeObject.tp_dealloc` handler when
- garbage-collections is not supported [#]_
-
-We want to expose our instance variables as attributes. There are a
-number of ways to do that. The simplest way is to define member definitions::
-
- static PyMemberDef Noddy_members[] = {
- {"first", T_OBJECT_EX, offsetof(Noddy, first), 0,
- "first name"},
- {"last", T_OBJECT_EX, offsetof(Noddy, last), 0,
- "last name"},
- {"number", T_INT, offsetof(Noddy, number), 0,
- "noddy number"},
- {NULL} /* Sentinel */
- };
-
-and put the definitions in the :c:member:`~PyTypeObject.tp_members` slot::
-
- Noddy_members, /* tp_members */
-
-Each member definition has a member name, type, offset, access flags and
-documentation string. See the :ref:`Generic-Attribute-Management` section below for
-details.
-
-A disadvantage of this approach is that it doesn't provide a way to restrict the
-types of objects that can be assigned to the Python attributes. We expect the
-first and last names to be strings, but any Python objects can be assigned.
-Further, the attributes can be deleted, setting the C pointers to *NULL*. Even
-though we can make sure the members are initialized to non-*NULL* values, the
-members can be set to *NULL* if the attributes are deleted.
-
-We define a single method, :meth:`name`, that outputs the objects name as the
-concatenation of the first and last names. ::
-
- static PyObject *
- Noddy_name(Noddy* self)
- {
- if (self->first == NULL) {
- PyErr_SetString(PyExc_AttributeError, "first");
- return NULL;
- }
-
- if (self->last == NULL) {
- PyErr_SetString(PyExc_AttributeError, "last");
- return NULL;
- }
-
- return PyUnicode_FromFormat("%S %S", self->first, self->last);
- }
-
-The method is implemented as a C function that takes a :class:`Noddy` (or
-:class:`Noddy` subclass) instance as the first argument. Methods always take an
-instance as the first argument. Methods often take positional and keyword
-arguments as well, but in this case we don't take any and don't need to accept
-a positional argument tuple or keyword argument dictionary. This method is
-equivalent to the Python method::
-
- def name(self):
- return "%s %s" % (self.first, self.last)
-
-Note that we have to check for the possibility that our :attr:`first` and
-:attr:`last` members are *NULL*. This is because they can be deleted, in which
-case they are set to *NULL*. It would be better to prevent deletion of these
-attributes and to restrict the attribute values to be strings. We'll see how to
-do that in the next section.
-
-Now that we've defined the method, we need to create an array of method
-definitions::
-
- static PyMethodDef Noddy_methods[] = {
- {"name", (PyCFunction)Noddy_name, METH_NOARGS,
- "Return the name, combining the first and last name"
- },
- {NULL} /* Sentinel */
- };
-
-and assign them to the :c:member:`~PyTypeObject.tp_methods` slot::
-
- Noddy_methods, /* tp_methods */
-
-Note that we used the :const:`METH_NOARGS` flag to indicate that the method is
-passed no arguments.
-
-Finally, we'll make our type usable as a base class. We've written our methods
-carefully so far so that they don't make any assumptions about the type of the
-object being created or used, so all we need to do is to add the
-:const:`Py_TPFLAGS_BASETYPE` to our class flag definition::
-
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
-
-We rename :c:func:`PyInit_noddy` to :c:func:`PyInit_noddy2` and update the module
-name in the :c:type:`PyModuleDef` struct.
-
-Finally, we update our :file:`setup.py` file to build the new module::
-
- from distutils.core import setup, Extension
- setup(name="noddy", version="1.0",
- ext_modules=[
- Extension("noddy", ["noddy.c"]),
- Extension("noddy2", ["noddy2.c"]),
- ])
-
-
-Providing finer control over data attributes
---------------------------------------------
-
-In this section, we'll provide finer control over how the :attr:`first` and
-:attr:`last` attributes are set in the :class:`Noddy` example. In the previous
-version of our module, the instance variables :attr:`first` and :attr:`last`
-could be set to non-string values or even deleted. We want to make sure that
-these attributes always contain strings.
-
-.. literalinclude:: ../includes/noddy3.c
-
-
-To provide greater control, over the :attr:`first` and :attr:`last` attributes,
-we'll use custom getter and setter functions. Here are the functions for
-getting and setting the :attr:`first` attribute::
-
- Noddy_getfirst(Noddy *self, void *closure)
- {
- Py_INCREF(self->first);
- return self->first;
- }
-
- static int
- Noddy_setfirst(Noddy *self, PyObject *value, void *closure)
- {
- if (value == NULL) {
- PyErr_SetString(PyExc_TypeError, "Cannot delete the first attribute");
- return -1;
- }
-
- if (! PyUnicode_Check(value)) {
- PyErr_SetString(PyExc_TypeError,
- "The first attribute value must be a str");
- return -1;
- }
-
- Py_DECREF(self->first);
- Py_INCREF(value);
- self->first = value;
-
- return 0;
- }
-
-The getter function is passed a :class:`Noddy` object and a "closure", which is
-void pointer. In this case, the closure is ignored. (The closure supports an
-advanced usage in which definition data is passed to the getter and setter. This
-could, for example, be used to allow a single set of getter and setter functions
-that decide the attribute to get or set based on data in the closure.)
-
-The setter function is passed the :class:`Noddy` object, the new value, and the
-closure. The new value may be *NULL*, in which case the attribute is being
-deleted. In our setter, we raise an error if the attribute is deleted or if the
-attribute value is not a string.
-
-We create an array of :c:type:`PyGetSetDef` structures::
-
- static PyGetSetDef Noddy_getseters[] = {
- {"first",
- (getter)Noddy_getfirst, (setter)Noddy_setfirst,
- "first name",
- NULL},
- {"last",
- (getter)Noddy_getlast, (setter)Noddy_setlast,
- "last name",
- NULL},
- {NULL} /* Sentinel */
- };
-
-and register it in the :c:member:`~PyTypeObject.tp_getset` slot::
-
- Noddy_getseters, /* tp_getset */
-
-to register our attribute getters and setters.
-
-The last item in a :c:type:`PyGetSetDef` structure is the closure mentioned
-above. In this case, we aren't using the closure, so we just pass *NULL*.
-
-We also remove the member definitions for these attributes::
-
- static PyMemberDef Noddy_members[] = {
- {"number", T_INT, offsetof(Noddy, number), 0,
- "noddy number"},
- {NULL} /* Sentinel */
- };
-
-We also need to update the :c:member:`~PyTypeObject.tp_init` handler to only allow strings [#]_ to
-be passed::
-
- static int
- Noddy_init(Noddy *self, PyObject *args, PyObject *kwds)
- {
- PyObject *first=NULL, *last=NULL, *tmp;
-
- static char *kwlist[] = {"first", "last", "number", NULL};
-
- if (! PyArg_ParseTupleAndKeywords(args, kwds, "|SSi", kwlist,
- &first, &last,
- &self->number))
- return -1;
-
- if (first) {
- tmp = self->first;
- Py_INCREF(first);
- self->first = first;
- Py_DECREF(tmp);
- }
-
- if (last) {
- tmp = self->last;
- Py_INCREF(last);
- self->last = last;
- Py_DECREF(tmp);
- }
-
- return 0;
- }
-
-With these changes, we can assure that the :attr:`first` and :attr:`last`
-members are never *NULL* so we can remove checks for *NULL* values in almost all
-cases. This means that most of the :c:func:`Py_XDECREF` calls can be converted to
-:c:func:`Py_DECREF` calls. The only place we can't change these calls is in the
-deallocator, where there is the possibility that the initialization of these
-members failed in the constructor.
-
-We also rename the module initialization function and module name in the
-initialization function, as we did before, and we add an extra definition to the
-:file:`setup.py` file.
-
-
-Supporting cyclic garbage collection
-------------------------------------
-
-Python has a cyclic-garbage collector that can identify unneeded objects even
-when their reference counts are not zero. This can happen when objects are
-involved in cycles. For example, consider::
-
- >>> l = []
- >>> l.append(l)
- >>> del l
-
-In this example, we create a list that contains itself. When we delete it, it
-still has a reference from itself. Its reference count doesn't drop to zero.
-Fortunately, Python's cyclic-garbage collector will eventually figure out that
-the list is garbage and free it.
-
-In the second version of the :class:`Noddy` example, we allowed any kind of
-object to be stored in the :attr:`first` or :attr:`last` attributes [#]_. This
-means that :class:`Noddy` objects can participate in cycles::
-
- >>> import noddy2
- >>> n = noddy2.Noddy()
- >>> l = [n]
- >>> n.first = l
-
-This is pretty silly, but it gives us an excuse to add support for the
-cyclic-garbage collector to the :class:`Noddy` example. To support cyclic
-garbage collection, types need to fill two slots and set a class flag that
-enables these slots:
-
-.. literalinclude:: ../includes/noddy4.c
-
-
-The traversal method provides access to subobjects that could participate in
-cycles::
-
- static int
- Noddy_traverse(Noddy *self, visitproc visit, void *arg)
- {
- int vret;
-
- if (self->first) {
- vret = visit(self->first, arg);
- if (vret != 0)
- return vret;
- }
- if (self->last) {
- vret = visit(self->last, arg);
- if (vret != 0)
- return vret;
- }
-
- return 0;
- }
-
-For each subobject that can participate in cycles, we need to call the
-:c:func:`visit` function, which is passed to the traversal method. The
-:c:func:`visit` function takes as arguments the subobject and the extra argument
-*arg* passed to the traversal method. It returns an integer value that must be
-returned if it is non-zero.
-
-Python provides a :c:func:`Py_VISIT` macro that automates calling visit
-functions. With :c:func:`Py_VISIT`, :c:func:`Noddy_traverse` can be simplified::
-
- static int
- Noddy_traverse(Noddy *self, visitproc visit, void *arg)
- {
- Py_VISIT(self->first);
- Py_VISIT(self->last);
- return 0;
- }
-
-.. note::
-
- Note that the :c:member:`~PyTypeObject.tp_traverse` implementation must name its arguments exactly
- *visit* and *arg* in order to use :c:func:`Py_VISIT`. This is to encourage
- uniformity across these boring implementations.
-
-We also need to provide a method for clearing any subobjects that can
-participate in cycles.
-
-::
-
- static int
- Noddy_clear(Noddy *self)
- {
- PyObject *tmp;
-
- tmp = self->first;
- self->first = NULL;
- Py_XDECREF(tmp);
-
- tmp = self->last;
- self->last = NULL;
- Py_XDECREF(tmp);
-
- return 0;
- }
-
-Notice the use of a temporary variable in :c:func:`Noddy_clear`. We use the
-temporary variable so that we can set each member to *NULL* before decrementing
-its reference count. We do this because, as was discussed earlier, if the
-reference count drops to zero, we might cause code to run that calls back into
-the object. In addition, because we now support garbage collection, we also
-have to worry about code being run that triggers garbage collection. If garbage
-collection is run, our :c:member:`~PyTypeObject.tp_traverse` handler could get called. We can't
-take a chance of having :c:func:`Noddy_traverse` called when a member's reference
-count has dropped to zero and its value hasn't been set to *NULL*.
-
-Python provides a :c:func:`Py_CLEAR` that automates the careful decrementing of
-reference counts. With :c:func:`Py_CLEAR`, the :c:func:`Noddy_clear` function can
-be simplified::
-
- static int
- Noddy_clear(Noddy *self)
- {
- Py_CLEAR(self->first);
- Py_CLEAR(self->last);
- return 0;
- }
-
-Note that :c:func:`Noddy_dealloc` may call arbitrary functions through
-``__del__`` method or weakref callback. It means circular GC can be
-triggered inside the function. Since GC assumes reference count is not zero,
-we need to untrack the object from GC by calling :c:func:`PyObject_GC_UnTrack`
-before clearing members. Here is reimplemented deallocator which uses
-:c:func:`PyObject_GC_UnTrack` and :c:func:`Noddy_clear`.
-
-::
-
- static void
- Noddy_dealloc(Noddy* self)
- {
- PyObject_GC_UnTrack(self);
- Noddy_clear(self);
- Py_TYPE(self)->tp_free((PyObject*)self);
- }
-
-Finally, we add the :const:`Py_TPFLAGS_HAVE_GC` flag to the class flags::
-
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
-
-That's pretty much it. If we had written custom :c:member:`~PyTypeObject.tp_alloc` or
-:c:member:`~PyTypeObject.tp_free` slots, we'd need to modify them for cyclic-garbage collection.
-Most extensions will use the versions automatically provided.
-
-
-Subclassing other types
------------------------
-
-It is possible to create new extension types that are derived from existing
-types. It is easiest to inherit from the built in types, since an extension can
-easily use the :class:`PyTypeObject` it needs. It can be difficult to share
-these :class:`PyTypeObject` structures between extension modules.
-
-In this example we will create a :class:`Shoddy` type that inherits from the
-built-in :class:`list` type. The new type will be completely compatible with
-regular lists, but will have an additional :meth:`increment` method that
-increases an internal counter. ::
-
- >>> import shoddy
- >>> s = shoddy.Shoddy(range(3))
- >>> s.extend(s)
- >>> print(len(s))
- 6
- >>> print(s.increment())
- 1
- >>> print(s.increment())
- 2
-
-.. literalinclude:: ../includes/shoddy.c
-
-
-As you can see, the source code closely resembles the :class:`Noddy` examples in
-previous sections. We will break down the main differences between them. ::
-
- typedef struct {
- PyListObject list;
- int state;
- } Shoddy;
-
-The primary difference for derived type objects is that the base type's object
-structure must be the first value. The base type will already include the
-:c:func:`PyObject_HEAD` at the beginning of its structure.
-
-When a Python object is a :class:`Shoddy` instance, its *PyObject\** pointer can
-be safely cast to both *PyListObject\** and *Shoddy\**. ::
-
- static int
- Shoddy_init(Shoddy *self, PyObject *args, PyObject *kwds)
- {
- if (PyList_Type.tp_init((PyObject *)self, args, kwds) < 0)
- return -1;
- self->state = 0;
- return 0;
- }
-
-In the :attr:`__init__` method for our type, we can see how to call through to
-the :attr:`__init__` method of the base type.
-
-This pattern is important when writing a type with custom :attr:`new` and
-:attr:`dealloc` methods. The :attr:`new` method should not actually create the
-memory for the object with :c:member:`~PyTypeObject.tp_alloc`, that will be handled by the base
-class when calling its :c:member:`~PyTypeObject.tp_new`.
-
-When filling out the :c:func:`PyTypeObject` for the :class:`Shoddy` type, you see
-a slot for :c:func:`tp_base`. Due to cross platform compiler issues, you can't
-fill that field directly with the :c:func:`PyList_Type`; it can be done later in
-the module's :c:func:`init` function. ::
-
- PyMODINIT_FUNC
- PyInit_shoddy(void)
- {
- PyObject *m;
-
- ShoddyType.tp_base = &PyList_Type;
- if (PyType_Ready(&ShoddyType) < 0)
- return NULL;
-
- m = PyModule_Create(&shoddymodule);
- if (m == NULL)
- return NULL;
-
- Py_INCREF(&ShoddyType);
- PyModule_AddObject(m, "Shoddy", (PyObject *) &ShoddyType);
- return m;
- }
-
-Before calling :c:func:`PyType_Ready`, the type structure must have the
-:c:member:`~PyTypeObject.tp_base` slot filled in. When we are deriving a new type, it is not
-necessary to fill out the :c:member:`~PyTypeObject.tp_alloc` slot with :c:func:`PyType_GenericNew`
--- the allocate function from the base type will be inherited.
-
-After that, calling :c:func:`PyType_Ready` and adding the type object to the
-module is the same as with the basic :class:`Noddy` examples.
-
+*****************************************
+Defining Extension Types: Assorted Topics
+*****************************************
.. _dnt-type-methods:
-Type Methods
-============
-
This section aims to give a quick fly-by on the various type methods you can
implement and what they do.
.. literalinclude:: ../includes/typestruct.h
-Now that's a *lot* of methods. Don't worry too much though - if you have a type
-you want to define, the chances are very good that you will only implement a
-handful of these.
+Now that's a *lot* of methods. Don't worry too much though -- if you have
+a type you want to define, the chances are very good that you will only
+implement a handful of these.
As you probably expect by now, we're going to go over this and give more
information about the various handlers. We won't go in the order they are
defined in the structure, because there is a lot of historical baggage that
-impacts the ordering of the fields; be sure your type initialization keeps the
-fields in the right order! It's often easiest to find an example that includes
-all the fields you need (even if they're initialized to ``0``) and then change
-the values to suit your new type. ::
+impacts the ordering of the fields. It's often easiest to find an example
+that includes the fields you need and then change the values to suit your new
+type. ::
const char *tp_name; /* For printing */
-The name of the type - as mentioned in the last section, this will appear in
+The name of the type -- as mentioned in the previous chapter, this will appear in
various places, almost entirely for diagnostic purposes. Try to choose something
that will be helpful in such a situation! ::
These fields tell the runtime how much memory to allocate when new objects of
this type are created. Python has some built-in support for variable length
-structures (think: strings, lists) which is where the :c:member:`~PyTypeObject.tp_itemsize` field
+structures (think: strings, tuples) which is where the :c:member:`~PyTypeObject.tp_itemsize` field
comes in. This will be dealt with later. ::
const char *tp_doc;
Here you can put a string (or its address) that you want returned when the
Python script references ``obj.__doc__`` to retrieve the doc string.
-Now we come to the basic type methods---the ones most extension types will
+Now we come to the basic type methods -- the ones most extension types will
implement.
function::
static void
- newdatatype_dealloc(newdatatypeobject * obj)
+ newdatatype_dealloc(newdatatypeobject *obj)
{
free(obj->obj_UnderlyingDatatypePtr);
Py_TYPE(obj)->tp_free(obj);
static PyObject *
newdatatype_repr(newdatatypeobject * obj)
{
- return PyUnicode_FromFormat("Repr-ified_newdatatype{{size:\%d}}",
+ return PyUnicode_FromFormat("Repr-ified_newdatatype{{size:%d}}",
obj->obj_UnderlyingDatatypePtr->size);
}
static PyObject *
newdatatype_str(newdatatypeobject * obj)
{
- return PyUnicode_FromFormat("Stringified_newdatatype{{size:\%d}}",
+ return PyUnicode_FromFormat("Stringified_newdatatype{{size:%d}}",
obj->obj_UnderlyingDatatypePtr->size);
}
static int
newdatatype_setattr(newdatatypeobject *obj, char *name, PyObject *v)
{
- (void)PyErr_Format(PyExc_RuntimeError, "Read-only attribute: \%s", name);
+ PyErr_Format(PyExc_RuntimeError, "Read-only attribute: %s", name);
return -1;
}
hashfunc tp_hash;
This function, if you choose to provide it, should return a hash number for an
-instance of your data type. Here is a moderately pointless example::
+instance of your data type. Here is a simple example::
- static long
+ static Py_hash_t
newdatatype_hash(newdatatypeobject *obj)
{
- long result;
- result = obj->obj_UnderlyingDatatypePtr->size;
- result = result * 3;
+ Py_hash_t result;
+ result = obj->some_size + 32767 * obj->some_number;
+ if (result == -1)
+ result = -2;
return result;
}
+:c:type:`Py_hash_t` is a signed integer type with a platform-varying width.
+Returning ``-1`` from :c:member:`~PyTypeObject.tp_hash` indicates an error,
+which is why you should be careful to avoid returning it when hash computation
+is successful, as seen above.
+
::
ternaryfunc tp_call;
This function takes three arguments:
-#. *arg1* is the instance of the data type which is the subject of the call. If
- the call is ``obj1('hello')``, then *arg1* is ``obj1``.
+#. *self* is the instance of the data type which is the subject of the call.
+ If the call is ``obj1('hello')``, then *self* is ``obj1``.
-#. *arg2* is a tuple containing the arguments to the call. You can use
+#. *args* is a tuple containing the arguments to the call. You can use
:c:func:`PyArg_ParseTuple` to extract the arguments.
-#. *arg3* is a dictionary of keyword arguments that were passed. If this is
+#. *kwds* is a dictionary of keyword arguments that were passed. If this is
non-*NULL* and you support keyword arguments, use
- :c:func:`PyArg_ParseTupleAndKeywords` to extract the arguments. If you do not
- want to support keyword arguments and this is non-*NULL*, raise a
+ :c:func:`PyArg_ParseTupleAndKeywords` to extract the arguments. If you
+ do not want to support keyword arguments and this is non-*NULL*, raise a
:exc:`TypeError` with a message saying that keyword arguments are not supported.
-Here is a desultory example of the implementation of the call function. ::
+Here is a toy ``tp_call`` implementation::
- /* Implement the call function.
- * obj1 is the instance receiving the call.
- * obj2 is a tuple containing the arguments to the call, in this
- * case 3 strings.
- */
static PyObject *
- newdatatype_call(newdatatypeobject *obj, PyObject *args, PyObject *other)
+ newdatatype_call(newdatatypeobject *self, PyObject *args, PyObject *kwds)
{
PyObject *result;
char *arg1;
return NULL;
}
result = PyUnicode_FromFormat(
- "Returning -- value: [\%d] arg1: [\%s] arg2: [\%s] arg3: [\%s]\n",
+ "Returning -- value: [%d] arg1: [%s] arg2: [%s] arg3: [%s]\n",
obj->obj_UnderlyingDatatypePtr->size,
arg1, arg2, arg3);
return result;
getiterfunc tp_iter;
iternextfunc tp_iternext;
-These functions provide support for the iterator protocol. Any object which
-wishes to support iteration over its contents (which may be generated during
-iteration) must implement the ``tp_iter`` handler. Objects which are returned
-by a ``tp_iter`` handler must implement both the ``tp_iter`` and ``tp_iternext``
-handlers. Both handlers take exactly one parameter, the instance for which they
-are being called, and return a new reference. In the case of an error, they
-should set an exception and return *NULL*.
-
-For an object which represents an iterable collection, the ``tp_iter`` handler
-must return an iterator object. The iterator object is responsible for
-maintaining the state of the iteration. For collections which can support
-multiple iterators which do not interfere with each other (as lists and tuples
-do), a new iterator should be created and returned. Objects which can only be
-iterated over once (usually due to side effects of iteration) should implement
-this handler by returning a new reference to themselves, and should also
-implement the ``tp_iternext`` handler. File objects are an example of such an
-iterator.
-
-Iterator objects should implement both handlers. The ``tp_iter`` handler should
-return a new reference to the iterator (this is the same as the ``tp_iter``
-handler for objects which can only be iterated over destructively). The
-``tp_iternext`` handler should return a new reference to the next object in the
-iteration if there is one. If the iteration has reached the end, it may return
-*NULL* without setting an exception or it may set :exc:`StopIteration`; avoiding
-the exception can yield slightly better performance. If an actual error occurs,
-it should set an exception and return *NULL*.
+These functions provide support for the iterator protocol. Both handlers
+take exactly one parameter, the instance for which they are being called,
+and return a new reference. In the case of an error, they should set an
+exception and return *NULL*. :c:member:`~PyTypeObject.tp_iter` corresponds
+to the Python :meth:`__iter__` method, while :c:member:`~PyTypeObject.tp_iternext`
+corresponds to the Python :meth:`~iterator.__next__` method.
+
+Any :term:`iterable` object must implement the :c:member:`~PyTypeObject.tp_iter`
+handler, which must return an :term:`iterator` object. Here the same guidelines
+apply as for Python classes:
+
+* For collections (such as lists and tuples) which can support multiple
+ independent iterators, a new iterator should be created and returned by
+ each call to :c:member:`~PyTypeObject.tp_iter`.
+* Objects which can only be iterated over once (usually due to side effects of
+ iteration, such as file objects) can implement :c:member:`~PyTypeObject.tp_iter`
+ by returning a new reference to themselves -- and should also therefore
+ implement the :c:member:`~PyTypeObject.tp_iternext` handler.
+
+Any :term:`iterator` object should implement both :c:member:`~PyTypeObject.tp_iter`
+and :c:member:`~PyTypeObject.tp_iternext`. An iterator's
+:c:member:`~PyTypeObject.tp_iter` handler should return a new reference
+to the iterator. Its :c:member:`~PyTypeObject.tp_iternext` handler should
+return a new reference to the next object in the iteration, if there is one.
+If the iteration has reached the end, :c:member:`~PyTypeObject.tp_iternext`
+may return *NULL* without setting an exception, or it may set
+:exc:`StopIteration` *in addition* to returning *NULL*; avoiding
+the exception can yield slightly better performance. If an actual error
+occurs, :c:member:`~PyTypeObject.tp_iternext` should always set an exception
+and return *NULL*.
.. _weakref-support:
Weak Reference Support
----------------------
-One of the goals of Python's weak-reference implementation is to allow any type
+One of the goals of Python's weak reference implementation is to allow any type
to participate in the weak reference mechanism without incurring the overhead on
-those objects which do not benefit by weak referencing (such as numbers).
+performance-critical objects (such as numbers).
-For an object to be weakly referencable, the extension must include a
-:c:type:`PyObject\*` field in the instance structure for the use of the weak
-reference mechanism; it must be initialized to *NULL* by the object's
-constructor. It must also set the :c:member:`~PyTypeObject.tp_weaklistoffset` field of the
-corresponding type object to the offset of the field. For example, the instance
-type is defined with the following structure::
+.. seealso::
+ Documentation for the :mod:`weakref` module.
- typedef struct {
- PyObject_HEAD
- PyClassObject *in_class; /* The class object */
- PyObject *in_dict; /* A dictionary */
- PyObject *in_weakreflist; /* List of weak references */
- } PyInstanceObject;
-
-The statically-declared type object for instances is defined this way::
-
- PyTypeObject PyInstance_Type = {
- PyVarObject_HEAD_INIT(&PyType_Type, 0)
- 0,
- "module.instance",
-
- /* Lots of stuff omitted for brevity... */
-
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- offsetof(PyInstanceObject, in_weakreflist), /* tp_weaklistoffset */
- };
+For an object to be weakly referencable, the extension type must do two things:
-The type constructor is responsible for initializing the weak reference list to
-*NULL*::
+#. Include a :c:type:`PyObject\*` field in the C object structure dedicated to
+ the weak reference mechanism. The object's constructor should leave it
+ *NULL* (which is automatic when using the default
+ :c:member:`~PyTypeObject.tp_alloc`).
- static PyObject *
- instance_new() {
- /* Other initialization stuff omitted for brevity */
+#. Set the :c:member:`~PyTypeObject.tp_weaklistoffset` type member
+ to the offset of the aforementioned field in the C object structure,
+ so that the interpreter knows how to access and modify that field.
- self->in_weakreflist = NULL;
+Concretely, here is how a trivial object structure would be augmented
+with the required field::
- return (PyObject *) self;
- }
+ typedef struct {
+ PyObject_HEAD
+ PyObject *weakreflist; /* List of weak references */
+ } TrivialObject;
-The only further addition is that the destructor needs to call the weak
-reference manager to clear any weak references. This is only required if the
-weak reference list is non-*NULL*::
+And the corresponding member in the statically-declared type object::
- static void
- instance_dealloc(PyInstanceObject *inst)
- {
- /* Allocate temporaries if needed, but do not begin
- destruction just yet.
- */
+ static PyTypeObject TrivialType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ /* ... other members omitted for brevity ... */
+ .tp_weaklistoffset = offsetof(TrivialObject, weakreflist),
+ };
- if (inst->in_weakreflist != NULL)
- PyObject_ClearWeakRefs((PyObject *) inst);
+The only further addition is that ``tp_dealloc`` needs to clear any weak
+references (by calling :c:func:`PyObject_ClearWeakRefs`) if the field is
+non-*NULL*::
- /* Proceed with object destruction normally. */
+ static void
+ Trivial_dealloc(TrivialObject *self)
+ {
+ /* Clear weakrefs first before calling any destructors */
+ if (self->weakreflist != NULL)
+ PyObject_ClearWeakRefs((PyObject *) self);
+ /* ... remainder of destruction code omitted for brevity ... */
+ Py_TYPE(self)->tp_free((PyObject *) self);
}
More Suggestions
----------------
-Remember that you can omit most of these functions, in which case you provide
-``0`` as a value. There are type definitions for each of the functions you must
-provide. They are in :file:`object.h` in the Python include directory that
-comes with the source distribution of Python.
-
In order to learn how to implement any specific method for your new data type,
-do the following: Download and unpack the Python source distribution. Go to
-the :file:`Objects` directory, then search the C source files for ``tp_`` plus
-the function you want (for example, ``tp_richcompare``). You will find examples
-of the function you want to implement.
+get the :term:`CPython` source code. Go to the :file:`Objects` directory,
+then search the C source files for ``tp_`` plus the function you want
+(for example, ``tp_richcompare``). You will find examples of the function
+you want to implement.
-When you need to verify that an object is an instance of the type you are
-implementing, use the :c:func:`PyObject_TypeCheck` function. A sample of its use
-might be something like the following::
+When you need to verify that an object is a concrete instance of the type you
+are implementing, use the :c:func:`PyObject_TypeCheck` function. A sample of
+its use might be something like the following::
- if (! PyObject_TypeCheck(some_object, &MyType)) {
+ if (!PyObject_TypeCheck(some_object, &MyType)) {
PyErr_SetString(PyExc_TypeError, "arg #1 not a mything");
return NULL;
}
-.. rubric:: Footnotes
-
-.. [#] This is true when we know that the object is a basic type, like a string or a
- float.
-
-.. [#] We relied on this in the :c:member:`~PyTypeObject.tp_dealloc` handler in this example, because our
- type doesn't support garbage collection. Even if a type supports garbage
- collection, there are calls that can be made to "untrack" the object from
- garbage collection, however, these calls are advanced and not covered here.
-
-.. [#] We now know that the first and last members are strings, so perhaps we could be
- less careful about decrementing their reference counts, however, we accept
- instances of string subclasses. Even though deallocating normal strings won't
- call back into our objects, we can't guarantee that deallocating an instance of
- a string subclass won't call back into our objects.
+.. seealso::
+ Download CPython source releases.
+ https://www.python.org/downloads/source/
-.. [#] Even in the third version, we aren't guaranteed to avoid cycles. Instances of
- string subclasses are allowed and string subclasses could allow cycles even if
- normal strings don't.
+ The CPython project on GitHub, where the CPython source code is developed.
+ https://github.com/python/cpython
--- /dev/null
+.. highlightlang:: c
+
+.. _defining-new-types:
+
+**********************************
+Defining Extension Types: Tutorial
+**********************************
+
+.. sectionauthor:: Michael Hudson <mwh@python.net>
+.. sectionauthor:: Dave Kuhlman <dkuhlman@rexx.com>
+.. sectionauthor:: Jim Fulton <jim@zope.com>
+
+
+Python allows the writer of a C extension module to define new types that
+can be manipulated from Python code, much like the built-in :class:`str`
+and :class:`list` types. The code for all extension types follows a
+pattern, but there are some details that you need to understand before you
+can get started. This document is a gentle introduction to the topic.
+
+
+.. _dnt-basics:
+
+The Basics
+==========
+
+The :term:`CPython` runtime sees all Python objects as variables of type
+:c:type:`PyObject\*`, which serves as a "base type" for all Python objects.
+The :c:type:`PyObject` structure itself only contains the object's
+:term:`reference count` and a pointer to the object's "type object".
+This is where the action is; the type object determines which (C) functions
+get called by the interpreter when, for instance, an attribute gets looked up
+on an object, a method called, or it is multiplied by another object. These
+C functions are called "type methods".
+
+So, if you want to define a new extension type, you need to create a new type
+object.
+
+This sort of thing can only be explained by example, so here's a minimal, but
+complete, module that defines a new type named :class:`Custom` inside a C
+extension module :mod:`custom`:
+
+.. note::
+ What we're showing here is the traditional way of defining *static*
+ extension types. It should be adequate for most uses. The C API also
+ allows defining heap-allocated extension types using the
+ :c:func:`PyType_FromSpec` function, which isn't covered in this tutorial.
+
+.. literalinclude:: ../includes/custom.c
+
+Now that's quite a bit to take in at once, but hopefully bits will seem familiar
+from the previous chapter. This file defines three things:
+
+#. What a :class:`Custom` **object** contains: this is the ``CustomObject``
+ struct, which is allocated once for each :class:`Custom` instance.
+#. How the :class:`Custom` **type** behaves: this is the ``CustomType`` struct,
+ which defines a set of flags and function pointers that the interpreter
+ inspects when specific operations are requested.
+#. How to initialize the :mod:`custom` module: this is the ``PyInit_custom``
+ function and the associated ``custommodule`` struct.
+
+The first bit is::
+
+ typedef struct {
+ PyObject_HEAD
+ } CustomObject;
+
+This is what a Custom object will contain. ``PyObject_HEAD`` is mandatory
+at the start of each object struct and defines a field called ``ob_base``
+of type :c:type:`PyObject`, containing a pointer to a type object and a
+reference count (these can be accessed using the macros :c:macro:`Py_REFCNT`
+and :c:macro:`Py_TYPE` respectively). The reason for the macro is to
+abstract away the layout and to enable additional fields in debug builds.
+
+.. note::
+ There is no semicolon above after the :c:macro:`PyObject_HEAD` macro.
+ Be wary of adding one by accident: some compilers will complain.
+
+Of course, objects generally store additional data besides the standard
+``PyObject_HEAD`` boilerplate; for example, here is the definition for
+standard Python floats::
+
+ typedef struct {
+ PyObject_HEAD
+ double ob_fval;
+ } PyFloatObject;
+
+The second bit is the definition of the type object. ::
+
+ static PyTypeObject CustomType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "custom.Custom",
+ .tp_doc = "Custom objects",
+ .tp_basicsize = sizeof(CustomObject),
+ .tp_itemsize = 0,
+ .tp_new = PyType_GenericNew,
+ };
+
+.. note::
+ We recommend using C99-style designated initializers as above, to
+ avoid listing all the :c:type:`PyTypeObject` fields that you don't care
+ about and also to avoid caring about the fields' declaration order.
+
+The actual definition of :c:type:`PyTypeObject` in :file:`object.h` has
+many more :ref:`fields <type-structs>` than the definition above. The
+remaining fields will be filled with zeros by the C compiler, and it's
+common practice to not specify them explicitly unless you need them.
+
+We're going to pick it apart, one field at a time::
+
+ PyVarObject_HEAD_INIT(NULL, 0)
+
+This line is mandatory boilerplate to initialize the ``ob_base``
+field mentioned above. ::
+
+ .tp_name = "custom.Custom",
+
+The name of our type. This will appear in the default textual representation of
+our objects and in some error messages, for example:
+
+.. code-block:: pycon
+
+ >>> "" + custom.Custom()
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: can only concatenate str (not "custom.Custom") to str
+
+Note that the name is a dotted name that includes both the module name and the
+name of the type within the module. The module in this case is :mod:`custom` and
+the type is :class:`Custom`, so we set the type name to :class:`custom.Custom`.
+Using the real dotted import path is important to make your type compatible
+with the :mod:`pydoc` and :mod:`pickle` modules. ::
+
+ .tp_basicsize = sizeof(CustomObject),
+ .tp_itemsize = 0,
+
+This is so that Python knows how much memory to allocate when creating
+new :class:`Custom` instances. :c:member:`~PyTypeObject.tp_itemsize` is
+only used for variable-sized objects and should otherwise be zero.
+
+.. note::
+
+ If you want your type to be subclassable from Python, and your type has the same
+ :c:member:`~PyTypeObject.tp_basicsize` as its base type, you may have problems with multiple
+ inheritance. A Python subclass of your type will have to list your type first
+ in its :attr:`~class.__bases__`, or else it will not be able to call your type's
+ :meth:`__new__` method without getting an error. You can avoid this problem by
+ ensuring that your type has a larger value for :c:member:`~PyTypeObject.tp_basicsize` than its
+ base type does. Most of the time, this will be true anyway, because either your
+ base type will be :class:`object`, or else you will be adding data members to
+ your base type, and therefore increasing its size.
+
+We set the class flags to :const:`Py_TPFLAGS_DEFAULT`. ::
+
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+
+All types should include this constant in their flags. It enables all of the
+members defined until at least Python 3.3. If you need further members,
+you will need to OR the corresponding flags.
+
+We provide a doc string for the type in :c:member:`~PyTypeObject.tp_doc`. ::
+
+ .tp_doc = "Custom objects",
+
+To enable object creation, we have to provide a :c:member:`~PyTypeObject.tp_new`
+handler. This is the equivalent of the Python method :meth:`__new__`, but
+has to be specified explicitly. In this case, we can just use the default
+implementation provided by the API function :c:func:`PyType_GenericNew`. ::
+
+ .tp_new = PyType_GenericNew,
+
+Everything else in the file should be familiar, except for some code in
+:c:func:`PyInit_custom`::
+
+ if (PyType_Ready(&CustomType) < 0)
+        return NULL;
+
+This initializes the :class:`Custom` type, filling in a number of members
+to the appropriate default values, including :attr:`ob_type` that we initially
+set to *NULL*. ::
+
+ PyModule_AddObject(m, "Custom", (PyObject *) &CustomType);
+
+This adds the type to the module dictionary. This allows us to create
+:class:`Custom` instances by calling the :class:`Custom` class:
+
+.. code-block:: pycon
+
+ >>> import custom
+ >>> mycustom = custom.Custom()
+
+That's it! All that remains is to build it; put the above code in a file called
+:file:`custom.c` and:
+
+.. code-block:: python
+
+ from distutils.core import setup, Extension
+ setup(name="custom", version="1.0",
+ ext_modules=[Extension("custom", ["custom.c"])])
+
+in a file called :file:`setup.py`; then typing
+
+.. code-block:: shell-session
+
+ $ python setup.py build
+
+at a shell should produce a file :file:`custom.so` in a subdirectory; move to
+that directory and fire up Python --- you should be able to ``import custom`` and
+play around with Custom objects.
+
+That wasn't so hard, was it?
+
+Of course, the current Custom type is pretty uninteresting. It has no data and
+doesn't do anything. It can't even be subclassed.
+
+.. note::
+ While this documentation showcases the standard :mod:`distutils` module
+ for building C extensions, it is recommended in real-world use cases to
+ use the newer and better-maintained ``setuptools`` library. Documentation
+ on how to do this is out of scope for this document and can be found in
+ the `Python Packaging User's Guide <https://packaging.python.org/tutorials/distributing-packages/>`_.
+
+
+Adding data and methods to the Basic example
+============================================
+
+Let's extend the basic example to add some data and methods. Let's also make
+the type usable as a base class. We'll create a new module, :mod:`custom2` that
+adds these capabilities:
+
+.. literalinclude:: ../includes/custom2.c
+
+
+This version of the module has a number of changes.
+
+We've added an extra include::
+
+ #include <structmember.h>
+
+This include provides declarations that we use to handle attributes, as
+described a bit later.
+
+The :class:`Custom` type now has three data attributes in its C struct,
+*first*, *last*, and *number*. The *first* and *last* variables are Python
+strings containing first and last names. The *number* attribute is a C integer.
+
+The object structure is updated accordingly::
+
+ typedef struct {
+ PyObject_HEAD
+ PyObject *first; /* first name */
+ PyObject *last; /* last name */
+ int number;
+ } CustomObject;
+
+Because we now have data to manage, we have to be more careful about object
+allocation and deallocation. At a minimum, we need a deallocation method::
+
+ static void
+ Custom_dealloc(CustomObject *self)
+ {
+ Py_XDECREF(self->first);
+ Py_XDECREF(self->last);
+ Py_TYPE(self)->tp_free((PyObject *) self);
+ }
+
+which is assigned to the :c:member:`~PyTypeObject.tp_dealloc` member::
+
+ .tp_dealloc = (destructor) Custom_dealloc,
+
+This method first clears the reference counts of the two Python attributes.
+:c:func:`Py_XDECREF` correctly handles the case where its argument is
+*NULL* (which might happen here if ``tp_new`` failed midway). It then
+calls the :c:member:`~PyTypeObject.tp_free` member of the object's type
+(computed by ``Py_TYPE(self)``) to free the object's memory. Note that
+the object's type might not be :class:`CustomType`, because the object may
+be an instance of a subclass.
+
+.. note::
+ The explicit cast to ``destructor`` above is needed because we defined
+ ``Custom_dealloc`` to take a ``CustomObject *`` argument, but the ``tp_dealloc``
+ function pointer expects to receive a ``PyObject *`` argument. Otherwise,
+ the compiler will emit a warning. This is object-oriented polymorphism,
+ in C!
+
+We want to make sure that the first and last names are initialized to empty
+strings, so we provide a ``tp_new`` implementation::
+
+ static PyObject *
+ Custom_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+ {
+ CustomObject *self;
+ self = (CustomObject *) type->tp_alloc(type, 0);
+ if (self != NULL) {
+ self->first = PyUnicode_FromString("");
+ if (self->first == NULL) {
+ Py_DECREF(self);
+ return NULL;
+ }
+ self->last = PyUnicode_FromString("");
+ if (self->last == NULL) {
+ Py_DECREF(self);
+ return NULL;
+ }
+ self->number = 0;
+ }
+ return (PyObject *) self;
+ }
+
+and install it in the :c:member:`~PyTypeObject.tp_new` member::
+
+ .tp_new = Custom_new,
+
+The ``tp_new`` handler is responsible for creating (as opposed to initializing)
+objects of the type. It is exposed in Python as the :meth:`__new__` method.
+It is not required to define a ``tp_new`` member, and indeed many extension
+types will simply reuse :c:func:`PyType_GenericNew` as done in the first
+version of the ``Custom`` type above. In this case, we use the ``tp_new``
+handler to initialize the ``first`` and ``last`` attributes to non-*NULL*
+default values.
+
+``tp_new`` is passed the type being instantiated (not necessarily ``CustomType``,
+if a subclass is instantiated) and any arguments passed when the type was
+called, and is expected to return the instance created. ``tp_new`` handlers
+always accept positional and keyword arguments, but they often ignore the
+arguments, leaving the argument handling to initializer (a.k.a. ``tp_init``
+in C or ``__init__`` in Python) methods.
+
+.. note::
+ ``tp_new`` shouldn't call ``tp_init`` explicitly, as the interpreter
+ will do it itself.
+
+The ``tp_new`` implementation calls the :c:member:`~PyTypeObject.tp_alloc`
+slot to allocate memory::
+
+ self = (CustomObject *) type->tp_alloc(type, 0);
+
+Since memory allocation may fail, we must check the :c:member:`~PyTypeObject.tp_alloc`
+result against *NULL* before proceeding.
+
+.. note::
+ We didn't fill the :c:member:`~PyTypeObject.tp_alloc` slot ourselves. Rather
+ :c:func:`PyType_Ready` fills it for us by inheriting it from our base class,
+ which is :class:`object` by default. Most types use the default allocation
+ strategy.
+
+.. note::
+ If you are creating a co-operative :c:member:`~PyTypeObject.tp_new` (one
+ that calls a base type's :c:member:`~PyTypeObject.tp_new` or :meth:`__new__`),
+ you must *not* try to determine what method to call using method resolution
+ order at runtime. Always statically determine what type you are going to
+ call, and call its :c:member:`~PyTypeObject.tp_new` directly, or via
+ ``type->tp_base->tp_new``. If you do not do this, Python subclasses of your
+ type that also inherit from other Python-defined classes may not work correctly.
+ (Specifically, you may not be able to create instances of such subclasses
+ without getting a :exc:`TypeError`.)
+
+We also define an initialization function which accepts arguments to provide
+initial values for our instance::
+
+ static int
+ Custom_init(CustomObject *self, PyObject *args, PyObject *kwds)
+ {
+ static char *kwlist[] = {"first", "last", "number", NULL};
+ PyObject *first = NULL, *last = NULL, *tmp;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOi", kwlist,
+ &first, &last,
+ &self->number))
+ return -1;
+
+ if (first) {
+ tmp = self->first;
+ Py_INCREF(first);
+ self->first = first;
+ Py_XDECREF(tmp);
+ }
+ if (last) {
+ tmp = self->last;
+ Py_INCREF(last);
+ self->last = last;
+ Py_XDECREF(tmp);
+ }
+ return 0;
+ }
+
+by filling the :c:member:`~PyTypeObject.tp_init` slot. ::
+
+ .tp_init = (initproc) Custom_init,
+
+The :c:member:`~PyTypeObject.tp_init` slot is exposed in Python as the
+:meth:`__init__` method. It is used to initialize an object after it's
+created. Initializers always accept positional and keyword arguments,
+and they should return either ``0`` on success or ``-1`` on error.
+
+Unlike the ``tp_new`` handler, there is no guarantee that ``tp_init``
+is called at all (for example, the :mod:`pickle` module by default
+doesn't call :meth:`__init__` on unpickled instances). It can also be
+called multiple times. Anyone can call the :meth:`__init__` method on
+our objects. For this reason, we have to be extra careful when assigning
+the new attribute values. We might be tempted, for example, to assign the
+``first`` member like this::
+
+ if (first) {
+ Py_XDECREF(self->first);
+ Py_INCREF(first);
+ self->first = first;
+ }
+
+But this would be risky. Our type doesn't restrict the type of the
+``first`` member, so it could be any kind of object. It could have a
+destructor that causes code to be executed that tries to access the
+``first`` member; or that destructor could release the
+:term:`Global Interpreter Lock` and let arbitrary code run in other
+threads that accesses and modifies our object.
+
+To be paranoid and protect ourselves against this possibility, we almost
+always reassign members before decrementing their reference counts. When
+don't we have to do this?
+
+* when we absolutely know that the reference count is greater than 1;
+
+* when we know that deallocation of the object [#]_ will neither release
+ the :term:`GIL` nor cause any calls back into our type's code;
+
+* when decrementing a reference count in a :c:member:`~PyTypeObject.tp_dealloc`
+ handler on a type which doesn't support cyclic garbage collection [#]_.
+
+We want to expose our instance variables as attributes. There are a
+number of ways to do that. The simplest way is to define member definitions::
+
+ static PyMemberDef Custom_members[] = {
+ {"first", T_OBJECT_EX, offsetof(CustomObject, first), 0,
+ "first name"},
+ {"last", T_OBJECT_EX, offsetof(CustomObject, last), 0,
+ "last name"},
+ {"number", T_INT, offsetof(CustomObject, number), 0,
+ "custom number"},
+ {NULL} /* Sentinel */
+ };
+
+and put the definitions in the :c:member:`~PyTypeObject.tp_members` slot::
+
+ .tp_members = Custom_members,
+
+Each member definition has a member name, type, offset, access flags and
+documentation string. See the :ref:`Generic-Attribute-Management` section
+below for details.
+
+A disadvantage of this approach is that it doesn't provide a way to restrict the
+types of objects that can be assigned to the Python attributes. We expect the
+first and last names to be strings, but any Python objects can be assigned.
+Further, the attributes can be deleted, setting the C pointers to *NULL*. Even
+though we can make sure the members are initialized to non-*NULL* values, the
+members can be set to *NULL* if the attributes are deleted.
+
+We define a single method, :meth:`Custom.name()`, that outputs the object's name as the
+concatenation of the first and last names. ::
+
+ static PyObject *
+ Custom_name(CustomObject *self)
+ {
+ if (self->first == NULL) {
+ PyErr_SetString(PyExc_AttributeError, "first");
+ return NULL;
+ }
+ if (self->last == NULL) {
+ PyErr_SetString(PyExc_AttributeError, "last");
+ return NULL;
+ }
+ return PyUnicode_FromFormat("%S %S", self->first, self->last);
+ }
+
+The method is implemented as a C function that takes a :class:`Custom` (or
+:class:`Custom` subclass) instance as the first argument. Methods always take an
+instance as the first argument. Methods often take positional and keyword
+arguments as well, but in this case we don't take any and don't need to accept
+a positional argument tuple or keyword argument dictionary. This method is
+equivalent to the Python method:
+
+.. code-block:: python
+
+ def name(self):
+ return "%s %s" % (self.first, self.last)
+
+Note that we have to check for the possibility that our :attr:`first` and
+:attr:`last` members are *NULL*. This is because they can be deleted, in which
+case they are set to *NULL*. It would be better to prevent deletion of these
+attributes and to restrict the attribute values to be strings. We'll see how to
+do that in the next section.
+
+Now that we've defined the method, we need to create an array of method
+definitions::
+
+ static PyMethodDef Custom_methods[] = {
+ {"name", (PyCFunction) Custom_name, METH_NOARGS,
+ "Return the name, combining the first and last name"
+ },
+ {NULL} /* Sentinel */
+ };
+
+(note that we used the :const:`METH_NOARGS` flag to indicate that the method
+is expecting no arguments other than *self*)
+
+and assign it to the :c:member:`~PyTypeObject.tp_methods` slot::
+
+ .tp_methods = Custom_methods,
+
+Finally, we'll make our type usable as a base class for subclassing. We've
+written our methods carefully so far so that they don't make any assumptions
+about the type of the object being created or used, so all we need to do is
+to add the :const:`Py_TPFLAGS_BASETYPE` to our class flag definition::
+
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
+
+We rename :c:func:`PyInit_custom` to :c:func:`PyInit_custom2`, update the
+module name in the :c:type:`PyModuleDef` struct, and update the full class
+name in the :c:type:`PyTypeObject` struct.
+
+Finally, we update our :file:`setup.py` file to build the new module:
+
+.. code-block:: python
+
+ from distutils.core import setup, Extension
+ setup(name="custom", version="1.0",
+ ext_modules=[
+ Extension("custom", ["custom.c"]),
+ Extension("custom2", ["custom2.c"]),
+ ])
+
+
+Providing finer control over data attributes
+============================================
+
+In this section, we'll provide finer control over how the :attr:`first` and
+:attr:`last` attributes are set in the :class:`Custom` example. In the previous
+version of our module, the instance variables :attr:`first` and :attr:`last`
+could be set to non-string values or even deleted. We want to make sure that
+these attributes always contain strings.
+
+.. literalinclude:: ../includes/custom3.c
+
+
+To provide greater control over the :attr:`first` and :attr:`last` attributes,
+we'll use custom getter and setter functions. Here are the functions for
+getting and setting the :attr:`first` attribute::
+
+ static PyObject *
+ Custom_getfirst(CustomObject *self, void *closure)
+ {
+ Py_INCREF(self->first);
+ return self->first;
+ }
+
+ static int
+ Custom_setfirst(CustomObject *self, PyObject *value, void *closure)
+ {
+ PyObject *tmp;
+ if (value == NULL) {
+ PyErr_SetString(PyExc_TypeError, "Cannot delete the first attribute");
+ return -1;
+ }
+ if (!PyUnicode_Check(value)) {
+ PyErr_SetString(PyExc_TypeError,
+ "The first attribute value must be a string");
+ return -1;
+ }
+ tmp = self->first;
+ Py_INCREF(value);
+ self->first = value;
+ Py_DECREF(tmp);
+ return 0;
+ }
+
+The getter function is passed a :class:`Custom` object and a "closure", which is
+a void pointer. In this case, the closure is ignored. (The closure supports an
+advanced usage in which definition data is passed to the getter and setter. This
+could, for example, be used to allow a single set of getter and setter functions
+that decide the attribute to get or set based on data in the closure.)
+
+The setter function is passed the :class:`Custom` object, the new value, and the
+closure. The new value may be *NULL*, in which case the attribute is being
+deleted. In our setter, we raise an error if the attribute is deleted or if its
+new value is not a string.
+
+We create an array of :c:type:`PyGetSetDef` structures::
+
+ static PyGetSetDef Custom_getsetters[] = {
+ {"first", (getter) Custom_getfirst, (setter) Custom_setfirst,
+ "first name", NULL},
+ {"last", (getter) Custom_getlast, (setter) Custom_setlast,
+ "last name", NULL},
+ {NULL} /* Sentinel */
+ };
+
+and register it in the :c:member:`~PyTypeObject.tp_getset` slot::
+
+ .tp_getset = Custom_getsetters,
+
+The last item in a :c:type:`PyGetSetDef` structure is the "closure" mentioned
+above. In this case, we aren't using a closure, so we just pass *NULL*.
+
+We also remove the member definitions for these attributes::
+
+ static PyMemberDef Custom_members[] = {
+ {"number", T_INT, offsetof(CustomObject, number), 0,
+ "custom number"},
+ {NULL} /* Sentinel */
+ };
+
+We also need to update the :c:member:`~PyTypeObject.tp_init` handler to only
+allow strings [#]_ to be passed::
+
+ static int
+ Custom_init(CustomObject *self, PyObject *args, PyObject *kwds)
+ {
+ static char *kwlist[] = {"first", "last", "number", NULL};
+ PyObject *first = NULL, *last = NULL, *tmp;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|UUi", kwlist,
+ &first, &last,
+ &self->number))
+ return -1;
+
+ if (first) {
+ tmp = self->first;
+ Py_INCREF(first);
+ self->first = first;
+ Py_DECREF(tmp);
+ }
+ if (last) {
+ tmp = self->last;
+ Py_INCREF(last);
+ self->last = last;
+ Py_DECREF(tmp);
+ }
+ return 0;
+ }
+
+With these changes, we can assure that the ``first`` and ``last`` members are
+never *NULL* so we can remove checks for *NULL* values in almost all cases.
+This means that most of the :c:func:`Py_XDECREF` calls can be converted to
+:c:func:`Py_DECREF` calls. The only place we can't change these calls is in
+the ``tp_dealloc`` implementation, where there is the possibility that the
+initialization of these members failed in ``tp_new``.
+
+We also rename the module initialization function and module name in the
+initialization function, as we did before, and we add an extra definition to the
+:file:`setup.py` file.
+
+
+Supporting cyclic garbage collection
+====================================
+
+Python has a :term:`cyclic garbage collector (GC) <garbage collection>` that
+can identify unneeded objects even when their reference counts are not zero.
+This can happen when objects are involved in cycles. For example, consider:
+
+.. code-block:: pycon
+
+ >>> l = []
+ >>> l.append(l)
+ >>> del l
+
+In this example, we create a list that contains itself. When we delete it, it
+still has a reference from itself. Its reference count doesn't drop to zero.
+Fortunately, Python's cyclic garbage collector will eventually figure out that
+the list is garbage and free it.
+
+In the second version of the :class:`Custom` example, we allowed any kind of
+object to be stored in the :attr:`first` or :attr:`last` attributes [#]_.
+In addition, in the second and third versions, we allowed subclassing
+:class:`Custom`, and subclasses may add arbitrary attributes. For any of
+those two reasons, :class:`Custom` objects can participate in cycles:
+
+.. code-block:: pycon
+
+ >>> import custom3
+ >>> class Derived(custom3.Custom): pass
+ ...
+ >>> n = Derived()
+ >>> n.some_attribute = n
+
+To allow a :class:`Custom` instance participating in a reference cycle to
+be properly detected and collected by the cyclic GC, our :class:`Custom` type
+needs to fill two additional slots and to set a flag that enables these slots:
+
+.. literalinclude:: ../includes/custom4.c
+
+
+First, the traversal method lets the cyclic GC know about subobjects that could
+participate in cycles::
+
+ static int
+ Custom_traverse(CustomObject *self, visitproc visit, void *arg)
+ {
+ int vret;
+ if (self->first) {
+ vret = visit(self->first, arg);
+ if (vret != 0)
+ return vret;
+ }
+ if (self->last) {
+ vret = visit(self->last, arg);
+ if (vret != 0)
+ return vret;
+ }
+ return 0;
+ }
+
+For each subobject that can participate in cycles, we need to call the
+:c:func:`visit` function, which is passed to the traversal method. The
+:c:func:`visit` function takes as arguments the subobject and the extra argument
+*arg* passed to the traversal method. It returns an integer value that must be
+returned if it is non-zero.
+
+Python provides a :c:func:`Py_VISIT` macro that automates calling visit
+functions. With :c:func:`Py_VISIT`, we can minimize the amount of boilerplate
+in ``Custom_traverse``::
+
+ static int
+ Custom_traverse(CustomObject *self, visitproc visit, void *arg)
+ {
+ Py_VISIT(self->first);
+ Py_VISIT(self->last);
+ return 0;
+ }
+
+.. note::
+ The :c:member:`~PyTypeObject.tp_traverse` implementation must name its
+ arguments exactly *visit* and *arg* in order to use :c:func:`Py_VISIT`.
+
+Second, we need to provide a method for clearing any subobjects that can
+participate in cycles::
+
+ static int
+ Custom_clear(CustomObject *self)
+ {
+ Py_CLEAR(self->first);
+ Py_CLEAR(self->last);
+ return 0;
+ }
+
+Notice the use of the :c:func:`Py_CLEAR` macro. It is the recommended and safe
+way to clear data attributes of arbitrary types while decrementing
+their reference counts. If you were to call :c:func:`Py_XDECREF` instead
+on the attribute before setting it to *NULL*, there is a possibility
+that the attribute's destructor would call back into code that reads the
+attribute again (*especially* if there is a reference cycle).
+
+.. note::
+ You could emulate :c:func:`Py_CLEAR` by writing::
+
+ PyObject *tmp;
+ tmp = self->first;
+ self->first = NULL;
+ Py_XDECREF(tmp);
+
+ Nevertheless, it is much easier and less error-prone to always
+ use :c:func:`Py_CLEAR` when deleting an attribute. Don't
+ try to micro-optimize at the expense of robustness!
+
+The deallocator ``Custom_dealloc`` may call arbitrary code when clearing
+attributes. It means the circular GC can be triggered inside the function.
+Since the GC assumes the reference count is not zero, we need to untrack the object
+from the GC by calling :c:func:`PyObject_GC_UnTrack` before clearing members.
+Here is our reimplemented deallocator using :c:func:`PyObject_GC_UnTrack`
+and ``Custom_clear``::
+
+ static void
+ Custom_dealloc(CustomObject *self)
+ {
+ PyObject_GC_UnTrack(self);
+ Custom_clear(self);
+ Py_TYPE(self)->tp_free((PyObject *) self);
+ }
+
+Finally, we add the :const:`Py_TPFLAGS_HAVE_GC` flag to the class flags::
+
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
+
+That's pretty much it. If we had written custom :c:member:`~PyTypeObject.tp_alloc` or
+:c:member:`~PyTypeObject.tp_free` handlers, we'd need to modify them for cyclic
+garbage collection. Most extensions will use the versions automatically provided.
+
+
+Subclassing other types
+=======================
+
+It is possible to create new extension types that are derived from existing
+types. It is easiest to inherit from the built-in types, since an extension can
+easily use the :c:type:`PyTypeObject` it needs. It can be difficult to share
+these :c:type:`PyTypeObject` structures between extension modules.
+
+In this example we will create a :class:`SubList` type that inherits from the
+built-in :class:`list` type. The new type will be completely compatible with
+regular lists, but will have an additional :meth:`increment` method that
+increases an internal counter:
+
+.. code-block:: pycon
+
+ >>> import sublist
+ >>> s = sublist.SubList(range(3))
+ >>> s.extend(s)
+ >>> print(len(s))
+ 6
+ >>> print(s.increment())
+ 1
+ >>> print(s.increment())
+ 2
+
+.. literalinclude:: ../includes/sublist.c
+
+
+As you can see, the source code closely resembles the :class:`Custom` examples in
+previous sections. We will break down the main differences between them. ::
+
+ typedef struct {
+ PyListObject list;
+ int state;
+ } SubListObject;
+
+The primary difference for derived type objects is that the base type's
+object structure must be the first value. The base type will already include
+the :c:func:`PyObject_HEAD` at the beginning of its structure.
+
+When a Python object is a :class:`SubList` instance, its ``PyObject *`` pointer
+can be safely cast to both ``PyListObject *`` and ``SubListObject *``::
+
+ static int
+ SubList_init(SubListObject *self, PyObject *args, PyObject *kwds)
+ {
+ if (PyList_Type.tp_init((PyObject *) self, args, kwds) < 0)
+ return -1;
+ self->state = 0;
+ return 0;
+ }
+
+We see above how to call through to the :attr:`__init__` method of the base
+type.
+
+This pattern is important when writing a type with custom
+:c:member:`~PyTypeObject.tp_new` and :c:member:`~PyTypeObject.tp_dealloc`
+members. The :c:member:`~PyTypeObject.tp_new` handler should not actually
+create the memory for the object with its :c:member:`~PyTypeObject.tp_alloc`,
+but let the base class handle it by calling its own :c:member:`~PyTypeObject.tp_new`.
+
+The :c:type:`PyTypeObject` struct supports a :c:member:`~PyTypeObject.tp_base`
+specifying the type's concrete base class. Due to cross-platform compiler
+issues, you can't fill that field directly with a reference to
+:c:type:`PyList_Type`; it should be done later in the module initialization
+function::
+
+ PyMODINIT_FUNC
+ PyInit_sublist(void)
+ {
+ PyObject* m;
+ SubListType.tp_base = &PyList_Type;
+ if (PyType_Ready(&SubListType) < 0)
+ return NULL;
+
+ m = PyModule_Create(&sublistmodule);
+ if (m == NULL)
+ return NULL;
+
+ Py_INCREF(&SubListType);
+ PyModule_AddObject(m, "SubList", (PyObject *) &SubListType);
+ return m;
+ }
+
+Before calling :c:func:`PyType_Ready`, the type structure must have the
+:c:member:`~PyTypeObject.tp_base` slot filled in. When we are deriving an
+existing type, it is not necessary to fill out the :c:member:`~PyTypeObject.tp_alloc`
+slot with :c:func:`PyType_GenericNew` -- the allocation function from the base
+type will be inherited.
+
+After that, calling :c:func:`PyType_Ready` and adding the type object to the
+module is the same as with the basic :class:`Custom` examples.
+
+
+.. rubric:: Footnotes
+
+.. [#] This is true when we know that the object is a basic type, like a string or a
+ float.
+
+.. [#] We relied on this in the :c:member:`~PyTypeObject.tp_dealloc` handler
+ in this example, because our type doesn't support garbage collection.
+
+.. [#] We now know that the first and last members are strings, so perhaps we
+ could be less careful about decrementing their reference counts, however,
+ we accept instances of string subclasses. Even though deallocating normal
+ strings won't call back into our objects, we can't guarantee that deallocating
+ an instance of a string subclass won't call back into our objects.
+
+.. [#] Also, even with our attributes restricted to string instances, the user
+ could pass arbitrary :class:`str` subclasses and therefore still create
+ reference cycles.
Python code), and operating system interfaces (system calls, filesystems, TCP/IP
sockets). Look at the table of contents for :ref:`library-index` to get an idea
of what's available. A wide variety of third-party extensions are also
-available. Consult `the Python Package Index <https://pypi.python.org/pypi>`_ to
+available. Consult `the Python Package Index <https://pypi.org>`_ to
find packages of interest to you.
library and will be able to skip this step.)
For third-party packages, search the `Python Package Index
-<https://pypi.python.org/pypi>`_ or try `Google <https://www.google.com>`_ or
+<https://pypi.org>`_ or try `Google <https://www.google.com>`_ or
another Web search engine. Searching for "Python" plus a keyword or two for
your topic of interest will usually find something helpful.
Occasionally, a user's environment is so full that the :program:`/usr/bin/env`
program fails; or there's no env program at all. In that case, you can try the
-following hack (due to Alex Rezinsky)::
+following hack (due to Alex Rezinsky):
+
+.. code-block:: sh
#! /bin/sh
""":"
"expect" library. A Python extension that interfaces to expect is called
"expy" and available from http://expectpy.sourceforge.net. A pure Python
solution that works like expect is `pexpect
- <https://pypi.python.org/pypi/pexpect/>`_.
+ <https://pypi.org/project/pexpect/>`_.
How do I access the serial (RS232) port?
:tocdepth: 2
+.. highlightlang:: none
+
.. _windows-faq:
=====================
Start menu; under Windows 7 the menu selection is :menuselection:`Start -->
Programs --> Accessories --> Command Prompt`. You should be able to recognize
when you have started such a window because you will see a Windows "command
-prompt", which usually looks like this::
+prompt", which usually looks like this:
+
+.. code-block:: doscon
C:\>
The letter may be different, and there might be other things after it, so you
-might just as easily see something like::
+might just as easily see something like:
+
+.. code-block:: doscon
D:\YourName\Projects\Python>
First, you need to make sure that your command window recognises the word
"python" as an instruction to start the interpreter. If you have opened a
command window, you should try entering the command ``python`` and hitting
-return.::
+return:
+
+.. code-block:: doscon
C:\Users\YourName> python
-You should then see something like::
+You should then see something like:
+
+.. code-block:: pycon
Python 3.3.0 (v3.3.0:bd8afb90ebf2, Sep 29 2012, 10:55:48) [MSC v.1600 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license" for more information.
You have started the interpreter in "interactive mode". That means you can enter
Python statements or expressions interactively and have them executed or
evaluated while you wait. This is one of Python's strongest features. Check it
-by entering a few expressions of your choice and seeing the results::
+by entering a few expressions of your choice and seeing the results:
+
+.. code-block:: pycon
>>> print("Hello")
Hello
How do I emulate os.kill() in Windows?
--------------------------------------
-Prior to Python 2.7 and 3.2, to terminate a process, you can use :mod:`ctypes`::
+Prior to Python 2.7 and 3.2, to terminate a process, you can use :mod:`ctypes`:
+
+.. code-block:: python
import ctypes
and loaders (in the :mod:`importlib.abc` module). You can create your own
ABCs with the :mod:`abc` module.
+ annotation
+ A label associated with a variable, a class
+ attribute or a function parameter or return value,
+ used by convention as a :term:`type hint`.
+
+ Annotations of local variables cannot be accessed at runtime, but
+ annotations of global variables, class attributes, and functions
+ are stored in the :attr:`__annotations__`
+ special attribute of modules, classes, and functions,
+ respectively.
+
+ See :term:`variable annotation`, :term:`function annotation`, :pep:`484`
+ and :pep:`526`, which describe this functionality.
+
argument
A value passed to a :term:`function` (or :term:`method`) when calling the
function. There are two kinds of argument:
location execution state (including local variables and pending
try-statements). When the *asynchronous generator iterator* effectively
resumes with another awaitable returned by :meth:`__anext__`, it
- picks-up where it left-off. See :pep:`492` and :pep:`525`.
+ picks up where it left off. See :pep:`492` and :pep:`525`.
asynchronous iterable
An object, that can be used in an :keyword:`async for` statement.
:data:`sys.stdout.buffer`, and instances of :class:`io.BytesIO` and
:class:`gzip.GzipFile`.
- .. seealso::
- A :term:`text file` reads and writes :class:`str` objects.
+ See also :term:`text file` for a file object able to read and write
+ :class:`str` objects.
bytes-like object
An object that supports the :ref:`bufferobjects` and can
normally contain method definitions which operate on instances of the
class.
+ class variable
+ A variable defined in a class and intended to be modified only at
+ class level (i.e., not in an instance of the class).
+
coercion
The implicit conversion of an instance of one type to another during an
operation which involves two arguments of the same type. For example,
and the :ref:`function` section.
function annotation
- An arbitrary metadata value associated with a function parameter or return
- value. Its syntax is explained in section :ref:`function`. Annotations
- may be accessed via the :attr:`__annotations__` special attribute of a
- function object.
+ An :term:`annotation` of a function parameter or return value.
- Python itself does not assign any particular meaning to function
- annotations. They are intended to be interpreted by third-party libraries
- or tools. See :pep:`3107`, which describes some of their potential uses.
+ Function annotations are usually used for
+ :term:`type hints <type hint>`: for example this function is expected to take two
+ :class:`int` arguments and is also expected to have an :class:`int`
+ return value::
+
+ def sum_two_numbers(a: int, b: int) -> int:
+ return a + b
+
+ Function annotation syntax is explained in section :ref:`function`.
+
+ See :term:`variable annotation` and :pep:`484`,
+ which describe this functionality.
__future__
A pseudo-module which programmers can use to enable new language features
Each :keyword:`yield` temporarily suspends processing, remembering the
location execution state (including local variables and pending
- try-statements). When the *generator iterator* resumes, it picks-up where
- it left-off (in contrast to functions which start fresh on every
+ try-statements). When the *generator iterator* resumes, it picks up where
+ it left off (in contrast to functions which start fresh on every
invocation).
.. index:: single: generator expression
lambda
An anonymous inline function consisting of a single :term:`expression`
which is evaluated when the function is called. The syntax to create
- a lambda function is ``lambda [arguments]: expression``
+ a lambda function is ``lambda [parameters]: expression``
LBYL
Look before you leap. This coding style explicitly tests for
:class:`str` or :class:`bytes` result instead, respectively. Introduced
by :pep:`519`.
+ PEP
+ Python Enhancement Proposal. A PEP is a design document
+ providing information to the Python community, or describing a new
+ feature for Python or its processes or environment. PEPs should
+ provide a concise technical specification and a rationale for proposed
+ features.
+
+ PEPs are intended to be the primary mechanisms for proposing major new
+ features, for collecting community input on an issue, and for documenting
+ the design decisions that have gone into Python. The PEP author is
+ responsible for building consensus within the community and documenting
+ dissenting opinions.
+
+ See :pep:`1`.
+
portion
A set of files in a single directory (possibly stored in a zip file)
that contribute to a namespace package, as defined in :pep:`420`.
:data:`sys.stdin`, :data:`sys.stdout`, and instances of
:class:`io.StringIO`.
- .. seealso::
- A :term:`binary file` reads and write :class:`bytes` objects.
+ See also :term:`binary file` for a file object able to read and write
+ :term:`bytes-like objects <bytes-like object>`.
triple-quoted string
A string which is bound by three instances of either a quotation mark
:attr:`~instance.__class__` attribute or can be retrieved with
``type(obj)``.
+ type alias
+ A synonym for a type, created by assigning the type to an identifier.
+
+ Type aliases are useful for simplifying :term:`type hints <type hint>`.
+ For example::
+
+ from typing import List, Tuple
+
+ def remove_gray_shades(
+ colors: List[Tuple[int, int, int]]) -> List[Tuple[int, int, int]]:
+ pass
+
+ could be made more readable like this::
+
+ from typing import List, Tuple
+
+ Color = Tuple[int, int, int]
+
+ def remove_gray_shades(colors: List[Color]) -> List[Color]:
+ pass
+
+ See :mod:`typing` and :pep:`484`, which describe this functionality.
+
+ type hint
+ An :term:`annotation` that specifies the expected type for a variable, a class
+ attribute, or a function parameter or return value.
+
+ Type hints are optional and are not enforced by Python but
+ they are useful to static type analysis tools, and aid IDEs with code
+ completion and refactoring.
+
+ Type hints of global variables, class attributes, and functions,
+ but not local variables, can be accessed using
+ :func:`typing.get_type_hints`.
+
+ See :mod:`typing` and :pep:`484`, which describe this functionality.
+
universal newlines
A manner of interpreting text streams in which all of the following are
recognized as ending a line: the Unix end-of-line convention ``'\n'``,
:func:`bytes.splitlines` for an additional use.
variable annotation
- A type metadata value associated with a module global variable or
- a class attribute. Its syntax is explained in section :ref:`annassign`.
- Annotations are stored in the :attr:`__annotations__` special
- attribute of a class or module object and can be accessed using
- :func:`typing.get_type_hints`.
+ An :term:`annotation` of a variable or a class attribute.
+
+ When annotating a variable or a class attribute, assignment is optional::
+
+ class C:
+ field: 'annotation'
+
+ Variable annotations are usually used for
+ :term:`type hints <type hint>`: for example this variable is expected to take
+ :class:`int` values::
+
+ count: int = 0
+
+ Variable annotation syntax is explained in section :ref:`annassign`.
- Python itself does not assign any particular meaning to variable
- annotations. They are intended to be interpreted by third-party libraries
- or type checking tools. See :pep:`526`, :pep:`484` which describe
- some of their potential uses.
+ See :term:`function annotation`, :pep:`484`
+ and :pep:`526`, which describe this functionality.
virtual environment
A cooperatively isolated runtime environment that allows Python users
Let's show the sort of functionality that we are going to explore in this
introductory tutorial by making use of the :command:`ls` command:
-.. code-block:: sh
+.. code-block:: shell-session
$ ls
cpython devguide prog.py pypy rm-unused-function.patch
Following is a result of running the code:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py
$ python3 prog.py --help
And running the code:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py
usage: prog.py [-h] echo
And we get:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py -h
usage: prog.py [-h] echo
Following is a result of running the code:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py 4
Traceback (most recent call last):
Following is a result of running the code:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py 4
16
And the output:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py --verbosity 1
verbosity turned on
And the output:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py --verbose
verbosity turned on
And here goes:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py -v
verbosity turned on
And now the output:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py
usage: prog.py [-h] [-v] square
And the output:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py 4
16
And the output:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py 4 -v 3
usage: prog.py [-h] [-v {0,1,2}] square
We have introduced another action, "count",
to count the number of occurrences of a specific optional arguments:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py 4
16
And this is what it gives:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py 4 -vvv
the square of 4 equals 16
And:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py 4
16
Output:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py
usage: prog.py [-h] [-v] x y
Output:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py 4 2
16
Our program is now simpler, and we've lost some functionality for the sake of
demonstration. Anyways, here's the output:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py 4 2
4^2 == 16
which tells us that we can either use ``-v`` or ``-q``,
but not both at the same time:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 prog.py --help
usage: prog.py [-h] [-v | -q] x y
compatibility for future versions. In other words: if you
maintain an external C extension for CPython, you're welcome
to experiment with Argument Clinic in your own code. But the
- version of Argument Clinic that ships with CPython 3.5 *could*
- be totally incompatible and break all your code.
+ version of Argument Clinic that ships with the next version
+ of CPython *could* be totally incompatible and break all your code.
The Goals Of Argument Clinic
============================
should get its own line. All the parameter lines should be
indented from the function name and the docstring.
- The general form of these parameter lines is as follows::
+ The general form of these parameter lines is as follows:
+
+ .. code-block:: none
name_of_parameter: converter
If the parameter has a default value, add that after the
- converter::
+ converter:
+
+ .. code-block:: none
name_of_parameter: converter = default_value
------------------------
Default values for parameters can be any of a number of values.
-At their simplest, they can be string, int, or float literals::
+At their simplest, they can be string, int, or float literals:
+
+.. code-block:: none
foo: str = "abc"
bar: int = 123
bat: float = 45.6
-They can also use any of Python's built-in constants::
+They can also use any of Python's built-in constants:
+
+.. code-block:: none
yep: bool = True
nope: bool = False
on objects. However, this support isn't exactly simple, because of some
non-obvious semantics.
-Consider the following example::
+Consider the following example:
+
+.. code-block:: none
foo: Py_ssize_t = sys.maxsize - 1
What namespace is available when the expression is evaluated? It's evaluated
in the context of the module the builtin came from. So, if your module has an
-attribute called "``max_widgets``", you may simply use it::
+attribute called "``max_widgets``", you may simply use it:
+
+.. code-block:: none
foo: Py_ssize_t = max_widgets
Evaluating default values only at runtime means Argument Clinic can't compute
the correct equivalent C default value. So you need to tell it explicitly.
When you use an expression, you must also specify the equivalent expression
-in C, using the ``c_default`` parameter to the converter::
+in C, using the ``c_default`` parameter to the converter:
+
+.. code-block:: none
foo: Py_ssize_t(c_default="PY_SSIZE_T_MAX - 1") = sys.maxsize - 1
A field, in this context, is a subsection of Clinic's output.
For example, the ``#define`` for the ``PyMethodDef`` structure
is a field, called ``methoddef_define``. Clinic has seven
- different fields it can output per function definition::
+ different fields it can output per function definition:
+
+ .. code-block:: none
docstring_prototype
docstring_definition
Clinic defines five new directives that let you reconfigure its output.
-The first new directive is ``dump``::
+The first new directive is ``dump``:
+
+.. code-block:: none
dump <destination>
``two-pass`` destinations.
The second new directive is ``output``. The most basic form of ``output``
-is like this::
+is like this:
+
+.. code-block:: none
output <field> <destination>
supports a special meta-destination, called ``everything``, which tells
Clinic to output *all* fields to that *destination*.
-``output`` has a number of other functions::
+``output`` has a number of other functions:
+
+.. code-block:: none
output push
output pop
Suppresses the ``impl_prototype``, write the ``docstring_definition``
and ``parser_definition`` to ``buffer``, write everything else to ``block``.
-The third new directive is ``destination``::
+The third new directive is ``destination``:
+
+.. code-block:: none
destination <name> <command> [...]
There are two defined subcommands: ``new`` and ``clear``.
-The ``new`` subcommand works like this::
+The ``new`` subcommand works like this:
+
+.. code-block:: none
destination <name> new <type>
A two-pass buffer, like the "two-pass" builtin destination above.
-The ``clear`` subcommand works like this::
+The ``clear`` subcommand works like this:
+
+.. code-block:: none
destination <name> clear
(I don't know what you'd need this for, but I thought maybe it'd be
useful while someone's experimenting.)
-The fourth new directive is ``set``::
+The fourth new directive is ``set``:
+
+.. code-block:: none
set line_prefix "string"
set line_suffix "string"
Turns into the string ``*/``, the end-comment text sequence for C files.
The final new directive is one you shouldn't need to use directly,
-called ``preserve``::
+called ``preserve``:
+
+.. code-block:: none
preserve
#endif /* HAVE_FUNCTIONNAME */
Then, remove those three lines from the ``PyMethodDef`` structure,
-replacing them with the macro Argument Clinic generated::
+replacing them with the macro Argument Clinic generated:
+
+.. code-block:: none
MODULE_FUNCTIONNAME_METHODDEF
need to be sent to the terminal to produce the right output. curses
doesn't provide many user-interface concepts such as buttons, checkboxes,
or dialogs; if you need such features, consider a user interface library such as
-`Urwid <https://pypi.python.org/pypi/urwid/>`_.
+`Urwid <https://pypi.org/project/urwid/>`_.
The curses library was originally written for BSD Unix; the later System V
versions of Unix from AT&T added many enhancements and new functions. BSD curses
The Windows version of Python doesn't include the :mod:`curses`
module. A ported version called `UniCurses
-<https://pypi.python.org/pypi/UniCurses>`_ is available. You could
+<https://pypi.org/project/UniCurses>`_ is available. You could
also try `the Console module <http://effbot.org/zone/console-index.htm>`_
written by Fredrik Lundh, which doesn't
use the same API as curses but provides cursor-addressable text output
The C curses library offers only very simple input mechanisms. Python's
:mod:`curses` module adds a basic text-input widget. (Other libraries
-such as `Urwid <https://pypi.python.org/pypi/urwid/>`_ have more extensive
+such as `Urwid <https://pypi.org/project/urwid/>`_ have more extensive
collections of widgets.)
There are two methods for getting input from a window:
For a `--enable-shared` build of CPython, the markers are contained within the
libpython shared library, and the probe's dotted path needs to reflect this. For
-example, this line from the above example::
+example, this line from the above example:
+
+.. code-block:: none
probe process("python").mark("function__entry") {
-should instead read::
+should instead read:
+
+.. code-block:: none
probe process("python").library("libpython3.6dm.so.1.0").mark("function__entry") {
def some_function():
module_logger.info('received a call to "some_function"')
-The output looks like this::
+The output looks like this:
+
+.. code-block:: none
2005-03-23 23:47:11,663 - spam_application - INFO -
creating an instance of auxiliary_module.Auxiliary
if __name__ == '__main__':
main()
-When run, the script should print something like the following::
+When run, the script should print something like the following:
+
+.. code-block:: none
0 Thread-1 Hi from myfunc
3 MainThread Hello from main
logger2.warning('Jail zesty vixen who grabbed pay from quack.')
logger2.error('The five boxing wizards jump quickly.')
-When you run this, on the console you will see ::
+When you run this, on the console you will see
+
+.. code-block:: none
root : INFO Jackdaws love my big sphinx of quartz.
myapp.area1 : INFO How quickly daft jumping zebras vex.
myapp.area2 : WARNING Jail zesty vixen who grabbed pay from quack.
myapp.area2 : ERROR The five boxing wizards jump quickly.
-and in the file you will see something like ::
+and in the file you will see something like
+
+.. code-block:: none
10-22 22:19 root INFO Jackdaws love my big sphinx of quartz.
10-22 22:19 myapp.area1 DEBUG Quick zephyrs blow, vexing daft Jim.
main()
First run the server, and then the client. On the client side, nothing is
-printed on the console; on the server side, you should see something like::
+printed on the console; on the server side, you should see something like:
+
+.. code-block:: none
About to start TCP server...
59 root INFO Jackdaws love my big sphinx of quartz.
lvlname = logging.getLevelName(lvl)
a2.log(lvl, 'A message at %s level with %d %s', lvlname, 2, 'parameters')
-which, when run, produces something like::
+which, when run, produces something like:
+
+.. code-block:: none
2010-09-06 22:38:15,292 a.b.c DEBUG IP: 123.231.231.123 User: fred A debug message
2010-09-06 22:38:15,300 a.b.c INFO IP: 192.168.0.1 User: sheila An info message with some parameters
print(filename)
The result should be 6 separate files, each with part of the log history for the
-application::
+application:
+
+.. code-block:: none
logging_rotatingfile_example.out
logging_rotatingfile_example.out.1
Inserting a BOM into messages sent to a SysLogHandler
-----------------------------------------------------
-`RFC 5424 <https://tools.ietf.org/html/rfc5424>`_ requires that a
+:rfc:`5424` requires that a
Unicode message be sent to a syslog daemon as a set of bytes which have the
following structure: an optional pure-ASCII component, followed by a UTF-8 Byte
-Order Mark (BOM), followed by Unicode encoded using UTF-8. (See the `relevant
-section of the specification <https://tools.ietf.org/html/rfc5424#section-6>`_.)
+Order Mark (BOM), followed by Unicode encoded using UTF-8. (See the
+:rfc:`relevant section of the specification <5424#section-6>`.)
In Python 3.1, code was added to
:class:`~logging.handlers.SysLogHandler` to insert a BOM into the message, but
As this behaviour is broken, the incorrect BOM insertion code is being removed
from Python 3.2.4 and later. However, it is not being replaced, and if you
-want to produce RFC 5424-compliant messages which include a BOM, an optional
+want to produce :rfc:`5424`-compliant messages which include a BOM, an optional
pure-ASCII sequence before it and arbitrary Unicode after it, encoded using
UTF-8, then you need to do the following:
The formatted message *will* be encoded using UTF-8 encoding by
``SysLogHandler``. If you follow the above rules, you should be able to produce
-RFC 5424-compliant messages. If you don't, logging may not complain, but your
+:rfc:`5424`-compliant messages. If you don't, logging may not complain, but your
messages will not be RFC 5424-compliant, and your syslog daemon may complain.
logging.basicConfig(level=logging.INFO, format='%(message)s')
logging.info(_('message 1', foo='bar', bar='baz', num=123, fnum=123.456))
-If the above script is run, it prints::
+If the above script is run, it prints:
+
+.. code-block:: none
message 1 >>> {"fnum": 123.456, "num": 123, "bar": "baz", "foo": "bar"}
if __name__ == '__main__':
main()
-When the above script is run, it prints::
+When the above script is run, it prints:
+
+.. code-block:: none
message 1 >>> {"snowman": "\u2603", "set_value": [1, 2, 3]}
This example shows how you can pass configuration data to the callable which
constructs the instance, in the form of keyword parameters. When run, the above
-script will print::
+script will print:
+
+.. code-block:: none
changed: hello
if __name__ == '__main__':
main()
-When run, this produces a file with exactly two lines::
+When run, this produces a file with exactly two lines:
+
+.. code-block:: none
28/01/2015 07:21:23|INFO|Sample message|
28/01/2015 07:21:23|ERROR|ZeroDivisionError: integer division or modulo by zero|'Traceback (most recent call last):\n File "logtest7.py", line 30, in main\n x = 1 / 0\nZeroDivisionError: integer division or modulo by zero'|
write_line('Calling decorated foo with True')
assert decorated_foo(True)
-When this script is run, the following output should be observed::
+When this script is run, the following output should be observed:
+
+.. code-block:: none
Calling undecorated foo with False
about to log at DEBUG ...
logging.config.dictConfig(LOGGING)
logging.warning('The local time is %s', time.asctime())
-When this script is run, it should print something like::
+When this script is run, it should print something like:
+
+.. code-block:: none
2015-10-17 12:53:29,501 The local time is Sat Oct 17 13:53:29 2015
2015-10-17 13:53:29,501 The local time is Sat Oct 17 13:53:29 2015
logging.warning('And this, too')
And now if we open the file and look at what we have, we should find the log
-messages::
+messages:
+
+.. code-block:: none
DEBUG:root:This message should go to the log file
INFO:root:So should this
threshold for tracking. In this case, because we set the threshold to
``DEBUG``, all of the messages were printed.
-If you want to set the logging level from a command-line option such as::
+If you want to set the logging level from a command-line option such as:
+
+.. code-block:: none
--log=INFO
def do_something():
logging.info('Doing something')
-If you run *myapp.py*, you should see this in *myapp.log*::
+If you run *myapp.py*, you should see this in *myapp.log*:
+
+.. code-block:: none
INFO:root:Started
INFO:root:Doing something
logging.info('So should this')
logging.warning('And this, too')
-which would print::
+which would print:
+
+.. code-block:: none
DEBUG:This message should appear on the console
INFO:So should this
logging.basicConfig(format='%(asctime)s %(message)s')
logging.warning('is when this event was logged.')
-which should print something like this::
+which should print something like this:
+
+.. code-block:: none
2010-12-12 11:41:42,612 is when this event was logged.
-The default format for date/time display (shown above) is ISO8601. If you need
-more control over the formatting of the date/time, provide a *datefmt*
-argument to ``basicConfig``, as in this example::
+The default format for date/time display (shown above) is like ISO8601 or
+:rfc:`3339`. If you need more control over the formatting of the date/time, provide
+a *datefmt* argument to ``basicConfig``, as in this example::
import logging
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logging.warning('is when this event was logged.')
-which would display something like this::
+which would display something like this:
+
+.. code-block:: none
12/12/2010 11:46:36 AM is when this event was logged.
of the console (``sys.stderr``) and a default format for the displayed
message before delegating to the root logger to do the actual message output.
-The default format set by :func:`basicConfig` for messages is::
+The default format set by :func:`basicConfig` for messages is:
+
+.. code-block:: none
severity:logger name:message
.. method:: logging.Formatter.__init__(fmt=None, datefmt=None, style='%')
If there is no message format string, the default is to use the
-raw message. If there is no date format string, the default date format is::
+raw message. If there is no date format string, the default date format is:
+
+.. code-block:: none
%Y-%m-%d %H:%M:%S
logger.error('error message')
logger.critical('critical message')
-Here is the logging.conf file::
+Here is the logging.conf file:
+
+.. code-block:: ini
[loggers]
keys=root,simpleExample
socket, or use whatever approach makes sense for your application.
Here's an example of the same configuration as above, in YAML format for
-the new dictionary-based approach::
+the new dictionary-based approach:
+
+.. code-block:: yaml
version: 1
formatters:
.. _2to3: https://docs.python.org/3/library/2to3.html
-.. _caniusepython3: https://pypi.python.org/pypi/caniusepython3
+.. _caniusepython3: https://pypi.org/project/caniusepython3
.. _cheat sheet: http://python-future.org/compatible_idioms.html
-.. _coverage.py: https://pypi.python.org/pypi/coverage
+.. _coverage.py: https://pypi.org/project/coverage
.. _Futurize: http://python-future.org/automatic_conversion.html
.. _importlib: https://docs.python.org/3/library/importlib.html#module-importlib
-.. _importlib2: https://pypi.python.org/pypi/importlib2
+.. _importlib2: https://pypi.org/project/importlib2
.. _Modernize: https://python-modernize.readthedocs.org/en/latest/
.. _mypy: http://mypy-lang.org/
.. _Porting to Python 3: http://python3porting.com/
-.. _Pylint: https://pypi.python.org/pypi/pylint
+.. _Pylint: https://pypi.org/project/pylint
.. _Python 3 Q & A: https://ncoghlan-devs-python-notes.readthedocs.org/en/latest/python3/questions_and_answers.html
.. _pytype: https://github.com/google/pytype
.. _python-future: http://python-future.org/
.. _python-porting: https://mail.python.org/mailman/listinfo/python-porting
-.. _six: https://pypi.python.org/pypi/six
-.. _tox: https://pypi.python.org/pypi/tox
-.. _trove classifier: https://pypi.python.org/pypi?%3Aaction=list_classifiers
+.. _six: https://pypi.org/project/six
+.. _tox: https://pypi.org/project/tox
+.. _trove classifier: https://pypi.org/classifiers
.. _"What's New": https://docs.python.org/3/whatsnew/index.html
or not. Regular expressions are often used to dissect strings by writing a RE
divided into several subgroups which match different components of interest.
For example, an RFC-822 header line is divided into a header name and a value,
-separated by a ``':'``, like this::
+separated by a ``':'``, like this:
+
+.. code-block:: none
From: author@example.com
User-Agent: Thunderbird 1.5.0.9 (X11/20061227)
For a while people just wrote programs that didn't display accents.
In the mid-1980s an Apple II BASIC program written by a French speaker
-might have lines like these::
+might have lines like these:
+
+.. code-block:: basic
PRINT "MISE A JOUR TERMINEE"
PRINT "PARAMETRES ENREGISTRES"
with urllib.request.urlopen('http://python.org/') as response:
html = response.read()
-If you wish to retrieve a resource via URL and store it in a temporary location,
-you can do so via the :func:`~urllib.request.urlretrieve` function::
+If you wish to retrieve a resource via URL and store it in a temporary
+location, you can do so via the :func:`shutil.copyfileobj` and
+:func:`tempfile.NamedTemporaryFile` functions::
+ import shutil
+ import tempfile
import urllib.request
- local_filename, headers = urllib.request.urlretrieve('http://python.org/')
- html = open(local_filename)
+
+ with urllib.request.urlopen('http://python.org/') as response:
+ with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
+ shutil.copyfileobj(response, tmp_file)
+
+ with open(tmp_file.name) as html:
+ pass
Many uses of urllib will be that simple (note that instead of an 'http:' URL we
could have used a URL starting with 'ftp:', 'file:', etc.). However, it's the
urlopen will raise an :exc:`HTTPError`. Typical errors include '404' (page not
found), '403' (request forbidden), and '401' (authentication required).
-See section 10 of RFC 2616 for a reference on all the HTTP error codes.
+See section 10 of :rfc:`2616` for a reference on all the HTTP error codes.
The :exc:`HTTPError` instance raised will have an integer 'code' attribute, which
corresponds to the error sent by the server.
codes in the 400--599 range.
:attr:`http.server.BaseHTTPRequestHandler.responses` is a useful dictionary of
-response codes in that shows all the response codes used by RFC 2616. The
+response codes in that shows all the response codes used by :rfc:`2616`. The
dictionary is reproduced here for convenience ::
# Table mapping response codes to messages; entries have the
and a 'realm'. The header looks like: ``WWW-Authenticate: SCHEME
realm="REALM"``.
-e.g. ::
+e.g.
+
+.. code-block:: none
WWW-Authenticate: Basic realm="cPanel Users"
--- /dev/null
+#include <Python.h>
+
+typedef struct {
+ PyObject_HEAD
+ /* Type-specific fields go here. */
+} CustomObject;
+
+static PyTypeObject CustomType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "custom.Custom",
+ .tp_doc = "Custom objects",
+ .tp_basicsize = sizeof(CustomObject),
+ .tp_itemsize = 0,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_new = PyType_GenericNew,
+};
+
+static PyModuleDef custommodule = {
+ PyModuleDef_HEAD_INIT,
+ .m_name = "custom",
+ .m_doc = "Example module that creates an extension type.",
+ .m_size = -1,
+};
+
+PyMODINIT_FUNC
+PyInit_custom(void)
+{
+ PyObject *m;
+ if (PyType_Ready(&CustomType) < 0)
+ return NULL;
+
+ m = PyModule_Create(&custommodule);
+ if (m == NULL)
+ return NULL;
+
+ Py_INCREF(&CustomType);
+ PyModule_AddObject(m, "Custom", (PyObject *) &CustomType);
+ return m;
+}
--- /dev/null
+#include <Python.h>
+#include "structmember.h"
+
+typedef struct {
+ PyObject_HEAD
+ PyObject *first; /* first name */
+ PyObject *last; /* last name */
+ int number;
+} CustomObject;
+
+static void
+Custom_dealloc(CustomObject *self)
+{
+ Py_XDECREF(self->first);
+ Py_XDECREF(self->last);
+ Py_TYPE(self)->tp_free((PyObject *) self);
+}
+
+static PyObject *
+Custom_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ CustomObject *self;
+ self = (CustomObject *) type->tp_alloc(type, 0);
+ if (self != NULL) {
+ self->first = PyUnicode_FromString("");
+ if (self->first == NULL) {
+ Py_DECREF(self);
+ return NULL;
+ }
+ self->last = PyUnicode_FromString("");
+ if (self->last == NULL) {
+ Py_DECREF(self);
+ return NULL;
+ }
+ self->number = 0;
+ }
+ return (PyObject *) self;
+}
+
+static int
+Custom_init(CustomObject *self, PyObject *args, PyObject *kwds)
+{
+ static char *kwlist[] = {"first", "last", "number", NULL};
+ PyObject *first = NULL, *last = NULL, *tmp;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOi", kwlist,
+ &first, &last,
+ &self->number))
+ return -1;
+
+ if (first) {
+ tmp = self->first;
+ Py_INCREF(first);
+ self->first = first;
+ Py_XDECREF(tmp);
+ }
+ if (last) {
+ tmp = self->last;
+ Py_INCREF(last);
+ self->last = last;
+ Py_XDECREF(tmp);
+ }
+ return 0;
+}
+
+static PyMemberDef Custom_members[] = {
+ {"first", T_OBJECT_EX, offsetof(CustomObject, first), 0,
+ "first name"},
+ {"last", T_OBJECT_EX, offsetof(CustomObject, last), 0,
+ "last name"},
+ {"number", T_INT, offsetof(CustomObject, number), 0,
+ "custom number"},
+ {NULL} /* Sentinel */
+};
+
+static PyObject *
+Custom_name(CustomObject *self, PyObject *Py_UNUSED(ignored))
+{
+ if (self->first == NULL) {
+ PyErr_SetString(PyExc_AttributeError, "first");
+ return NULL;
+ }
+ if (self->last == NULL) {
+ PyErr_SetString(PyExc_AttributeError, "last");
+ return NULL;
+ }
+ return PyUnicode_FromFormat("%S %S", self->first, self->last);
+}
+
+static PyMethodDef Custom_methods[] = {
+ {"name", (PyCFunction) Custom_name, METH_NOARGS,
+ "Return the name, combining the first and last name"
+ },
+ {NULL} /* Sentinel */
+};
+
+static PyTypeObject CustomType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "custom2.Custom",
+ .tp_doc = "Custom objects",
+ .tp_basicsize = sizeof(CustomObject),
+ .tp_itemsize = 0,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
+ .tp_new = Custom_new,
+ .tp_init = (initproc) Custom_init,
+ .tp_dealloc = (destructor) Custom_dealloc,
+ .tp_members = Custom_members,
+ .tp_methods = Custom_methods,
+};
+
+static PyModuleDef custommodule = {
+ PyModuleDef_HEAD_INIT,
+ .m_name = "custom2",
+ .m_doc = "Example module that creates an extension type.",
+ .m_size = -1,
+};
+
+PyMODINIT_FUNC
+PyInit_custom2(void)
+{
+ PyObject *m;
+ if (PyType_Ready(&CustomType) < 0)
+ return NULL;
+
+ m = PyModule_Create(&custommodule);
+ if (m == NULL)
+ return NULL;
+
+ Py_INCREF(&CustomType);
+ PyModule_AddObject(m, "Custom", (PyObject *) &CustomType);
+ return m;
+}
--- /dev/null
+#include <Python.h>
+#include "structmember.h"
+
+typedef struct {
+ PyObject_HEAD
+ PyObject *first; /* first name */
+ PyObject *last; /* last name */
+ int number;
+} CustomObject;
+
+static void
+Custom_dealloc(CustomObject *self)
+{
+ Py_XDECREF(self->first);
+ Py_XDECREF(self->last);
+ Py_TYPE(self)->tp_free((PyObject *) self);
+}
+
+static PyObject *
+Custom_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ CustomObject *self;
+ self = (CustomObject *) type->tp_alloc(type, 0);
+ if (self != NULL) {
+ self->first = PyUnicode_FromString("");
+ if (self->first == NULL) {
+ Py_DECREF(self);
+ return NULL;
+ }
+ self->last = PyUnicode_FromString("");
+ if (self->last == NULL) {
+ Py_DECREF(self);
+ return NULL;
+ }
+ self->number = 0;
+ }
+ return (PyObject *) self;
+}
+
+static int
+Custom_init(CustomObject *self, PyObject *args, PyObject *kwds)
+{
+ static char *kwlist[] = {"first", "last", "number", NULL};
+ PyObject *first = NULL, *last = NULL, *tmp;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|UUi", kwlist,
+ &first, &last,
+ &self->number))
+ return -1;
+
+ if (first) {
+ tmp = self->first;
+ Py_INCREF(first);
+ self->first = first;
+ Py_DECREF(tmp);
+ }
+ if (last) {
+ tmp = self->last;
+ Py_INCREF(last);
+ self->last = last;
+ Py_DECREF(tmp);
+ }
+ return 0;
+}
+
+static PyMemberDef Custom_members[] = {
+ {"number", T_INT, offsetof(CustomObject, number), 0,
+ "custom number"},
+ {NULL} /* Sentinel */
+};
+
+static PyObject *
+Custom_getfirst(CustomObject *self, void *closure)
+{
+ Py_INCREF(self->first);
+ return self->first;
+}
+
+static int
+Custom_setfirst(CustomObject *self, PyObject *value, void *closure)
+{
+ PyObject *tmp;
+ if (value == NULL) {
+ PyErr_SetString(PyExc_TypeError, "Cannot delete the first attribute");
+ return -1;
+ }
+ if (!PyUnicode_Check(value)) {
+ PyErr_SetString(PyExc_TypeError,
+ "The first attribute value must be a string");
+ return -1;
+ }
+ tmp = self->first;
+ Py_INCREF(value);
+ self->first = value;
+ Py_DECREF(tmp);
+ return 0;
+}
+
+static PyObject *
+Custom_getlast(CustomObject *self, void *closure)
+{
+ Py_INCREF(self->last);
+ return self->last;
+}
+
+static int
+Custom_setlast(CustomObject *self, PyObject *value, void *closure)
+{
+ PyObject *tmp;
+ if (value == NULL) {
+ PyErr_SetString(PyExc_TypeError, "Cannot delete the last attribute");
+ return -1;
+ }
+ if (!PyUnicode_Check(value)) {
+ PyErr_SetString(PyExc_TypeError,
+ "The last attribute value must be a string");
+ return -1;
+ }
+ tmp = self->last;
+ Py_INCREF(value);
+ self->last = value;
+ Py_DECREF(tmp);
+ return 0;
+}
+
+static PyGetSetDef Custom_getsetters[] = {
+ {"first", (getter) Custom_getfirst, (setter) Custom_setfirst,
+ "first name", NULL},
+ {"last", (getter) Custom_getlast, (setter) Custom_setlast,
+ "last name", NULL},
+ {NULL} /* Sentinel */
+};
+
+static PyObject *
+Custom_name(CustomObject *self, PyObject *Py_UNUSED(ignored))
+{
+ return PyUnicode_FromFormat("%S %S", self->first, self->last);
+}
+
+static PyMethodDef Custom_methods[] = {
+ {"name", (PyCFunction) Custom_name, METH_NOARGS,
+ "Return the name, combining the first and last name"
+ },
+ {NULL} /* Sentinel */
+};
+
+static PyTypeObject CustomType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "custom3.Custom",
+ .tp_doc = "Custom objects",
+ .tp_basicsize = sizeof(CustomObject),
+ .tp_itemsize = 0,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
+ .tp_new = Custom_new,
+ .tp_init = (initproc) Custom_init,
+ .tp_dealloc = (destructor) Custom_dealloc,
+ .tp_members = Custom_members,
+ .tp_methods = Custom_methods,
+ .tp_getset = Custom_getsetters,
+};
+
+static PyModuleDef custommodule = {
+ PyModuleDef_HEAD_INIT,
+ .m_name = "custom3",
+ .m_doc = "Example module that creates an extension type.",
+ .m_size = -1,
+};
+
+PyMODINIT_FUNC
+PyInit_custom3(void)
+{
+ PyObject *m;
+ if (PyType_Ready(&CustomType) < 0)
+ return NULL;
+
+ m = PyModule_Create(&custommodule);
+ if (m == NULL)
+ return NULL;
+
+ Py_INCREF(&CustomType);
+ PyModule_AddObject(m, "Custom", (PyObject *) &CustomType);
+ return m;
+}
--- /dev/null
+#include <Python.h>
+#include "structmember.h"
+
+typedef struct {
+ PyObject_HEAD
+ PyObject *first; /* first name */
+ PyObject *last; /* last name */
+ int number;
+} CustomObject;
+
+static int
+Custom_traverse(CustomObject *self, visitproc visit, void *arg)
+{
+ Py_VISIT(self->first);
+ Py_VISIT(self->last);
+ return 0;
+}
+
+static int
+Custom_clear(CustomObject *self)
+{
+ Py_CLEAR(self->first);
+ Py_CLEAR(self->last);
+ return 0;
+}
+
+static void
+Custom_dealloc(CustomObject *self)
+{
+ PyObject_GC_UnTrack(self);
+ Custom_clear(self);
+ Py_TYPE(self)->tp_free((PyObject *) self);
+}
+
+static PyObject *
+Custom_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ CustomObject *self;
+ self = (CustomObject *) type->tp_alloc(type, 0);
+ if (self != NULL) {
+ self->first = PyUnicode_FromString("");
+ if (self->first == NULL) {
+ Py_DECREF(self);
+ return NULL;
+ }
+ self->last = PyUnicode_FromString("");
+ if (self->last == NULL) {
+ Py_DECREF(self);
+ return NULL;
+ }
+ self->number = 0;
+ }
+ return (PyObject *) self;
+}
+
+static int
+Custom_init(CustomObject *self, PyObject *args, PyObject *kwds)
+{
+ static char *kwlist[] = {"first", "last", "number", NULL};
+ PyObject *first = NULL, *last = NULL, *tmp;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|UUi", kwlist,
+ &first, &last,
+ &self->number))
+ return -1;
+
+ if (first) {
+ tmp = self->first;
+ Py_INCREF(first);
+ self->first = first;
+ Py_DECREF(tmp);
+ }
+ if (last) {
+ tmp = self->last;
+ Py_INCREF(last);
+ self->last = last;
+ Py_DECREF(tmp);
+ }
+ return 0;
+}
+
+static PyMemberDef Custom_members[] = {
+ {"number", T_INT, offsetof(CustomObject, number), 0,
+ "custom number"},
+ {NULL} /* Sentinel */
+};
+
+static PyObject *
+Custom_getfirst(CustomObject *self, void *closure)
+{
+ Py_INCREF(self->first);
+ return self->first;
+}
+
+static int
+Custom_setfirst(CustomObject *self, PyObject *value, void *closure)
+{
+ if (value == NULL) {
+ PyErr_SetString(PyExc_TypeError, "Cannot delete the first attribute");
+ return -1;
+ }
+ if (!PyUnicode_Check(value)) {
+ PyErr_SetString(PyExc_TypeError,
+ "The first attribute value must be a string");
+ return -1;
+ }
+ Py_INCREF(value);
+ Py_CLEAR(self->first);
+ self->first = value;
+ return 0;
+}
+
+static PyObject *
+Custom_getlast(CustomObject *self, void *closure)
+{
+ Py_INCREF(self->last);
+ return self->last;
+}
+
+static int
+Custom_setlast(CustomObject *self, PyObject *value, void *closure)
+{
+ if (value == NULL) {
+ PyErr_SetString(PyExc_TypeError, "Cannot delete the last attribute");
+ return -1;
+ }
+ if (!PyUnicode_Check(value)) {
+ PyErr_SetString(PyExc_TypeError,
+ "The last attribute value must be a string");
+ return -1;
+ }
+ Py_INCREF(value);
+ Py_CLEAR(self->last);
+ self->last = value;
+ return 0;
+}
+
+static PyGetSetDef Custom_getsetters[] = {
+ {"first", (getter) Custom_getfirst, (setter) Custom_setfirst,
+ "first name", NULL},
+ {"last", (getter) Custom_getlast, (setter) Custom_setlast,
+ "last name", NULL},
+ {NULL} /* Sentinel */
+};
+
+static PyObject *
+Custom_name(CustomObject *self, PyObject *Py_UNUSED(ignored))
+{
+ return PyUnicode_FromFormat("%S %S", self->first, self->last);
+}
+
+static PyMethodDef Custom_methods[] = {
+ {"name", (PyCFunction) Custom_name, METH_NOARGS,
+ "Return the name, combining the first and last name"
+ },
+ {NULL} /* Sentinel */
+};
+
+static PyTypeObject CustomType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "custom4.Custom",
+ .tp_doc = "Custom objects",
+ .tp_basicsize = sizeof(CustomObject),
+ .tp_itemsize = 0,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
+ .tp_new = Custom_new,
+ .tp_init = (initproc) Custom_init,
+ .tp_dealloc = (destructor) Custom_dealloc,
+ .tp_traverse = (traverseproc) Custom_traverse,
+ .tp_clear = (inquiry) Custom_clear,
+ .tp_members = Custom_members,
+ .tp_methods = Custom_methods,
+ .tp_getset = Custom_getsetters,
+};
+
+static PyModuleDef custommodule = {
+ PyModuleDef_HEAD_INIT,
+ .m_name = "custom4",
+ .m_doc = "Example module that creates an extension type.",
+ .m_size = -1,
+};
+
+PyMODINIT_FUNC
+PyInit_custom4(void)
+{
+ PyObject *m;
+ if (PyType_Ready(&CustomType) < 0)
+ return NULL;
+
+ m = PyModule_Create(&custommodule);
+ if (m == NULL)
+ return NULL;
+
+ Py_INCREF(&CustomType);
+ PyModule_AddObject(m, "Custom", (PyObject *) &CustomType);
+ return m;
+}
+++ /dev/null
-#include <Python.h>
-
-typedef struct {
- PyObject_HEAD
- /* Type-specific fields go here. */
-} noddy_NoddyObject;
-
-static PyTypeObject noddy_NoddyType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "noddy.Noddy", /* tp_name */
- sizeof(noddy_NoddyObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- "Noddy objects", /* tp_doc */
-};
-
-static PyModuleDef noddymodule = {
- PyModuleDef_HEAD_INIT,
- "noddy",
- "Example module that creates an extension type.",
- -1,
- NULL, NULL, NULL, NULL, NULL
-};
-
-PyMODINIT_FUNC
-PyInit_noddy(void)
-{
- PyObject* m;
-
- noddy_NoddyType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&noddy_NoddyType) < 0)
- return NULL;
-
- m = PyModule_Create(&noddymodule);
- if (m == NULL)
- return NULL;
-
- Py_INCREF(&noddy_NoddyType);
- PyModule_AddObject(m, "Noddy", (PyObject *)&noddy_NoddyType);
- return m;
-}
+++ /dev/null
-#include <Python.h>
-#include "structmember.h"
-
-typedef struct {
- PyObject_HEAD
- PyObject *first; /* first name */
- PyObject *last; /* last name */
- int number;
-} Noddy;
-
-static void
-Noddy_dealloc(Noddy* self)
-{
- Py_XDECREF(self->first);
- Py_XDECREF(self->last);
- Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject *
-Noddy_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
-{
- Noddy *self;
-
- self = (Noddy *)type->tp_alloc(type, 0);
- if (self != NULL) {
- self->first = PyUnicode_FromString("");
- if (self->first == NULL) {
- Py_DECREF(self);
- return NULL;
- }
-
- self->last = PyUnicode_FromString("");
- if (self->last == NULL) {
- Py_DECREF(self);
- return NULL;
- }
-
- self->number = 0;
- }
-
- return (PyObject *)self;
-}
-
-static int
-Noddy_init(Noddy *self, PyObject *args, PyObject *kwds)
-{
- PyObject *first=NULL, *last=NULL, *tmp;
-
- static char *kwlist[] = {"first", "last", "number", NULL};
-
- if (! PyArg_ParseTupleAndKeywords(args, kwds, "|OOi", kwlist,
- &first, &last,
- &self->number))
- return -1;
-
- if (first) {
- tmp = self->first;
- Py_INCREF(first);
- self->first = first;
- Py_XDECREF(tmp);
- }
-
- if (last) {
- tmp = self->last;
- Py_INCREF(last);
- self->last = last;
- Py_XDECREF(tmp);
- }
-
- return 0;
-}
-
-
-static PyMemberDef Noddy_members[] = {
- {"first", T_OBJECT_EX, offsetof(Noddy, first), 0,
- "first name"},
- {"last", T_OBJECT_EX, offsetof(Noddy, last), 0,
- "last name"},
- {"number", T_INT, offsetof(Noddy, number), 0,
- "noddy number"},
- {NULL} /* Sentinel */
-};
-
-static PyObject *
-Noddy_name(Noddy* self)
-{
- if (self->first == NULL) {
- PyErr_SetString(PyExc_AttributeError, "first");
- return NULL;
- }
-
- if (self->last == NULL) {
- PyErr_SetString(PyExc_AttributeError, "last");
- return NULL;
- }
-
- return PyUnicode_FromFormat("%S %S", self->first, self->last);
-}
-
-static PyMethodDef Noddy_methods[] = {
- {"name", (PyCFunction)Noddy_name, METH_NOARGS,
- "Return the name, combining the first and last name"
- },
- {NULL} /* Sentinel */
-};
-
-static PyTypeObject NoddyType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "noddy.Noddy", /* tp_name */
- sizeof(Noddy), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)Noddy_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT |
- Py_TPFLAGS_BASETYPE, /* tp_flags */
- "Noddy objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- Noddy_methods, /* tp_methods */
- Noddy_members, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)Noddy_init, /* tp_init */
- 0, /* tp_alloc */
- Noddy_new, /* tp_new */
-};
-
-static PyModuleDef noddy2module = {
- PyModuleDef_HEAD_INIT,
- "noddy2",
- "Example module that creates an extension type.",
- -1,
- NULL, NULL, NULL, NULL, NULL
-};
-
-PyMODINIT_FUNC
-PyInit_noddy2(void)
-{
- PyObject* m;
-
- if (PyType_Ready(&NoddyType) < 0)
- return NULL;
-
- m = PyModule_Create(&noddy2module);
- if (m == NULL)
- return NULL;
-
- Py_INCREF(&NoddyType);
- PyModule_AddObject(m, "Noddy", (PyObject *)&NoddyType);
- return m;
-}
+++ /dev/null
-#include <Python.h>
-#include "structmember.h"
-
-typedef struct {
- PyObject_HEAD
- PyObject *first;
- PyObject *last;
- int number;
-} Noddy;
-
-static void
-Noddy_dealloc(Noddy* self)
-{
- Py_XDECREF(self->first);
- Py_XDECREF(self->last);
- Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject *
-Noddy_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
-{
- Noddy *self;
-
- self = (Noddy *)type->tp_alloc(type, 0);
- if (self != NULL) {
- self->first = PyUnicode_FromString("");
- if (self->first == NULL) {
- Py_DECREF(self);
- return NULL;
- }
-
- self->last = PyUnicode_FromString("");
- if (self->last == NULL) {
- Py_DECREF(self);
- return NULL;
- }
-
- self->number = 0;
- }
-
- return (PyObject *)self;
-}
-
-static int
-Noddy_init(Noddy *self, PyObject *args, PyObject *kwds)
-{
- PyObject *first=NULL, *last=NULL, *tmp;
-
- static char *kwlist[] = {"first", "last", "number", NULL};
-
- if (! PyArg_ParseTupleAndKeywords(args, kwds, "|SSi", kwlist,
- &first, &last,
- &self->number))
- return -1;
-
- if (first) {
- tmp = self->first;
- Py_INCREF(first);
- self->first = first;
- Py_DECREF(tmp);
- }
-
- if (last) {
- tmp = self->last;
- Py_INCREF(last);
- self->last = last;
- Py_DECREF(tmp);
- }
-
- return 0;
-}
-
-static PyMemberDef Noddy_members[] = {
- {"number", T_INT, offsetof(Noddy, number), 0,
- "noddy number"},
- {NULL} /* Sentinel */
-};
-
-static PyObject *
-Noddy_getfirst(Noddy *self, void *closure)
-{
- Py_INCREF(self->first);
- return self->first;
-}
-
-static int
-Noddy_setfirst(Noddy *self, PyObject *value, void *closure)
-{
- if (value == NULL) {
- PyErr_SetString(PyExc_TypeError, "Cannot delete the first attribute");
- return -1;
- }
-
- if (! PyUnicode_Check(value)) {
- PyErr_SetString(PyExc_TypeError,
- "The first attribute value must be a string");
- return -1;
- }
-
- Py_DECREF(self->first);
- Py_INCREF(value);
- self->first = value;
-
- return 0;
-}
-
-static PyObject *
-Noddy_getlast(Noddy *self, void *closure)
-{
- Py_INCREF(self->last);
- return self->last;
-}
-
-static int
-Noddy_setlast(Noddy *self, PyObject *value, void *closure)
-{
- if (value == NULL) {
- PyErr_SetString(PyExc_TypeError, "Cannot delete the last attribute");
- return -1;
- }
-
- if (! PyUnicode_Check(value)) {
- PyErr_SetString(PyExc_TypeError,
- "The last attribute value must be a string");
- return -1;
- }
-
- Py_DECREF(self->last);
- Py_INCREF(value);
- self->last = value;
-
- return 0;
-}
-
-static PyGetSetDef Noddy_getseters[] = {
- {"first",
- (getter)Noddy_getfirst, (setter)Noddy_setfirst,
- "first name",
- NULL},
- {"last",
- (getter)Noddy_getlast, (setter)Noddy_setlast,
- "last name",
- NULL},
- {NULL} /* Sentinel */
-};
-
-static PyObject *
-Noddy_name(Noddy* self)
-{
- return PyUnicode_FromFormat("%S %S", self->first, self->last);
-}
-
-static PyMethodDef Noddy_methods[] = {
- {"name", (PyCFunction)Noddy_name, METH_NOARGS,
- "Return the name, combining the first and last name"
- },
- {NULL} /* Sentinel */
-};
-
-static PyTypeObject NoddyType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "noddy.Noddy", /* tp_name */
- sizeof(Noddy), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)Noddy_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT |
- Py_TPFLAGS_BASETYPE, /* tp_flags */
- "Noddy objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- Noddy_methods, /* tp_methods */
- Noddy_members, /* tp_members */
- Noddy_getseters, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)Noddy_init, /* tp_init */
- 0, /* tp_alloc */
- Noddy_new, /* tp_new */
-};
-
-static PyModuleDef noddy3module = {
- PyModuleDef_HEAD_INIT,
- "noddy3",
- "Example module that creates an extension type.",
- -1,
- NULL, NULL, NULL, NULL, NULL
-};
-
-PyMODINIT_FUNC
-PyInit_noddy3(void)
-{
- PyObject* m;
-
- if (PyType_Ready(&NoddyType) < 0)
- return NULL;
-
- m = PyModule_Create(&noddy3module);
- if (m == NULL)
- return NULL;
-
- Py_INCREF(&NoddyType);
- PyModule_AddObject(m, "Noddy", (PyObject *)&NoddyType);
- return m;
-}
+++ /dev/null
-#include <Python.h>
-#include "structmember.h"
-
-typedef struct {
- PyObject_HEAD
- PyObject *first;
- PyObject *last;
- int number;
-} Noddy;
-
-static int
-Noddy_traverse(Noddy *self, visitproc visit, void *arg)
-{
- int vret;
-
- if (self->first) {
- vret = visit(self->first, arg);
- if (vret != 0)
- return vret;
- }
- if (self->last) {
- vret = visit(self->last, arg);
- if (vret != 0)
- return vret;
- }
-
- return 0;
-}
-
-static int
-Noddy_clear(Noddy *self)
-{
- PyObject *tmp;
-
- tmp = self->first;
- self->first = NULL;
- Py_XDECREF(tmp);
-
- tmp = self->last;
- self->last = NULL;
- Py_XDECREF(tmp);
-
- return 0;
-}
-
-static void
-Noddy_dealloc(Noddy* self)
-{
- PyObject_GC_UnTrack(self);
- Noddy_clear(self);
- Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject *
-Noddy_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
-{
- Noddy *self;
-
- self = (Noddy *)type->tp_alloc(type, 0);
- if (self != NULL) {
- self->first = PyUnicode_FromString("");
- if (self->first == NULL) {
- Py_DECREF(self);
- return NULL;
- }
-
- self->last = PyUnicode_FromString("");
- if (self->last == NULL) {
- Py_DECREF(self);
- return NULL;
- }
-
- self->number = 0;
- }
-
- return (PyObject *)self;
-}
-
-static int
-Noddy_init(Noddy *self, PyObject *args, PyObject *kwds)
-{
- PyObject *first=NULL, *last=NULL, *tmp;
-
- static char *kwlist[] = {"first", "last", "number", NULL};
-
- if (! PyArg_ParseTupleAndKeywords(args, kwds, "|OOi", kwlist,
- &first, &last,
- &self->number))
- return -1;
-
- if (first) {
- tmp = self->first;
- Py_INCREF(first);
- self->first = first;
- Py_XDECREF(tmp);
- }
-
- if (last) {
- tmp = self->last;
- Py_INCREF(last);
- self->last = last;
- Py_XDECREF(tmp);
- }
-
- return 0;
-}
-
-
-static PyMemberDef Noddy_members[] = {
- {"first", T_OBJECT_EX, offsetof(Noddy, first), 0,
- "first name"},
- {"last", T_OBJECT_EX, offsetof(Noddy, last), 0,
- "last name"},
- {"number", T_INT, offsetof(Noddy, number), 0,
- "noddy number"},
- {NULL} /* Sentinel */
-};
-
-static PyObject *
-Noddy_name(Noddy* self)
-{
- if (self->first == NULL) {
- PyErr_SetString(PyExc_AttributeError, "first");
- return NULL;
- }
-
- if (self->last == NULL) {
- PyErr_SetString(PyExc_AttributeError, "last");
- return NULL;
- }
-
- return PyUnicode_FromFormat("%S %S", self->first, self->last);
-}
-
-static PyMethodDef Noddy_methods[] = {
- {"name", (PyCFunction)Noddy_name, METH_NOARGS,
- "Return the name, combining the first and last name"
- },
- {NULL} /* Sentinel */
-};
-
-static PyTypeObject NoddyType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "noddy.Noddy", /* tp_name */
- sizeof(Noddy), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)Noddy_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT |
- Py_TPFLAGS_BASETYPE |
- Py_TPFLAGS_HAVE_GC, /* tp_flags */
- "Noddy objects", /* tp_doc */
- (traverseproc)Noddy_traverse, /* tp_traverse */
- (inquiry)Noddy_clear, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- Noddy_methods, /* tp_methods */
- Noddy_members, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)Noddy_init, /* tp_init */
- 0, /* tp_alloc */
- Noddy_new, /* tp_new */
-};
-
-static PyModuleDef noddy4module = {
- PyModuleDef_HEAD_INIT,
- "noddy4",
- "Example module that creates an extension type.",
- -1,
- NULL, NULL, NULL, NULL, NULL
-};
-
-PyMODINIT_FUNC
-PyInit_noddy4(void)
-{
- PyObject* m;
-
- if (PyType_Ready(&NoddyType) < 0)
- return NULL;
-
- m = PyModule_Create(&noddy4module);
- if (m == NULL)
- return NULL;
-
- Py_INCREF(&NoddyType);
- PyModule_AddObject(m, "Noddy", (PyObject *)&NoddyType);
- return m;
-}
+++ /dev/null
-#include <Python.h>
-
-typedef struct {
- PyListObject list;
- int state;
-} Shoddy;
-
-
-static PyObject *
-Shoddy_increment(Shoddy *self, PyObject *unused)
-{
- self->state++;
- return PyLong_FromLong(self->state);
-}
-
-
-static PyMethodDef Shoddy_methods[] = {
- {"increment", (PyCFunction)Shoddy_increment, METH_NOARGS,
- PyDoc_STR("increment state counter")},
- {NULL, NULL},
-};
-
-static int
-Shoddy_init(Shoddy *self, PyObject *args, PyObject *kwds)
-{
- if (PyList_Type.tp_init((PyObject *)self, args, kwds) < 0)
- return -1;
- self->state = 0;
- return 0;
-}
-
-
-static PyTypeObject ShoddyType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "shoddy.Shoddy", /* tp_name */
- sizeof(Shoddy), /* tp_basicsize */
- 0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT |
- Py_TPFLAGS_BASETYPE, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- Shoddy_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)Shoddy_init, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
-};
-
-static PyModuleDef shoddymodule = {
- PyModuleDef_HEAD_INIT,
- "shoddy",
- "Shoddy module",
- -1,
- NULL, NULL, NULL, NULL, NULL
-};
-
-PyMODINIT_FUNC
-PyInit_shoddy(void)
-{
- PyObject *m;
-
- ShoddyType.tp_base = &PyList_Type;
- if (PyType_Ready(&ShoddyType) < 0)
- return NULL;
-
- m = PyModule_Create(&shoddymodule);
- if (m == NULL)
- return NULL;
-
- Py_INCREF(&ShoddyType);
- PyModule_AddObject(m, "Shoddy", (PyObject *) &ShoddyType);
- return m;
-}
--- /dev/null
+#include <Python.h>
+
+typedef struct {
+ PyListObject list;
+ int state;
+} SubListObject;
+
+static PyObject *
+SubList_increment(SubListObject *self, PyObject *unused)
+{
+ self->state++;
+ return PyLong_FromLong(self->state);
+}
+
+static PyMethodDef SubList_methods[] = {
+ {"increment", (PyCFunction) SubList_increment, METH_NOARGS,
+ PyDoc_STR("increment state counter")},
+ {NULL},
+};
+
+static int
+SubList_init(SubListObject *self, PyObject *args, PyObject *kwds)
+{
+ if (PyList_Type.tp_init((PyObject *) self, args, kwds) < 0)
+ return -1;
+ self->state = 0;
+ return 0;
+}
+
+static PyTypeObject SubListType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "sublist.SubList",
+ .tp_doc = "SubList objects",
+ .tp_basicsize = sizeof(SubListObject),
+ .tp_itemsize = 0,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
+ .tp_init = (initproc) SubList_init,
+ .tp_methods = SubList_methods,
+};
+
+static PyModuleDef sublistmodule = {
+ PyModuleDef_HEAD_INIT,
+ .m_name = "sublist",
+ .m_doc = "Example module that creates an extension type.",
+ .m_size = -1,
+};
+
+PyMODINIT_FUNC
+PyInit_sublist(void)
+{
+ PyObject *m;
+ SubListType.tp_base = &PyList_Type;
+ if (PyType_Ready(&SubListType) < 0)
+ return NULL;
+
+ m = PyModule_Create(&sublistmodule);
+ if (m == NULL)
+ return NULL;
+
+ Py_INCREF(&SubListType);
+ PyModule_AddObject(m, "SubList", (PyObject *) &SubListType);
+ return m;
+}
-"""Test module for the noddy examples
+"""Test module for the custom examples
-Noddy 1:
+Custom 1:
->>> import noddy
->>> n1 = noddy.Noddy()
->>> n2 = noddy.Noddy()
->>> del n1
->>> del n2
+>>> import custom
+>>> c1 = custom.Custom()
+>>> c2 = custom.Custom()
+>>> del c1
+>>> del c2
-Noddy 2
+Custom 2
->>> import noddy2
->>> n1 = noddy2.Noddy('jim', 'fulton', 42)
->>> n1.first
+>>> import custom2
+>>> c1 = custom2.Custom('jim', 'fulton', 42)
+>>> c1.first
'jim'
->>> n1.last
+>>> c1.last
'fulton'
->>> n1.number
+>>> c1.number
42
->>> n1.name()
+>>> c1.name()
'jim fulton'
->>> n1.first = 'will'
->>> n1.name()
+>>> c1.first = 'will'
+>>> c1.name()
'will fulton'
->>> n1.last = 'tell'
->>> n1.name()
+>>> c1.last = 'tell'
+>>> c1.name()
'will tell'
->>> del n1.first
->>> n1.name()
+>>> del c1.first
+>>> c1.name()
Traceback (most recent call last):
...
AttributeError: first
->>> n1.first
+>>> c1.first
Traceback (most recent call last):
...
AttributeError: first
->>> n1.first = 'drew'
->>> n1.first
+>>> c1.first = 'drew'
+>>> c1.first
'drew'
->>> del n1.number
+>>> del c1.number
Traceback (most recent call last):
...
TypeError: can't delete numeric/char attribute
->>> n1.number=2
->>> n1.number
+>>> c1.number=2
+>>> c1.number
2
->>> n1.first = 42
->>> n1.name()
+>>> c1.first = 42
+>>> c1.name()
'42 tell'
->>> n2 = noddy2.Noddy()
->>> n2.name()
+>>> c2 = custom2.Custom()
+>>> c2.name()
' '
->>> n2.first
+>>> c2.first
''
->>> n2.last
+>>> c2.last
''
->>> del n2.first
->>> n2.first
+>>> del c2.first
+>>> c2.first
Traceback (most recent call last):
...
AttributeError: first
->>> n2.first
+>>> c2.first
Traceback (most recent call last):
...
AttributeError: first
->>> n2.name()
+>>> c2.name()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: first
->>> n2.number
+>>> c2.number
0
->>> n3 = noddy2.Noddy('jim', 'fulton', 'waaa')
+>>> c3 = custom2.Custom('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
-TypeError: an integer is required
->>> del n1
->>> del n2
+TypeError: an integer is required (got type str)
+>>> del c1
+>>> del c2
-Noddy 3
+Custom 3
->>> import noddy3
->>> n1 = noddy3.Noddy('jim', 'fulton', 42)
->>> n1 = noddy3.Noddy('jim', 'fulton', 42)
->>> n1.name()
+>>> import custom3
+>>> c1 = custom3.Custom('jim', 'fulton', 42)
+>>> c1 = custom3.Custom('jim', 'fulton', 42)
+>>> c1.name()
'jim fulton'
->>> del n1.first
+>>> del c1.first
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: Cannot delete the first attribute
->>> n1.first = 42
+>>> c1.first = 42
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: The first attribute value must be a string
->>> n1.first = 'will'
->>> n1.name()
+>>> c1.first = 'will'
+>>> c1.name()
'will fulton'
->>> n2 = noddy3.Noddy()
->>> n2 = noddy3.Noddy()
->>> n2 = noddy3.Noddy()
->>> n3 = noddy3.Noddy('jim', 'fulton', 'waaa')
+>>> c2 = custom3.Custom()
+>>> c2 = custom3.Custom()
+>>> c2 = custom3.Custom()
+>>> c3 = custom3.Custom('jim', 'fulton', 'waaa')
Traceback (most recent call last):
File "<stdin>", line 1, in ?
-TypeError: an integer is required
->>> del n1
->>> del n2
+TypeError: an integer is required (got type str)
+>>> del c1
+>>> del c2
-Noddy 4
+Custom 4
->>> import noddy4
->>> n1 = noddy4.Noddy('jim', 'fulton', 42)
->>> n1.first
+>>> import custom4
+>>> c1 = custom4.Custom('jim', 'fulton', 42)
+>>> c1.first
'jim'
->>> n1.last
+>>> c1.last
'fulton'
->>> n1.number
+>>> c1.number
42
->>> n1.name()
+>>> c1.name()
'jim fulton'
->>> n1.first = 'will'
->>> n1.name()
+>>> c1.first = 'will'
+>>> c1.name()
'will fulton'
->>> n1.last = 'tell'
->>> n1.name()
+>>> c1.last = 'tell'
+>>> c1.name()
'will tell'
->>> del n1.first
->>> n1.name()
+>>> del c1.first
Traceback (most recent call last):
...
-AttributeError: first
->>> n1.first
-Traceback (most recent call last):
-...
-AttributeError: first
->>> n1.first = 'drew'
->>> n1.first
+TypeError: Cannot delete the first attribute
+>>> c1.name()
+'will tell'
+>>> c1.first = 'drew'
+>>> c1.first
'drew'
->>> del n1.number
+>>> del c1.number
Traceback (most recent call last):
...
TypeError: can't delete numeric/char attribute
->>> n1.number=2
->>> n1.number
+>>> c1.number=2
+>>> c1.number
2
->>> n1.first = 42
->>> n1.name()
-'42 tell'
->>> n2 = noddy4.Noddy()
->>> n2 = noddy4.Noddy()
->>> n2 = noddy4.Noddy()
->>> n2 = noddy4.Noddy()
->>> n2.name()
+>>> c1.first = 42
+Traceback (most recent call last):
+...
+TypeError: The first attribute value must be a string
+>>> c1.name()
+'drew tell'
+>>> c2 = custom4.Custom()
+>>> c2 = custom4.Custom()
+>>> c2 = custom4.Custom()
+>>> c2 = custom4.Custom()
+>>> c2.name()
' '
->>> n2.first
+>>> c2.first
''
->>> n2.last
+>>> c2.last
''
->>> del n2.first
->>> n2.first
-Traceback (most recent call last):
-...
-AttributeError: first
->>> n2.first
-Traceback (most recent call last):
-...
-AttributeError: first
->>> n2.name()
-Traceback (most recent call last):
- File "<stdin>", line 1, in ?
-AttributeError: first
->>> n2.number
+>>> c2.number
0
->>> n3 = noddy4.Noddy('jim', 'fulton', 'waaa')
+>>> c3 = custom4.Custom('jim', 'fulton', 'waaa')
Traceback (most recent call last):
- File "<stdin>", line 1, in ?
-TypeError: an integer is required
+...
+TypeError: an integer is required (got type str)
Test cyclic gc(?)
>>> import gc
>>> gc.disable()
->>> x = []
->>> l = [x]
->>> n2.first = l
->>> n2.first
-[[]]
->>> l.append(n2)
->>> del l
->>> del n1
->>> del n2
+>>> class Subclass(custom4.Custom): pass
+...
+>>> s = Subclass()
+>>> s.cycle = [s]
+>>> s.cycle.append(s.cycle)
+>>> x = object()
+>>> s.x = x
+>>> del s
>>> sys.getrefcount(x)
3
>>> ignore = gc.collect()
Python (command line)`. Once the interpreter is started, you type Python code
at the prompt. For example, on my Linux system, I type the three Python
statements shown below, and get the output as shown, to find out my
-:file:`{prefix}` and :file:`{exec-prefix}`::
+:file:`{prefix}` and :file:`{exec-prefix}`:
+
+.. code-block:: pycon
Python 2.4 (#26, Aug 7 2004, 17:19:02)
Type "help", "copyright", "credits" or "license" for more information.
Obviously, specifying the entire installation scheme every time you install a
new module distribution would be very tedious. Thus, you can put these options
-into your Distutils config file (see section :ref:`inst-config-files`)::
+into your Distutils config file (see section :ref:`inst-config-files`):
+
+.. code-block:: ini
[install]
install-base=$HOME
install-scripts=python/scripts
install-data=python/data
-or, equivalently, ::
+or, equivalently,
+
+.. code-block:: ini
[install]
install-base=$HOME/python
standard library, and modify ``sys.path``. :file:`site.py` is automatically
imported when the Python interpreter is executed, unless the :option:`-S` switch
is supplied to suppress this behaviour. So you could simply edit
-:file:`site.py` and add two lines to it::
+:file:`site.py` and add two lines to it:
+
+.. code-block:: python
import sys
sys.path.append('/www/python/')
section consists of one option per line, specified as ``option=value``.
For example, the following is a complete config file that just forces all
-commands to run quietly by default::
+commands to run quietly by default:
+
+.. code-block:: ini
[global]
verbose=0
You could override the default "build base" directory and make the
:command:`build\*` commands always forcibly rebuild all files with the
-following::
+following:
+
+.. code-block:: ini
[build]
build-base=blib
``venv``. It allows virtual environments to be used on versions of
Python prior to 3.4, which either don't provide ``venv`` at all, or
aren't able to automatically install ``pip`` into created environments.
-* The `Python Packaging Index <https://pypi.python.org/pypi>`__ is a public
+* The `Python Packaging Index <https://pypi.org>`__ is a public
repository of open source licensed packages made available for use by
other Python users.
* the `Python Packaging Authority
Namespace(bar='BAR', foo='FOO')
>>> parser.parse_args(['--foo', 'FOO'])
usage: PROG [-h] [-f FOO] bar
- PROG: error: too few arguments
+ PROG: error: the following arguments are required: bar
action
Namespace(foo=['a', 'b'])
>>> parser.parse_args([])
usage: PROG [-h] foo [foo ...]
- PROG: error: too few arguments
+ PROG: error: the following arguments are required: foo
.. _`argparse.REMAINDER`:
Providing ``default=argparse.SUPPRESS`` causes no attribute to be added if the
-command-line argument was not present.::
+command-line argument was not present::
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument('--foo', default=argparse.SUPPRESS)
See :ref:`UDP echo client protocol <asyncio-udp-echo-client-protocol>` and
:ref:`UDP echo server protocol <asyncio-udp-echo-server-protocol>` examples.
+ .. versionchanged:: 3.4.4
+ The *family*, *proto*, *flags*, *reuse_address*, *reuse_port*,
+ *allow_broadcast*, and *sock* parameters were added.
.. coroutinemethod:: AbstractEventLoop.create_unix_connection(protocol_factory, path, \*, ssl=None, sock=None, server_hostname=None)
Get the event loop for the current context.
Returns an event loop object implementing the :class:`AbstractEventLoop`
- interface.
+ interface. If called from a coroutine, it returns the currently
+ running event loop.
Raises an exception in case no event loop has been set for the current
context and the current policy does not specify to create one. It must
never return ``None``.
+ .. versionchanged:: 3.6
+
.. method:: set_event_loop(loop)
Set the event loop for the current context to *loop*.
outer Future is *not* cancelled in this case. (This is to prevent the
cancellation of one child to cause other children to be cancelled.)
+ .. versionchanged:: 3.6.6
+ If the *gather* itself is cancelled, the cancellation is propagated
+ regardless of *return_exceptions*.
+
.. function:: iscoroutine(obj)
Return ``True`` if *obj* is a :ref:`coroutine object <coroutine>`,
*quotetabs* is present and true, all tabs and spaces will be encoded. If the
optional argument *istext* is present and true, newlines are not encoded but
trailing whitespace will be encoded. If the optional argument *header* is
- present and true, spaces will be encoded as underscores per RFC1522. If the
+ present and true, spaces will be encoded as underscores per :rfc:`1522`. If the
optional argument *header* is present and false, newline characters will be
encoded as well; otherwise linefeed conversion might corrupt the binary data
stream.
Python supports this conversion in several ways: the ``idna`` codec performs
conversion between Unicode and ACE, separating an input string into labels
-based on the separator characters defined in `section 3.1`_ (1) of :rfc:`3490`
+based on the separator characters defined in :rfc:`section 3.1 of RFC 3490 <3490#section-3.1>`
and converting each label to ACE as required, and conversely separating an input
byte string into labels based on the ``.`` separator and converting any ACE
labels found into unicode. Furthermore, the :mod:`socket` module
names (:mod:`http.client` then also transparently sends an IDNA hostname in the
:mailheader:`Host` field if it sends that field at all).
-.. _section 3.1: https://tools.ietf.org/html/rfc3490#section-3.1
-
When receiving host names from the wire (such as in reverse name lookup), no
automatic conversion to Unicode is performed: Applications wishing to present
such host names to the user should decode them to Unicode.
given *section*. Optional arguments have the same meaning as for the
:meth:`get` method.
- .. versionchanged:: 3.2
- Items present in *vars* no longer appear in the result. The previous
- behaviour mixed actual parser options with variables provided for
- interpolation.
.. method:: set(section, option, value)
.. method:: csvwriter.writerows(rows)
- Write all the *rows* parameters (a list of *row* objects as described above) to
- the writer's file object, formatted according to the current dialect.
+ Write all elements in *rows* (an iterable of *row* objects as described
+ above) to the writer's file object, formatted according to the current
+ dialect.
Writer objects have the following public attribute:
character previously painted at that location. By default, the character
position and attributes are the current settings for the window object.
+ .. note::
+
+ Writing outside the window, subwindow, or pad raises a :exc:`curses.error`.
+ Attempting to write to the lower right corner of a window, subwindow,
+ or pad will cause an exception to be raised after the character is printed.
+
.. method:: window.addnstr(str, n[, attr])
window.addnstr(y, x, str, n[, attr])
Paint the character string *str* at ``(y, x)`` with attributes
*attr*, overwriting anything previously on the display.
+ .. note::
+
+ Writing outside the window, subwindow, or pad raises :exc:`curses.error`.
+ Attempting to write to the lower right corner of a window, subwindow,
+ or pad will cause an exception to be raised after the string is printed.
+
.. method:: window.attroff(attr)
If provided, *tz* must be an instance of a :class:`tzinfo` subclass, and its
:meth:`utcoffset` and :meth:`dst` methods must not return ``None``. If *self*
- is naive (``self.tzinfo is None``), it is presumed to represent time in the
- system timezone.
+ is naive, it is presumed to represent time in the system timezone.
If called without arguments (or with ``tz=None``) the system local
timezone is assumed for the target timezone. The ``.tzinfo`` attribute of the converted
These libraries help you with publishing and installing Python software.
While these modules are designed to work in conjunction with the
-`Python Package Index <https://pypi.python.org/pypi>`__, they can also be used
+`Python Package Index <https://pypi.org>`__, they can also be used
with a local index server, or without any index server at all.
.. toctree::
set_content(msg, <'EmailMessage'>, cte=None, \
disposition=None, filename=None, cid=None, \
params=None, headers=None)
- set_content(msg, <'list'>, subtype='mixed', \
- disposition=None, filename=None, cid=None, \
- params=None, headers=None)
Add headers and payload to *msg*:
specified or ``rfc822`` if it is not. If *subtype* is
``partial``, raise an error (``bytes`` objects must be used to
construct ``message/partial`` parts).
- * For *<'list'>*, which should be a list of
- :class:`~email.message.EmailMessage` objects, set the ``maintype``
- to ``multipart``, and the ``subtype`` to *subtype* if it is
- specified, and ``mixed`` if it is not. If the message parts in
- the *<'list'>* have :mailheader:`MIME-Version` headers, remove
- them.
If *charset* is provided (which is valid only for ``str``), encode the
string to bytes using the specified character set. The default is
.. literalinclude:: ../includes/email-simple.py
-Parsing RFC822 headers can easily be done by the using the classes
+Parsing :rfc:`822` headers can easily be done by using the classes
from the :mod:`~email.parser` module:
.. literalinclude:: ../includes/email-headers.py
Convert any bytes with the high bit set as needed using an
ASCII-compatible :mailheader:`Content-Transfer-Encoding`. That is,
transform parts with non-ASCII :mailheader:`Content-Transfer-Encoding`
- (:mailheader:`Content-Transfer-Encoding: 8bit`) to an ASCII compatibile
+ (:mailheader:`Content-Transfer-Encoding: 8bit`) to an ASCII compatible
:mailheader:`Content-Transfer-Encoding`, and encode RFC-invalid non-ASCII
bytes in headers using the MIME ``unknown-8bit`` character set, thus
rendering them RFC-compliant.
Example
-------
-.. highlight:: sh
-
Example of a segmentation fault on Linux with and without enabling the fault
-handler::
+handler:
+
+.. code-block:: shell-session
$ python3 -c "import ctypes; ctypes.string_at(0)"
Segmentation fault
If optional *rest* is given, a ``REST`` command is sent to the server, passing
*rest* as an argument. *rest* is usually a byte offset into the requested file,
telling the server to restart sending the file's bytes at the requested offset,
- skipping over the initial bytes. Note however that RFC 959 requires only that
+ skipping over the initial bytes. Note however that :rfc:`959` requires only that
*rest* be a string containing characters in the printable range from ASCII code
33 to ASCII code 126. The :meth:`transfercmd` method, therefore, converts
*rest* to a string, but no check is performed on the string's contents. If the
.. function:: hex(x)
Convert an integer number to a lowercase hexadecimal string prefixed with
- "0x". If x is not a Python :class:`int` object, it has to define an
- __index__() method that returns an integer. Some examples:
+ "0x". If *x* is not a Python :class:`int` object, it has to define an
+ :meth:`__index__` method that returns an integer. Some examples:
>>> hex(255)
'0xff'
int(x, base=10)
Return an integer object constructed from a number or string *x*, or return
- ``0`` if no arguments are given. If *x* is a number, return
- :meth:`x.__int__() <object.__int__>`. If *x* defines
- :meth:`x.__trunc__() <object.__trunc__>` but not
- :meth:`x.__int__() <object.__int__>`, then return
- if :meth:`x.__trunc__() <object.__trunc__>`. For floating point numbers,
- this truncates towards zero.
+ ``0`` if no arguments are given. If *x* defines :meth:`__int__`,
+ ``int(x)`` returns ``x.__int__()``. If *x* defines :meth:`__trunc__`,
+ it returns ``x.__trunc__()``.
+ For floating point numbers, this truncates towards zero.
If *x* is not a number or if *base* is given, then *x* must be a string,
:class:`bytes`, or :class:`bytearray` instance representing an :ref:`integer
equally close, rounding is done toward the even choice (so, for example,
both ``round(0.5)`` and ``round(-0.5)`` are ``0``, and ``round(1.5)`` is
``2``). Any integer value is valid for *ndigits* (positive, zero, or
- negative). The return value is an integer if called with one argument,
- otherwise of the same type as *number*.
+ negative). The return value is an integer if *ndigits* is omitted or
+ ``None``.
+ Otherwise the return value has the same type as *number*.
- For a general Python object ``number``, ``round(number, ndigits)`` delegates to
- ``number.__round__(ndigits)``.
+ For a general Python object ``number``, ``round`` delegates to
+ ``number.__round__``.
.. note::
.. index::
single: blake2b, blake2s
-BLAKE2_ is a cryptographic hash function defined in RFC-7693_ that comes in two
+BLAKE2_ is a cryptographic hash function defined in :rfc:`7693` that comes in two
flavors:
* **BLAKE2b**, optimized for 64-bit platforms and produces digests of any size
* *Alexandr Sokolovskiy*
-.. _RFC-7693: https://tools.ietf.org/html/rfc7693
.. _BLAKE2: https://blake2.net
.. _HMAC: https://en.wikipedia.org/wiki/Hash-based_message_authentication_code
.. _BLAKE: https://131002.net/blake/
:class:`DefaultCookiePolicy` objects.
:class:`DefaultCookiePolicy` implements the standard accept / reject rules for
- Netscape and RFC 2965 cookies. By default, RFC 2109 cookies (ie. cookies
+ Netscape and :rfc:`2965` cookies. By default, :rfc:`2109` cookies (ie. cookies
received in a :mailheader:`Set-Cookie` header with a version cookie-attribute of
1) are treated according to the RFC 2965 rules. However, if RFC 2965 handling
is turned off or :attr:`rfc2109_as_netscape` is ``True``, RFC 2109 cookies are
.. class:: Cookie()
- This class represents Netscape, RFC 2109 and RFC 2965 cookies. It is not
+ This class represents Netscape, :rfc:`2109` and :rfc:`2965` cookies. It is not
expected that users of :mod:`http.cookiejar` construct their own :class:`Cookie`
instances. Instead, if necessary, call :meth:`make_cookies` on a
:class:`CookieJar` instance.
the one sketched out in ``cookie_spec.html``.
:rfc:`2109` - HTTP State Management Mechanism
- Obsoleted by RFC 2965. Uses :mailheader:`Set-Cookie` with version=1.
+ Obsoleted by :rfc:`2965`. Uses :mailheader:`Set-Cookie` with version=1.
:rfc:`2965` - HTTP State Management Mechanism
The Netscape protocol with the bugs fixed. Uses :mailheader:`Set-Cookie2` in
place of :mailheader:`Set-Cookie`. Not widely used.
http://kristol.org/cookie/errata.html
- Unfinished errata to RFC 2965.
+ Unfinished errata to :rfc:`2965`.
:rfc:`2964` - Use of HTTP State Management
.. note::
- This loses information about RFC 2965 cookies, and also about newer or
+ This loses information about :rfc:`2965` cookies, and also about newer or
non-standard cookie-attributes such as ``port``.
.. warning::
.. attribute:: CookiePolicy.rfc2965
- Implement RFC 2965 protocol.
+ Implement :rfc:`2965` protocol.
.. attribute:: CookiePolicy.hide_cookie2
Don't add :mailheader:`Cookie2` header to requests (the presence of this header
- indicates to the server that we understand RFC 2965 cookies).
+ indicates to the server that we understand :rfc:`2965` cookies).
The most useful way to define a :class:`CookiePolicy` class is by subclassing
from :class:`DefaultCookiePolicy` and overriding some or all of the methods
Implements the standard rules for accepting and returning cookies.
-Both RFC 2965 and Netscape cookies are covered. RFC 2965 handling is switched
+Both :rfc:`2965` and Netscape cookies are covered. :rfc:`2965` handling is switched
off by default.
The easiest way to provide your own policy is to override this class and call
.. attribute:: DefaultCookiePolicy.rfc2109_as_netscape
- If true, request that the :class:`CookieJar` instance downgrade RFC 2109 cookies
+ If true, request that the :class:`CookieJar` instance downgrade :rfc:`2109` cookies
(ie. cookies received in a :mailheader:`Set-Cookie` header with a version
cookie-attribute of 1) to Netscape cookies by setting the version attribute of
the :class:`Cookie` instance to 0. The default value is :const:`None`, in which
- case RFC 2109 cookies are downgraded if and only if RFC 2965 handling is turned
+ case :rfc:`2109` cookies are downgraded if and only if :rfc:`2965` handling is turned
off. Therefore, RFC 2109 cookies are downgraded by default.
and isn't guaranteed to work!
-RFC 2965 protocol strictness switches:
+:rfc:`2965` protocol strictness switches:
.. attribute:: DefaultCookiePolicy.strict_rfc2965_unverifiable
- Follow RFC 2965 rules on unverifiable transactions (usually, an unverifiable
+ Follow :rfc:`2965` rules on unverifiable transactions (usually, an unverifiable
transaction is one resulting from a redirect or a request for an image hosted on
another site). If this is false, cookies are *never* blocked on the basis of
verifiability
.. attribute:: DefaultCookiePolicy.strict_ns_unverifiable
- Apply RFC 2965 rules on unverifiable transactions even to Netscape cookies.
+ Apply :rfc:`2965` rules on unverifiable transactions even to Netscape cookies.
.. attribute:: DefaultCookiePolicy.strict_ns_domain
.. attribute:: DefaultCookiePolicy.DomainRFC2965Match
- When setting cookies, require a full RFC 2965 domain-match.
+ When setting cookies, require a full :rfc:`2965` domain-match.
The following attributes are provided for convenience, and are the most useful
combinations of the above flags:
standard cookie-attributes specified in the various cookie standards. The
correspondence is not one-to-one, because there are complicated rules for
assigning default values, because the ``max-age`` and ``expires``
-cookie-attributes contain equivalent information, and because RFC 2109 cookies
+cookie-attributes contain equivalent information, and because :rfc:`2109` cookies
may be 'downgraded' by :mod:`http.cookiejar` from version 1 to version 0 (Netscape)
cookies.
.. attribute:: Cookie.version
- Integer or :const:`None`. Netscape cookies have :attr:`version` 0. RFC 2965 and
- RFC 2109 cookies have a ``version`` cookie-attribute of 1. However, note that
+ Integer or :const:`None`. Netscape cookies have :attr:`version` 0. :rfc:`2965` and
+ :rfc:`2109` cookies have a ``version`` cookie-attribute of 1. However, note that
:mod:`http.cookiejar` may 'downgrade' RFC 2109 cookies to Netscape cookies, in which
case :attr:`version` is 0.
.. attribute:: Cookie.rfc2109
- ``True`` if this cookie was received as an RFC 2109 cookie (ie. the cookie
+ ``True`` if this cookie was received as an :rfc:`2109` cookie (i.e. the cookie
arrived in a :mailheader:`Set-Cookie` header, and the value of the Version
cookie-attribute in that header was 1). This attribute is provided because
:mod:`http.cookiejar` may 'downgrade' RFC 2109 cookies to Netscape cookies, in
r = opener.open("http://example.com/")
The next example illustrates the use of :class:`DefaultCookiePolicy`. Turn on
-RFC 2965 cookies, be more strict about domains when setting and returning
+:rfc:`2965` cookies, be more strict about domains when setting and returning
Netscape cookies, and block some domains from setting cookies or having them
returned::
Code Context (toggle)(Editor Window only)
Open a pane at the top of the edit window which shows the block context
- of the code which has scrolled above the top of the window.
+ of the code which has scrolled above the top of the window. Clicking a
+ line in this pane exposes that line at the top of the editor.
Window menu (Shell and Editor)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. method:: IMAP4.namespace()
- Returns IMAP namespaces as defined in RFC2342.
+ Returns IMAP namespaces as defined in :rfc:`2342`.
.. method:: IMAP4.noop()
.. class:: WindowsRegistryFinder
:term:`Finder` for modules declared in the Windows registry. This class
- implements the :class:`importlib.abc.Finder` ABC.
+ implements the :class:`importlib.abc.MetaPathFinder` ABC.
Only class methods are defined by this class to alleviate the need for
instantiation.
In addition to the standard library, there is a growing collection of
several thousand components (from individual programs and modules to
packages and entire application development frameworks), available from
-the `Python Package Index <https://pypi.python.org/pypi>`_.
+the `Python Package Index <https://pypi.org>`_.
.. toctree::
characters written are translated to the given string.
If *line_buffering* is ``True``, :meth:`flush` is implied when a call to
- write contains a newline character.
+ write contains a newline character or a carriage return.
If *write_through* is ``True``, calls to :meth:`write` are guaranteed
not to be buffered: any data written on the :class:`TextIOWrapper`
The :class:`IPv4Address` and :class:`IPv6Address` objects share a lot of common
attributes. Some attributes that are only meaningful for IPv6 addresses are
also implemented by :class:`IPv4Address` objects, in order to make it easier to
-write code that handles both IP versions correctly.
+write code that handles both IP versions correctly. Address objects are
+:term:`hashable`, so they can be used as keys in dictionaries.
.. class:: IPv4Address(address)
objects as well. In addition, network objects implement additional attributes.
All of these are common between :class:`IPv4Network` and :class:`IPv6Network`,
so to avoid duplication they are only documented for :class:`IPv4Network`.
+Network objects are :term:`hashable`, so they can be used as keys in
+dictionaries.
.. class:: IPv4Network(address, strict=True)
a slash (``/``). The IP address is the network address, and the mask
can be either a single number, which means it's a *prefix*, or a string
representation of an IPv4 address. If it's the latter, the mask is
- interpreted as a *net mask* if it starts with a non-zero field, or as
- a *host mask* if it starts with a zero field. If no mask is provided,
+ interpreted as a *net mask* if it starts with a non-zero field, or as a
+ *host mask* if it starts with a zero field, with the single exception of
+ an all-zero mask which is treated as a *net mask*. If no mask is provided,
it's considered to be ``/32``.
For example, the following *address* specifications are equivalent:
Unless stated otherwise, all network methods accepting other network/address
objects will raise :exc:`TypeError` if the argument's IP version is
- incompatible to ``self``
+ incompatible to ``self``.
.. versionchanged:: 3.5
.. attribute:: max_prefixlen
Refer to the corresponding attribute documentation in
- :class:`IPv4Address`
+ :class:`IPv4Address`.
.. attribute:: is_multicast
.. attribute:: is_private
.. attribute:: is_link_local
These attributes are true for the network as a whole if they are true
- for both the network address and the broadcast address
+ for both the network address and the broadcast address.
.. attribute:: network_address
Returns an iterator over the usable hosts in the network. The usable
hosts are all the IP addresses that belong to the network, except the
- network address itself and the network broadcast address.
+ network address itself and the network broadcast address. For networks
+ with a mask length of 31, the network address and network broadcast
+ address are also included in the result.
>>> list(ip_network('192.0.2.0/29').hosts()) #doctest: +NORMALIZE_WHITESPACE
[IPv4Address('192.0.2.1'), IPv4Address('192.0.2.2'),
IPv4Address('192.0.2.3'), IPv4Address('192.0.2.4'),
IPv4Address('192.0.2.5'), IPv4Address('192.0.2.6')]
+ >>> list(ip_network('192.0.2.0/31').hosts())
+ [IPv4Address('192.0.2.0'), IPv4Address('192.0.2.1')]
.. method:: overlaps(other)
Construct an IPv6 network definition. *address* can be one of the following:
- 1. A string consisting of an IP address and an optional mask, separated by
- a slash (``/``). The IP address is the network address, and the mask
- is a single number, which represents a *prefix*. If no mask is provided,
- it's considered to be ``/128``.
+ 1. A string consisting of an IP address and an optional prefix length,
+ separated by a slash (``/``). The IP address is the network address,
+ and the prefix length must be a single number, the *prefix*. If no
+ prefix length is provided, it's considered to be ``/128``.
Note that currently expanded netmasks are not supported. That means
``2001:db00::0/24`` is a valid argument while ``2001:db00::0/ffff:ff00::``
.. attribute:: num_addresses
.. attribute:: prefixlen
.. method:: hosts()
+
+ Returns an iterator over the usable hosts in the network. The usable
+ hosts are all the IP addresses that belong to the network, except the
+ Subnet-Router anycast address. For networks with a mask length of 127,
+ the Subnet-Router anycast address is also included in the result.
+
.. method:: overlaps(other)
.. method:: address_exclude(network)
.. method:: subnets(prefixlen_diff=1, new_prefix=None)
.. method:: compare_networks(other)
Refer to the corresponding attribute documentation in
- :class:`IPv4Network`
+ :class:`IPv4Network`.
.. attribute:: is_site_local
This attribute is true for the network as a whole if it is true
- for both the network address and the broadcast address
+ for both the network address and the broadcast address.
Operators
Logical operators
"""""""""""""""""
-Network objects can be compared with the usual set of logical operators,
-similarly to address objects.
+Network objects can be compared with the usual set of logical operators.
+Network objects are ordered first by network address, then by net mask.
Iteration
Interface objects
-----------------
+Interface objects are :term:`hashable`, so they can be used as keys in
+dictionaries.
+
.. class:: IPv4Interface(address)
Construct an IPv4 interface. The meaning of *address* is as in the
:class:`IPv4Interface`.
+Operators
+^^^^^^^^^
+
+Interface objects support some operators. Unless stated otherwise, operators
+can only be applied between compatible objects (i.e. IPv4 with IPv4, IPv6 with
+IPv6).
+
+
+Logical operators
+"""""""""""""""""
+
+Interface objects can be compared with the usual set of logical operators.
+
+For equality comparison (``==`` and ``!=``), both the IP address and network
+must be the same for the objects to be equal. An interface will not compare
+equal to any address or network object.
+
+For ordering (``<``, ``>``, etc) the rules are different. Interface and
+address objects with the same IP version can be compared, and the address
+objects will always sort before the interface objects. Two interface objects
+are first compared by their networks and, if those are the same, then by their
+IP addresses.
+
+
Other Module Level Functions
----------------------------
doesn't make sense. There are some times however, where you may wish to
have :mod:`ipaddress` sort these anyway. If you need to do this, you can use
- this function as the ``key`` argument to :func:`sorted()`.
+ this function as the *key* argument to :func:`sorted()`.
*obj* is either a network or address object.
.. exception:: NetmaskValueError(ValueError)
- Any value error related to the netmask.
+ Any value error related to the net mask.
# islice('ABCDEFG', 2, None) --> C D E F G
# islice('ABCDEFG', 0, None, 2) --> A C E G
s = slice(*args)
- it = iter(range(s.start or 0, s.stop or sys.maxsize, s.step or 1))
+ start, stop, step = s.start or 0, s.stop or sys.maxsize, s.step or 1
+ it = iter(range(start, stop, step))
try:
nexti = next(it)
except StopIteration:
+ # Consume *iterable* up to the *start* position.
+ for i, element in zip(range(start), iterable):
+ pass
return
- for i, element in enumerate(iterable):
- if i == nexti:
- yield element
- nexti = next(it)
+ try:
+ for i, element in enumerate(iterable):
+ if i == nexti:
+ yield element
+ nexti = next(it)
+ except StopIteration:
+ # Consume to *stop*.
+ for i, element in zip(range(i + 1, stop), iterable):
+ pass
If *start* is ``None``, then iteration starts at zero. If *step* is ``None``,
then the step defaults to one.
"Return first n items of the iterable as a list"
return list(islice(iterable, n))
+ def prepend(value, iterator):
+ "Prepend a single value in front of an iterator"
+ # prepend(1, [2, 3, 4]) -> 1 2 3 4
+ return chain([value], iterator)
+
def tabulate(function, start=0):
"Return function(0), function(1), ..."
return map(function, count(start))
# tail(3, 'ABCDEFG') --> E F G
return iter(collections.deque(iterable, maxlen=n))
- def consume(iterator, n):
- "Advance the iterator n-steps ahead. If n is none, consume entirely."
+ def consume(iterator, n=None):
+ "Advance the iterator n-steps ahead. If n is None, consume entirely."
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
['[2.0', ', 1.0', ']']
-.. highlight:: bash
+Using :mod:`json.tool` from the shell to validate and pretty-print:
-Using :mod:`json.tool` from the shell to validate and pretty-print::
+.. code-block:: shell-session
$ echo '{"json":"obj"}' | python -m json.tool
{
See :ref:`json-commandline` for detailed documentation.
-.. highlight:: python3
-
.. note::
JSON is a subset of `YAML <http://yaml.org/>`_ 1.2. The JSON produced by
.. function:: load(fp, *, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw)
- Deserialize *fp* (a ``.read()``-supporting :term:`file-like object`
- containing a JSON document) to a Python object using this :ref:`conversion
- table <json-to-py-table>`.
+ Deserialize *fp* (a ``.read()``-supporting :term:`text file` or
+ :term:`binary file` containing a JSON document) to a Python object using
+ this :ref:`conversion table <json-to-py-table>`.
*object_hook* is an optional function that will be called with the result of
any object literal decoded (a :class:`dict`). The return value of
.. versionchanged:: 3.6
All optional parameters are now :ref:`keyword-only <keyword-only_parameter>`.
+ .. versionchanged:: 3.6
+ *fp* can now be a :term:`binary file`. The input encoding should be
+ UTF-8, UTF-16 or UTF-32.
+
.. function:: loads(s, *, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw)
Deserialize *s* (a :class:`str`, :class:`bytes` or :class:`bytearray`
when serializing instances of "exotic" numerical types such as
:class:`decimal.Decimal`.
-.. highlight:: bash
-
.. _json-commandline:
Command Line Interface
and pretty-print JSON objects.
If the optional ``infile`` and ``outfile`` arguments are not
-specified, :attr:`sys.stdin` and :attr:`sys.stdout` will be used respectively::
+specified, :attr:`sys.stdin` and :attr:`sys.stdout` will be used respectively:
+
+.. code-block:: shell-session
$ echo '{"json": "obj"}' | python -m json.tool
{
.. cmdoption:: infile
- The JSON file to be validated or pretty-printed::
+ The JSON file to be validated or pretty-printed:
+
+ .. code-block:: shell-session
$ python -m json.tool mp_films.json
[
id. If, however, a user defines a ``my.package.MyHandler`` which has
an ``alternate`` handler, the configuration system would not know that
the ``alternate`` referred to a handler. To cater for this, a generic
-resolution system allows the user to specify::
+resolution system allows the user to specify:
+
+.. code-block:: yaml
handlers:
file:
analogous way to strings with the ``ext://`` prefix, but looking
in the configuration itself rather than the import namespace. The
mechanism allows access by dot or by index, in a similar way to
-that provided by ``str.format``. Thus, given the following snippet::
+that provided by ``str.format``. Thus, given the following snippet:
+
+.. code-block:: yaml
handlers:
email:
The ``format`` entry is the overall format string, and the ``datefmt`` entry is
the :func:`strftime`\ -compatible date/time format string. If empty, the
-package substitutes ISO8601 format date/times, which is almost equivalent to
-specifying the date format string ``'%Y-%m-%d %H:%M:%S'``. The ISO8601 format
-also specifies milliseconds, which are appended to the result of using the above
-format string, with a comma separator. An example time in ISO8601 format is
-``2003-01-23 00:29:50,411``.
+package substitutes something which is almost equivalent to specifying the date
+format string ``'%Y-%m-%d %H:%M:%S'``. This format also specifies milliseconds,
+which are appended to the result of using the above format string, with a comma
+separator. An example time in this format is ``2003-01-23 00:29:50,411``.
The ``class`` entry is optional. It indicates the name of the formatter's class
(as a dotted module and class name.) This option is useful for instantiating a
.. versionchanged:: 3.4
If ``port`` is specified as ``None``, a Unix domain socket is created
- using the value in ``host`` - otherwise, a TCP socket is created.
+ using the value in ``host`` - otherwise, a UDP socket is created.
.. method:: emit()
(See: :issue:`12168`.) In earlier versions, the message sent to the
syslog daemons was always terminated with a NUL byte, because early
versions of these daemons expected a NUL terminated message - even
- though it's not in the relevant specification (RFC 5424). More recent
+ though it's not in the relevant specification (:rfc:`5424`). More recent
versions of these daemons don't expect the NUL byte but strip it off
if it's there, and even more recent daemons (which adhere more closely
to RFC 5424) pass the NUL byte on as part of the message.
You can specify *stack_info* independently of *exc_info*, e.g. to just show
how you got to a certain point in your code, even when no exceptions were
- raised. The stack frames are printed following a header line which says::
+ raised. The stack frames are printed following a header line which says:
+
+ .. code-block:: none
Stack (most recent call last):
logger = logging.getLogger('tcpserver')
logger.warning('Protocol problem: %s', 'connection reset', extra=d)
- would print something like ::
+ would print something like:
+
+ .. code-block:: none
2006-02-08 22:20:02,165 192.168.0.1 fbloggs Protocol problem: connection reset
Returns a new instance of the :class:`Formatter` class. The instance is
initialized with a format string for the message as a whole, as well as a
format string for the date/time portion of a message. If no *fmt* is
- specified, ``'%(message)s'`` is used. If no *datefmt* is specified, the
- ISO8601 date format is used.
+ specified, ``'%(message)s'`` is used. If no *datefmt* is specified, a format
+ is used which is described in the :meth:`formatTime` documentation.
The *style* parameter can be one of '%', '{' or '$' and determines how
the format string will be merged with its data: using one of %-formatting,
formatters to provide for any specific requirement, but the basic behavior
is as follows: if *datefmt* (a string) is specified, it is used with
:func:`time.strftime` to format the creation time of the
- record. Otherwise, the ISO8601 format is used. The resulting string is
- returned.
+ record. Otherwise, the format '%Y-%m-%d %H:%M:%S,uuu' is used, where the
+ uuu part is a millisecond value and the other letters are as per the
+ :func:`time.strftime` documentation. An example time in this format is
+ ``2003-01-23 00:29:50,411``. The resulting string is returned.
This function uses a user-configurable function to convert the creation
time to a tuple. By default, :func:`time.localtime` is used; to change
attribute in the ``Formatter`` class.
.. versionchanged:: 3.3
- Previously, the default ISO 8601 format was hard-coded as in this
- example: ``2010-09-06 22:38:15,292`` where the part before the comma is
+ Previously, the default format was hard-coded as in this example:
+ ``2010-09-06 22:38:15,292`` where the part before the comma is
handled by a strptime format string (``'%Y-%m-%d %H:%M:%S'``), and the
part after the comma is a millisecond value. Because strptime does not
have a format placeholder for milliseconds, the millisecond value is
You can specify *stack_info* independently of *exc_info*, e.g. to just show
how you got to a certain point in your code, even when no exceptions were
- raised. The stack frames are printed following a header line which says::
+ raised. The stack frames are printed following a header line which says:
+
+ .. code-block:: none
Stack (most recent call last):
d = {'clientip': '192.168.0.1', 'user': 'fbloggs'}
logging.warning('Protocol problem: %s', 'connection reset', extra=d)
- would print something like::
+ would print something like:
+
+ .. code-block:: none
2006-02-08 22:20:02,165 192.168.0.1 fbloggs Protocol problem: connection reset
:class:`~mmap.mmap` can also be used as a context manager in a :keyword:`with`
- statement.::
+ statement::
import mmap
.. function:: Pipe([duplex])
- Returns a pair ``(conn1, conn2)`` of :class:`Connection` objects representing
- the ends of a pipe.
+ Returns a pair ``(conn1, conn2)`` of
+ :class:`~multiprocessing.connection.Connection` objects representing the
+ ends of a pipe.
If *duplex* is ``True`` (the default) then the pipe is bidirectional. If
*duplex* is ``False`` then the pipe is unidirectional: ``conn1`` can only be
Connection Objects
~~~~~~~~~~~~~~~~~~
+.. currentmodule:: multiprocessing.connection
+
Connection objects allow the sending and receiving of picklable objects or
strings. They can be thought of as message oriented connected sockets.
-Connection objects are usually created using :func:`Pipe` -- see also
+Connection objects are usually created using
+:func:`Pipe <multiprocessing.Pipe>` -- see also
:ref:`multiprocessing-listeners-clients`.
.. class:: Connection
Synchronization primitives
~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. currentmodule:: multiprocessing
+
Generally synchronization primitives are not as necessary in a multiprocess
program as they are in a multithreaded program. See the documentation for
:mod:`threading` module.
:synopsis: API for dealing with sockets.
Usually message passing between processes is done using queues or by using
-:class:`~multiprocessing.Connection` objects returned by
+:class:`~Connection` objects returned by
:func:`~multiprocessing.Pipe`.
However, the :mod:`multiprocessing.connection` module allows some extra
.. function:: Client(address[, family[, authkey]])
Attempt to set up a connection to the listener which is using address
- *address*, returning a :class:`~multiprocessing.Connection`.
+ *address*, returning a :class:`~Connection`.
The type of the connection is determined by *family* argument, but this can
generally be omitted since it can usually be inferred from the format of
.. method:: accept()
Accept a connection on the bound socket or named pipe of the listener
- object and return a :class:`~multiprocessing.Connection` object. If
- authentication is attempted and fails, then
+ object and return a :class:`~Connection` object.
+ If authentication is attempted and fails, then
:exc:`~multiprocessing.AuthenticationError` is raised.
.. method:: close()
For both Unix and Windows, an object can appear in *object_list* if
it is
- * a readable :class:`~multiprocessing.Connection` object;
+ * a readable :class:`~multiprocessing.connection.Connection` object;
* a connected and readable :class:`socket.socket` object; or
* the :attr:`~multiprocessing.Process.sentinel` attribute of a
:class:`~multiprocessing.Process` object.
Authentication keys
~~~~~~~~~~~~~~~~~~~
-When one uses :meth:`Connection.recv <multiprocessing.Connection.recv>`, the
+When one uses :meth:`Connection.recv <Connection.recv>`, the
data received is automatically
-unpickled. Unfortunately unpickling data from an untrusted source is a security
-risk. Therefore :class:`Listener` and :func:`Client` use the :mod:`hmac` module
+unpickled. Unfortunately unpickling data from an untrusted source is a security
+risk. Therefore :class:`Listener` and :func:`Client` use the :mod:`hmac` module
to provide digest authentication.
An authentication key is a byte string which can be thought of as a
is supplied, then the returned *list* is an empty list. This is an optional NNTP
extension, and may not be supported by all servers.
- RFC2980 says "It is suggested that this extension be deprecated". Use
+ :rfc:`2980` says "It is suggested that this extension be deprecated". Use
:meth:`descriptions` or :meth:`description` instead.
.. attribute:: st_ino
- Inode number.
+ Platform dependent, but if non-zero, uniquely identifies the
+ file for a given value of ``st_dev``. Typically:
+
+ * the inode number on Unix,
+ * the `file index
+ <https://msdn.microsoft.com/en-us/library/aa363788>`_ on
+ Windows
.. attribute:: st_dev
.. versionadded:: 3.5
Added the :attr:`st_file_attributes` member on Windows.
+ .. versionchanged:: 3.5
+ Windows now returns the file index as :attr:`st_ino` when
+ available.
+
.. function:: stat_float_times([newvalue])
no effect on the behavior of the walk, because in bottom-up mode the directories
in *dirnames* are generated before *dirpath* itself is generated.
- By default, errors from the :func:`listdir` call are ignored. If optional
+ By default, errors from the :func:`scandir` call are ignored. If optional
argument *onerror* is specified, it should be a function; it will be called with
one argument, an :exc:`OSError` instance. It can report the error to continue
with the walk, or raise the exception to abort the walk. Note that the filename
.. method:: object.__getnewargs__()
- This method serve a similar purpose as :meth:`__getnewargs_ex__`, but
+ This method serves a similar purpose as :meth:`__getnewargs_ex__`, but
supports only positional arguments. It must return a tuple of arguments
``args`` which will be passed to the :meth:`__new__` method upon unpickling.
This is another name for :func:`linux_distribution`.
- .. deprecated-removed:: 3.5 3.7
+ .. deprecated-removed:: 3.5 3.8
+ See alternatives like the `distro <https://pypi.org/project/distro>`_ package.
.. function:: linux_distribution(distname='', version='', id='', supported_dists=('SuSE','debian','redhat','mandrake',...), full_distribution_name=1)
parameters. ``id`` is the item in parentheses after the version number. It
is usually the version codename.
- .. deprecated-removed:: 3.5 3.7
+ .. deprecated-removed:: 3.5 3.8
+ See alternatives like the `distro <https://pypi.org/project/distro>`_ package.
.. function:: libc_ver(executable=sys.executable, lib='', version='', chunksize=2048)
-------
To demonstrate several uses of the :func:`pprint` function and its parameters,
-let's fetch information about a project from `PyPI <https://pypi.python.org/pypi>`_::
+let's fetch information about a project from `PyPI <https://pypi.org>`_::
>>> import json
>>> import pprint
>>> from urllib.request import urlopen
- >>> with urlopen('http://pypi.python.org/pypi/Twisted/json') as url:
+ >>> with urlopen('http://pypi.org/project/Twisted/json') as url:
... http_info = url.info()
... raw_data = url.read().decode(http_info.get_content_charset())
>>> project_info = json.loads(raw_data)
'maintainer': '',
'maintainer_email': '',
'name': 'Twisted',
- 'package_url': 'http://pypi.python.org/pypi/Twisted',
+ 'package_url': 'http://pypi.org/project/Twisted',
'platform': 'UNKNOWN',
- 'release_url': 'http://pypi.python.org/pypi/Twisted/12.3.0',
+ 'release_url': 'http://pypi.org/project/Twisted/12.3.0',
'requires_python': None,
'stable_version': None,
'summary': 'An asynchronous networking framework written in Python',
'python_version': 'source',
'size': 2615733,
'upload_time': '2012-12-26T12:47:03',
- 'url': 'https://pypi.python.org/packages/source/T/Twisted/Twisted-12.3.0.tar.bz2'},
+ 'url': 'https://pypi.org/packages/source/T/Twisted/Twisted-12.3.0.tar.bz2'},
{'comment_text': '',
'downloads': 5224,
'filename': 'Twisted-12.3.0.win32-py2.7.msi',
'python_version': '2.7',
'size': 2916352,
'upload_time': '2012-12-26T12:48:15',
- 'url': 'https://pypi.python.org/packages/2.7/T/Twisted/Twisted-12.3.0.win32-py2.7.msi'}]}
+ 'url': 'https://pypi.org/packages/2.7/T/Twisted/Twisted-12.3.0.win32-py2.7.msi'}]}
The result can be limited to a certain *depth* (ellipsis is used for deeper
contents)::
'maintainer': '',
'maintainer_email': '',
'name': 'Twisted',
- 'package_url': 'http://pypi.python.org/pypi/Twisted',
+ 'package_url': 'http://pypi.org/project/Twisted',
'platform': 'UNKNOWN',
- 'release_url': 'http://pypi.python.org/pypi/Twisted/12.3.0',
+ 'release_url': 'http://pypi.org/project/Twisted/12.3.0',
'requires_python': None,
'stable_version': None,
'summary': 'An asynchronous networking framework written in Python',
'maintainer': '',
'maintainer_email': '',
'name': 'Twisted',
- 'package_url': 'http://pypi.python.org/pypi/Twisted',
+ 'package_url': 'http://pypi.org/project/Twisted',
'platform': 'UNKNOWN',
- 'release_url': 'http://pypi.python.org/pypi/Twisted/12.3.0',
+ 'release_url': 'http://pypi.org/project/Twisted/12.3.0',
'requires_python': None,
'stable_version': None,
'summary': 'An asynchronous networking '
corresponding version of :mod:`profile` or :mod:`cProfile`. To be specific,
there is *no* file compatibility guaranteed with future versions of this
profiler, and there is no compatibility with files produced by other
- profilers. If several files are provided, all the statistics for identical
- functions will be coalesced, so that an overall view of several processes can
- be considered in a single report. If additional files need to be combined
- with data in an existing :class:`~pstats.Stats` object, the
- :meth:`~pstats.Stats.add` method can be used.
+ profilers, or the same profiler run on a different operating system. If
+ several files are provided, all the statistics for identical functions will
+ be coalesced, so that an overall view of several processes can be considered
+ in a single report. If additional files need to be combined with data in an
+ existing :class:`~pstats.Stats` object, the :meth:`~pstats.Stats.add` method
+ can be used.
Instead of reading the profile data from a file, a :class:`cProfile.Profile`
or :class:`profile.Profile` object can be used as the profile data source.
.. seealso::
- The third-party `regex <https://pypi.python.org/pypi/regex/>`_ module,
+ The third-party `regex <https://pypi.org/project/regex/>`_ module,
which has an API compatible with the standard library :mod:`re` module,
but offers additional functionality and a more thorough Unicode support.
operations; boundary conditions between *A* and *B*; or have numbered group
references. Thus, complex expressions can easily be constructed from simpler
primitive expressions like the ones described here. For details of the theory
-and implementation of regular expressions, consult the Friedl book referenced
-above, or almost any textbook about compiler construction.
+and implementation of regular expressions, consult the Friedl book [Frie09]_,
+or almost any textbook about compiler construction.
A brief explanation of the format of regular expressions follows. For further
information and a gentler presentation, consult the :ref:`regex-howto`.
Unknown escapes consisting of ``'\'`` and an ASCII letter now are errors.
-.. seealso::
-
- Mastering Regular Expressions
- Book on regular expressions by Jeffrey Friedl, published by O'Reilly. The
- second edition of the book no longer covers Python at all, but the first
- edition covered writing good regular expression patterns in great detail.
-
-
.. _contents-of-module-re:
Token(typ='END', value=';', line=4, column=27)
Token(typ='ENDIF', value='ENDIF', line=5, column=4)
Token(typ='END', value=';', line=5, column=9)
+
+
+.. [Frie09] Friedl, Jeffrey. Mastering Regular Expressions. 3rd ed., O'Reilly
+ Media, 2009. The third edition of the book no longer covers Python at all,
+ but the first edition covered writing good regular expression patterns in
+ great detail.
The :mod:`site` module also provides a way to get the user directories from the
command line:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 -m site --user-site
/home/user/.local/lib/python3.3/site-packages
helps manage settings and certificates, which can then be inherited
by SSL sockets created through the :meth:`SSLContext.wrap_socket` method.
+.. versionchanged:: 3.5.3
+ Updated to support linking with OpenSSL 1.1.0.
+
.. versionchanged:: 3.6
OpenSSL 0.9.8, 1.0.0 and 1.0.1 are deprecated and no longer supported.
.. data:: CERT_NONE
Possible value for :attr:`SSLContext.verify_mode`, or the ``cert_reqs``
- parameter to :func:`wrap_socket`. In this mode (the default), no
- certificates will be required from the other side of the socket connection.
- If a certificate is received from the other end, no attempt to validate it
- is made.
+ parameter to :func:`wrap_socket`. Except for :const:`PROTOCOL_TLS_CLIENT`,
+ it is the default mode. With client-side sockets, just about any
+ cert is accepted. Validation errors, such as untrusted or expired cert,
+ are ignored and do not abort the TLS/SSL handshake.
+
+ In server mode, no certificate is requested from the client, so the client
+ does not send any for client cert authentication.
See the discussion of :ref:`ssl-security` below.
.. data:: CERT_OPTIONAL
Possible value for :attr:`SSLContext.verify_mode`, or the ``cert_reqs``
- parameter to :func:`wrap_socket`. In this mode no certificates will be
- required from the other side of the socket connection; but if they
- are provided, validation will be attempted and an :class:`SSLError`
- will be raised on failure.
+ parameter to :func:`wrap_socket`. In client mode, :const:`CERT_OPTIONAL`
+ has the same meaning as :const:`CERT_REQUIRED`. It is recommended to
+ use :const:`CERT_REQUIRED` for client-side sockets instead.
+
+ In server mode, a client certificate request is sent to the client. The
+ client may either ignore the request or send a certificate in order
+ to perform TLS client cert authentication. If the client chooses to send
+ a certificate, it is verified. Any verification error immediately aborts
+ the TLS handshake.
Use of this setting requires a valid set of CA certificates to
be passed, either to :meth:`SSLContext.load_verify_locations` or as a
parameter to :func:`wrap_socket`. In this mode, certificates are
required from the other side of the socket connection; an :class:`SSLError`
will be raised if no certificate is provided, or if its validation fails.
+ This mode is **not** sufficient to verify a certificate in client mode as
+ it does not match hostnames. :attr:`~SSLContext.check_hostname` must be
+ enabled as well to verify the authenticity of a cert.
+ :const:`PROTOCOL_TLS_CLIENT` uses :const:`CERT_REQUIRED` and
+ enables :attr:`~SSLContext.check_hostname` by default.
+
+ With server socket, this mode provides mandatory TLS client cert
+ authentication. A client certificate request is sent to the client and
+ the client must provide a valid and trusted certificate.
Use of this setting requires a valid set of CA certificates to
be passed, either to :meth:`SSLContext.load_verify_locations` or as a
(('commonName', 'www.python.org'),)),
'subjectAltName': (('DNS', 'www.python.org'),
('DNS', 'python.org'),
- ('DNS', 'pypi.python.org'),
+ ('DNS', 'pypi.org'),
('DNS', 'docs.python.org'),
- ('DNS', 'testpypi.python.org'),
+ ('DNS', 'testpypi.org'),
('DNS', 'bugs.python.org'),
('DNS', 'wiki.python.org'),
('DNS', 'hg.python.org'),
(rather than using a higher-level authentication mechanism), you'll also have
to specify :const:`CERT_REQUIRED` and similarly check the client certificate.
- .. note::
-
- In client mode, :const:`CERT_OPTIONAL` and :const:`CERT_REQUIRED` are
- equivalent unless anonymous ciphers are enabled (they are disabled
- by default).
Protocol versions
'''''''''''''''''
`SSL/TLS Strong Encryption: An Introduction <https://httpd.apache.org/docs/trunk/en/ssl/ssl_intro.html>`_
Intro from the Apache HTTP Server documentation
- `RFC 1422: Privacy Enhancement for Internet Electronic Mail: Part II: Certificate-Based Key Management <https://www.ietf.org/rfc/rfc1422>`_
+ :rfc:`RFC 1422: Privacy Enhancement for Internet Electronic Mail: Part II: Certificate-Based Key Management <1422>`
Steve Kent
- `RFC 4086: Randomness Requirements for Security <http://datatracker.ietf.org/doc/rfc4086/>`_
+ :rfc:`RFC 4086: Randomness Requirements for Security <4086>`
Donald E., Jeffrey I. Schiller
- `RFC 5280: Internet X.509 Public Key Infrastructure Certificate and Certificate Revocation List (CRL) Profile <http://datatracker.ietf.org/doc/rfc5280/>`_
+ :rfc:`RFC 5280: Internet X.509 Public Key Infrastructure Certificate and Certificate Revocation List (CRL) Profile <5280>`
D. Cooper
- `RFC 5246: The Transport Layer Security (TLS) Protocol Version 1.2 <https://tools.ietf.org/html/rfc5246>`_
+ :rfc:`RFC 5246: The Transport Layer Security (TLS) Protocol Version 1.2 <5246>`
T. Dierks et al.
- `RFC 6066: Transport Layer Security (TLS) Extensions <https://tools.ietf.org/html/rfc6066>`_
+ :rfc:`RFC 6066: Transport Layer Security (TLS) Extensions <6066>`
D. Eastlake
`IANA TLS: Transport Layer Security (TLS) Parameters <https://www.iana.org/assignments/tls-parameters/tls-parameters.xml>`_
IANA
- `RFC 7525: Recommendations for Secure Use of Transport Layer Security (TLS) and Datagram Transport Layer Security (DTLS) <https://tools.ietf.org/html/rfc7525>`_
+ :rfc:`RFC 7525: Recommendations for Secure Use of Transport Layer Security (TLS) and Datagram Transport Layer Security (DTLS) <7525>`
IETF
`Mozilla's Server Side TLS recommendations <https://wiki.mozilla.org/Security/Server_Side_TLS>`_
procedure are part of the profile. One example of a ``stringprep`` profile is
``nameprep``, which is used for internationalized domain names.
-The module :mod:`stringprep` only exposes the tables from RFC 3454. As these
+The module :mod:`stringprep` only exposes the tables from :rfc:`3454`. As these
tables would be very large to represent them as dictionaries or lists, the
module uses the Unicode character database internally. The module source code
itself was generated using the ``mkstringprep.py`` utility.
.. function:: run(args, *, stdin=None, input=None, stdout=None, stderr=None,\
shell=False, cwd=None, timeout=None, check=False, \
- encoding=None, errors=None)
+ encoding=None, errors=None, env=None)
Run the command described by *args*. Wait for command to complete, then
return a :class:`CompletedProcess` instance.
specified *encoding* and *errors* or the :class:`io.TextIOWrapper` default.
Otherwise, file objects are opened in binary mode.
+ If *env* is not ``None``, it must be a mapping that defines the environment
+ variables for the new process; these are used instead of the default
+ behavior of inheriting the current process' environment. It is passed directly
+ to :class:`Popen`.
+
Examples::
>>> subprocess.run(["ls", "-l"]) # doesn't capture output
The following example shows how the :ref:`timeit-command-line-interface`
can be used to compare three different expressions:
-.. code-block:: sh
+.. code-block:: shell-session
$ python3 -m timeit '"-".join(str(n) for n in range(100))'
10000 loops, best of 3: 30.2 usec per loop
It is possible to provide a setup statement that is executed only once at the beginning:
-.. code-block:: sh
+.. code-block:: shell-session
$ python -m timeit -s 'text = "sample string"; char = "g"' 'char in text'
10000000 loops, best of 3: 0.0877 usec per loop
Here we compare the cost of using :func:`hasattr` vs. :keyword:`try`/:keyword:`except`
to test for missing and present object attributes:
-.. code-block:: sh
+.. code-block:: shell-session
$ python -m timeit 'try:' ' str.__bool__' 'except AttributeError:' ' pass'
100000 loops, best of 3: 15.7 usec per loop
The :mod:`tkinter` package ("Tk interface") is the standard Python interface to
the Tk GUI toolkit. Both Tk and :mod:`tkinter` are available on most Unix
platforms, as well as on Windows systems. (Tk itself is not part of Python; it
-is maintained at ActiveState.) You can check that :mod:`tkinter` is properly
-installed on your system by running ``python -m tkinter`` from the command line;
-this should open a window demonstrating a simple Tk interface.
+is maintained at ActiveState.)
+
+Running ``python -m tkinter`` from the command line should open a window
+demonstrating a simple Tk interface, letting you know that :mod:`tkinter` is
+properly installed on your system, and also showing what version of Tcl/Tk is
+installed, so you can read the Tcl/Tk documentation specific to that version.
.. seealso::
+ Tkinter documentation:
+
`Python Tkinter Resources <https://wiki.python.org/moin/TkInter>`_
The Python Tkinter Topic Guide provides a great deal of information on using Tk
from Python and links to other sources of information on Tk.
`Tkinter docs from effbot <http://effbot.org/tkinterbook/>`_
Online reference for tkinter supported by effbot.org.
- `Tcl/Tk manual <https://www.tcl.tk/man/tcl8.5/>`_
- Official manual for the latest tcl/tk version.
-
- `Programming Python <http://learning-python.com/books/about-pp4e.html>`_
+ `Programming Python <http://learning-python.com/about-pp4e.html>`_
Book by Mark Lutz, has excellent coverage of Tkinter.
- `Modern Tkinter for Busy Python Developers <http://www.amazon.com/Modern-Tkinter-Python-Developers-ebook/dp/B0071QDNLO/>`_
+ `Modern Tkinter for Busy Python Developers <https://www.amazon.com/Modern-Tkinter-Python-Developers-ebook/dp/B0071QDNLO/>`_
Book by Mark Rozerman about building attractive and modern graphical user interfaces with Python and Tkinter.
`Python and Tkinter Programming <https://www.manning.com/books/python-and-tkinter-programming>`_
- The book by John Grayson (ISBN 1-884777-81-3).
+ Book by John Grayson (ISBN 1-884777-81-3).
+
+ Tcl/Tk documentation:
+
+ `Tk commands <https://www.tcl.tk/man/tcl8.6/TkCmd/contents.htm>`_
+ Most commands are available as :mod:`tkinter` or :mod:`tkinter.ttk` classes.
+ Change '8.6' to match the version of your Tcl/Tk installation.
+
+ `Tcl/Tk recent man pages <https://www.tcl.tk/doc/>`_
+ Recent Tcl/Tk manuals on www.tcl.tk.
+
+ `ActiveState Tcl Home Page <http://tcl.activestate.com/>`_
+ The Tk/Tcl development is largely taking place at ActiveState.
+
+ `Tcl and the Tk Toolkit <https://www.amazon.com/exec/obidos/ASIN/020163337X>`_
+ Book by John Ousterhout, the inventor of Tcl.
+
+ `Practical Programming in Tcl and Tk <http://www.beedub.com/book/>`_
+ Brent Welch's encyclopedic book.
Tkinter Modules
place to go when nothing else makes sense.
-.. seealso::
-
- `Tcl/Tk 8.6 man pages <https://www.tcl.tk/man/tcl8.6/>`_
- The Tcl/Tk manual on www.tcl.tk.
-
- `ActiveState Tcl Home Page <http://tcl.activestate.com/>`_
- The Tk/Tcl development is largely taking place at ActiveState.
-
- `Tcl and the Tk Toolkit <http://www.amazon.com/exec/obidos/ASIN/020163337X>`_
- The book by John Ousterhout, the inventor of Tcl.
-
- `Practical Programming in Tcl and Tk <http://www.beedub.com/book/>`_
- Brent Welch's encyclopedic book.
-
-
A Simple Hello World Program
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Images
^^^^^^
-Bitmap/Pixelmap images can be created through the subclasses of
-:class:`tkinter.Image`:
+Images of different formats can be created through the corresponding subclass
+of :class:`tkinter.Image`:
-* :class:`BitmapImage` can be used for X11 bitmap data.
+* :class:`BitmapImage` for images in XBM format.
-* :class:`PhotoImage` can be used for GIF and PPM/PGM color bitmaps.
+* :class:`PhotoImage` for images in PGM, PPM, GIF and PNG formats. The latter
+ is supported starting with Tk 8.6.
Either type of image is created through either the ``file`` or the ``data``
option (other options are available as well).
deleted, the image data is deleted as well, and Tk will display an empty box
wherever the image was used.
+.. seealso::
+
+ The `Pillow <http://python-pillow.org/>`_ package adds support for
+ formats such as BMP, JPEG, TIFF, and WebP, among others.
.. _tkinter-file-handlers:
of the line/column coordinates where the token is found, the second column is
the name of the token, and the final column is the value of the token (if any)
-.. code-block:: sh
+.. code-block:: shell-session
$ python -m tokenize hello.py
0,0-0,0: ENCODING 'utf-8'
The exact token type names can be displayed using the ``-e`` option:
-.. code-block:: sh
+.. code-block:: shell-session
$ python -m tokenize -e hello.py
0,0-0,0: ENCODING 'utf-8'
.. versionadded:: 3.6
+.. class:: AsyncContextManager(Generic[T_co])
+
+ An ABC with async abstract :meth:`__aenter__` and :meth:`__aexit__`
+ methods.
+
+ .. versionadded:: 3.6
+
.. class:: Dict(dict, MutableMapping[KT, VT])
A generic version of :class:`dict`.
* Every type is compatible with :data:`Any`.
* :data:`Any` is compatible with every type.
+.. data:: NoReturn
+
+ Special type indicating that a function never returns.
+ For example::
+
+ from typing import NoReturn
+
+ def stop() -> NoReturn:
+ raise RuntimeError('no way')
+
+ .. versionadded:: 3.6.5
+
.. data:: Union
Union type; ``Union[X, Y]`` means either X or Y.
used by many mocking frameworks.
There is a backport of :mod:`unittest.mock` for earlier versions of Python,
-available as `mock on PyPI <https://pypi.python.org/pypi/mock>`_.
+available as `mock on PyPI <https://pypi.org/project/mock>`_.
Quick Guide
the start. If you need more control over the data that you are feeding to
the tested code you will need to customize this mock for yourself. When that
is insufficient, one of the in-memory filesystem packages on `PyPI
- <https://pypi.python.org/pypi>`_ can offer a realistic filesystem for testing.
+ <https://pypi.org>`_ can offer a realistic filesystem for testing.
.. versionchanged:: 3.4
Added :meth:`~io.IOBase.readline` and :meth:`~io.IOBase.readlines` support.
Note that in order to test something, we use one of the :meth:`assert\*`
methods provided by the :class:`TestCase` base class. If the test fails, an
-exception will be raised, and :mod:`unittest` will identify the test case as a
-:dfn:`failure`. Any other exceptions will be treated as :dfn:`errors`.
+exception will be raised with an explanatory message, and :mod:`unittest`
+will identify the test case as a :dfn:`failure`. Any other exceptions will be
+treated as :dfn:`errors`.
Tests can be numerous, and their set-up can be repetitive. Luckily, we
can factor out set-up code by implementing a method called
If :meth:`~TestCase.setUp` succeeded, :meth:`~TestCase.tearDown` will be
run whether the test method succeeded or not.
-Such a working environment for the testing code is called a :dfn:`fixture`.
-
-Test case instances are grouped together according to the features they test.
-:mod:`unittest` provides a mechanism for this: the :dfn:`test suite`,
-represented by :mod:`unittest`'s :class:`TestSuite` class. In most cases,
-calling :func:`unittest.main` will do the right thing and collect all the
-module's test cases for you, and then execute them.
+Such a working environment for the testing code is called a
+:dfn:`test fixture`. A new TestCase instance is created as a unique
+test fixture used to execute each individual test method. Thus
+:meth:`~TestCase.setUp`, :meth:`~TestCase.tearDown`, and :meth:`~TestCase.__init__`
+will be called once per test.
+
+It is recommended that you use TestCase implementations to group tests together
+according to the features they test. :mod:`unittest` provides a mechanism for
+this: the :dfn:`test suite`, represented by :mod:`unittest`'s
+:class:`TestSuite` class. In most cases, calling :func:`unittest.main` will do
+the right thing and collect all the module's test cases for you and execute
+them.
However, should you want to customize the building of your test suite,
you can do it yourself::
.. attribute:: code
- An HTTP status code as defined in `RFC 2616
- <http://www.faqs.org/rfcs/rfc2616.html>`_. This numeric value corresponds
+ An HTTP status code as defined in :rfc:`2616`. This numeric value corresponds
to a value found in the dictionary of codes as found in
:attr:`http.server.BaseHTTPRequestHandler.responses`.
containing the image.
*unverifiable* should indicate whether the request is unverifiable,
- as defined by RFC 2965. It defaults to ``False``. An unverifiable
+ as defined by :rfc:`2965`. It defaults to ``False``. An unverifiable
request is one whose URL the user did not have the option to
approve. For example, if the request is for an image in an HTML
document, and the user had no option to approve the automatic
.. attribute:: Request.unverifiable
boolean, indicates whether the request is unverifiable as defined
- by RFC 2965.
+ by :rfc:`2965`.
.. attribute:: Request.method
-:mod:`uuid` --- UUID objects according to RFC 4122
-==================================================
+:mod:`uuid` --- UUID objects according to :rfc:`4122`
+=====================================================
.. module:: uuid
:synopsis: UUID objects (universally unique identifiers) according to RFC 4122
.. class:: UUID(hex=None, bytes=None, bytes_le=None, fields=None, int=None, version=None)
Create a UUID from either a string of 32 hexadecimal digits, a string of 16
- bytes as the *bytes* argument, a string of 16 bytes in little-endian order as
- the *bytes_le* argument, a tuple of six integers (32-bit *time_low*, 16-bit
- *time_mid*, 16-bit *time_hi_version*, 8-bit *clock_seq_hi_variant*, 8-bit
- *clock_seq_low*, 48-bit *node*) as the *fields* argument, or a single 128-bit
- integer as the *int* argument. When a string of hex digits is given, curly
- braces, hyphens, and a URN prefix are all optional. For example, these
+ bytes in big-endian order as the *bytes* argument, a string of 16 bytes in
+ little-endian order as the *bytes_le* argument, a tuple of six integers
+ (32-bit *time_low*, 16-bit *time_mid*, 16-bit *time_hi_version*,
+ 8-bit *clock_seq_hi_variant*, 8-bit *clock_seq_low*, 48-bit *node*) as the
+ *fields* argument, or a single 128-bit integer as the *int* argument.
+ When a string of hex digits is given, curly braces, hyphens,
+ and a URN prefix are all optional. For example, these
expressions all yield the same UUID::
UUID('{12345678-1234-5678-1234-567812345678}')
Exactly one of *hex*, *bytes*, *bytes_le*, *fields*, or *int* must be given.
The *version* argument is optional; if given, the resulting UUID will have its
- variant and version number set according to RFC 4122, overriding bits in the
+ variant and version number set according to :rfc:`4122`, overriding bits in the
given *hex*, *bytes*, *bytes_le*, *fields*, or *int*.
Comparison of UUID objects are made by way of comparing their
.. attribute:: UUID.urn
- The UUID as a URN as specified in RFC 4122.
+ The UUID as a URN as specified in :rfc:`4122`.
.. attribute:: UUID.variant
Get the hardware address as a 48-bit positive integer. The first time this
runs, it may launch a separate program, which could be quite slow. If all
attempts to obtain the hardware address fail, we choose a random 48-bit number
- with its eighth bit set to 1 as recommended in RFC 4122. "Hardware address"
+ with its eighth bit set to 1 as recommended in :rfc:`4122`. "Hardware address"
means the MAC address of a network interface, and on a machine with multiple
network interfaces the MAC address of any one of them may be returned.
* ``symlinks`` -- a Boolean value indicating whether to attempt to symlink the
Python binary (and any necessary DLLs or other binaries,
- e.g. ``pythonw.exe``), rather than copying. Defaults to ``True`` on Linux and
- Unix systems, but ``False`` on Windows.
+ e.g. ``pythonw.exe``), rather than copying.
* ``upgrade`` -- a Boolean value which, if true, will upgrade an existing
environment with the running Python - for use when that Python has been
Wrap *application* and return a new WSGI application object. The returned
application will forward all requests to the original *application*, and will
check that both the *application* and the server invoking it are conforming to
- the WSGI specification and to RFC 2616.
+ the WSGI specification and to :rfc:`2616`.
Any detected nonconformance results in an :exc:`AssertionError` being raised;
note, however, that how these errors are handled is server-dependent. For
Python because they break backward compatibility.
-.. _defusedxml: https://pypi.python.org/pypi/defusedxml/
-.. _defusedexpat: https://pypi.python.org/pypi/defusedexpat/
+.. _defusedxml: https://pypi.org/project/defusedxml/
+.. _defusedexpat: https://pypi.org/project/defusedexpat/
.. _Billion Laughs: https://en.wikipedia.org/wiki/Billion_laughs
.. _ZIP bomb: https://en.wikipedia.org/wiki/Zip_bomb
.. _DTD: https://en.wikipedia.org/wiki/Document_type_definition
Write the XML-RPC base 64 encoding of this binary item to the *out* stream object.
The encoded data will have newlines every 76 characters as per
- `RFC 2045 section 6.8 <https://tools.ietf.org/html/rfc2045#section-6.8>`_,
+ :rfc:`RFC 2045 section 6.8 <2045#section-6.8>`,
which was the de facto standard base64 specification when the
XML-RPC spec was written.
Python code. When run, the archive will execute the ``main`` function from
the module ``myapp`` in the archive.
-.. code-block:: sh
+.. code-block:: shell-session
$ python -m zipapp myapp -m "myapp:main"
$ python myapp.pyz
When called as a program from the command line, the following form is used:
-.. code-block:: sh
+.. code-block:: shell-session
$ python -m zipapp source [options]
Pack up a directory into an archive, and run it.
-.. code-block:: sh
+.. code-block:: shell-session
$ python -m zipapp myapp
$ python myapp.pyz
To make the application directly executable on POSIX, specify an interpreter
to use.
-.. code-block:: sh
+.. code-block:: shell-session
$ python -m zipapp myapp -p "/usr/bin/env python"
$ ./myapp.pyz
>>> with open('myapp.pyz', 'wb') as f:
>>> f.write(temp.getvalue())
+
+.. _zipapp-specifying-the-interpreter:
+
+Specifying the Interpreter
+--------------------------
+
Note that if you specify an interpreter and then distribute your application
archive, you need to ensure that the interpreter used is portable. The Python
launcher for Windows supports most common forms of POSIX ``#!`` line, but there
exact version like "/usr/bin/env python3.4" as you will need to change your
shebang line for users of Python 3.5, for example.
+Typically, you should use "/usr/bin/env python2" or "/usr/bin/env python3",
+depending on whether your code is written for Python 2 or 3.
+
+
+Creating Standalone Applications with zipapp
+--------------------------------------------
+
+Using the :mod:`zipapp` module, it is possible to create self-contained Python
+programs, which can be distributed to end users who only need to have a
+suitable version of Python installed on their system. The key to doing this
+is to bundle all of the application's dependencies into the archive, along
+with the application code.
+
+The steps to create a standalone archive are as follows:
+
+1. Create your application in a directory as normal, so you have a ``myapp``
+ directory containing a ``__main__.py`` file, and any supporting application
+ code.
+
+2. Install all of your application's dependencies into the ``myapp`` directory,
+ using pip:
+
+ .. code-block:: shell-session
+
+ $ python -m pip install -r requirements.txt --target myapp
+
+ (this assumes you have your project requirements in a ``requirements.txt``
+ file - if not, you can just list the dependencies manually on the pip command
+ line).
+
+3. Optionally, delete the ``.dist-info`` directories created by pip in the
+ ``myapp`` directory. These hold metadata for pip to manage the packages, and
+ as you won't be making any further use of pip they aren't required -
+ although it won't do any harm if you leave them.
+
+4. Package the application using:
+
+ .. code-block:: shell-session
+
+ $ python -m zipapp -p "interpreter" myapp
+
+This will produce a standalone executable, which can be run on any machine with
+the appropriate interpreter available. See :ref:`zipapp-specifying-the-interpreter`
+for details. It can be shipped to users as a single file.
+
+On Unix, the ``myapp.pyz`` file is executable as it stands. You can rename the
+file to remove the ``.pyz`` extension if you prefer a "plain" command name. On
+Windows, the ``myapp.pyz[w]`` file is executable by virtue of the fact that
+the Python interpreter registers the ``.pyz`` and ``.pyzw`` file extensions
+when installed.
+
+
+Making a Windows executable
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+On Windows, registration of the ``.pyz`` extension is optional, and
+furthermore, there are certain places that don't recognise registered
+extensions "transparently" (the simplest example is that
+``subprocess.run(['myapp'])`` won't find your application - you need to
+explicitly specify the extension).
+
+On Windows, therefore, it is often preferable to create an executable from the
+zipapp. This is relatively easy, although it does require a C compiler. The
+basic approach relies on the fact that zipfiles can have arbitrary data
+prepended, and Windows exe files can have arbitrary data appended. So by
+creating a suitable launcher and tacking the ``.pyz`` file onto the end of it,
+you end up with a single-file executable that runs your application.
+
+A suitable launcher can be as simple as the following::
+
+ #define Py_LIMITED_API 1
+ #include "Python.h"
+
+ #define WIN32_LEAN_AND_MEAN
+ #include <windows.h>
+
+ #ifdef WINDOWS
+ int WINAPI wWinMain(
+ HINSTANCE hInstance, /* handle to current instance */
+ HINSTANCE hPrevInstance, /* handle to previous instance */
+ LPWSTR lpCmdLine, /* pointer to command line */
+ int nCmdShow /* show state of window */
+ )
+ #else
+ int wmain()
+ #endif
+ {
+ wchar_t **myargv = _alloca((__argc + 1) * sizeof(wchar_t*));
+ myargv[0] = __wargv[0];
+ memcpy(myargv + 1, __wargv, __argc * sizeof(wchar_t *));
+ return Py_Main(__argc+1, myargv);
+ }
+
+If you define the ``WINDOWS`` preprocessor symbol, this will generate a
+GUI executable, and without it, a console executable.
+
+To compile the executable, you can either just use the standard MSVC
+command line tools, or you can take advantage of the fact that distutils
+knows how to compile Python source::
+
+ >>> from distutils.ccompiler import new_compiler
+ >>> import distutils.sysconfig
+ >>> import sys
+ >>> import os
+ >>> from pathlib import Path
+
+ >>> def compile(src):
+ >>> src = Path(src)
+ >>> cc = new_compiler()
+ >>> exe = src.stem
+ >>> cc.add_include_dir(distutils.sysconfig.get_python_inc())
+ >>> cc.add_library_dir(os.path.join(sys.base_exec_prefix, 'libs'))
+ >>> # First the CLI executable
+ >>> objs = cc.compile([str(src)])
+ >>> cc.link_executable(objs, exe)
+ >>> # Now the GUI executable
+ >>> cc.define_macro('WINDOWS')
+ >>> objs = cc.compile([str(src)])
+ >>> cc.link_executable(objs, exe + 'w')
+
+ >>> if __name__ == "__main__":
+ >>> compile("zastub.c")
+
+The resulting launcher uses the "Limited ABI", so it will run unchanged with
+any version of Python 3.x. All it needs is for Python (``python3.dll``) to be
+on the user's ``PATH``.
+
+For a fully standalone distribution, you can distribute the launcher with your
+application appended, bundled with the Python "embedded" distribution. This
+will run on any PC with the appropriate architecture (32 bit or 64 bit).
+
+
+Caveats
+~~~~~~~
+
+There are some limitations to the process of bundling your application into
+a single file. In most, if not all, cases they can be addressed without
+needing major changes to your application.
+
+1. If your application depends on a package that includes a C extension, that
+ package cannot be run from a zip file (this is an OS limitation, as executable
+ code must be present in the filesystem for the OS loader to load it). In this
+ case, you can exclude that dependency from the zipfile, and either require
+ your users to have it installed, or ship it alongside your zipfile and add code
+ to your ``__main__.py`` to include the directory containing the unzipped
+ module in ``sys.path``. In this case, you will need to make sure to ship
+ appropriate binaries for your target architecture(s) (and potentially pick the
+ correct version to add to ``sys.path`` at runtime, based on the user's machine).
+
+2. If you are shipping a Windows executable as described above, you either need to
+ ensure that your users have ``python3.dll`` on their PATH (which is not the
+ default behaviour of the installer) or you should bundle your application with
+ the embedded distribution.
+
+3. The suggested launcher above uses the Python embedding API. This means that in
+ your application, ``sys.executable`` will be your application, and *not* a
+ conventional Python interpreter. Your code and its dependencies need to be
+ prepared for this possibility. For example, if your application uses the
+ :mod:`multiprocessing` module, it will need to call
+ :func:`multiprocessing.set_executable` to let the module know where to find the
+ standard Python interpreter.
+
+
The Python Zip Application Archive Format
-----------------------------------------
\r
set this=%~n0\r
\r
-call ..\PCBuild\find_python.bat %PYTHON%\r
-if not defined SPHINXBUILD if defined PYTHON (\r
+call ..\PCbuild\find_python.bat %PYTHON%\r
+\r
+if not defined PYTHON set PYTHON=py\r
+\r
+if not defined SPHINXBUILD (\r
%PYTHON% -c "import sphinx" > nul 2> nul\r
if errorlevel 1 (\r
echo Installing sphinx with %PYTHON%\r
%PYTHON% -m pip install sphinx\r
if errorlevel 1 exit /B\r
)\r
- set SPHINXBUILD=%PYTHON% -c "import sphinx, sys; sys.argv[0] = 'sphinx-build'; sphinx.main()"\r
+ set SPHINXBUILD=%PYTHON% -c "import sphinx, sys; sys.argv[0] = 'sphinx-build'; sys.exit(sphinx.main())"\r
+)\r
+\r
+%PYTHON% -c "import python_docs_theme" > nul 2> nul\r
+if errorlevel 1 (\r
+ echo Installing python-docs-theme with %PYTHON%\r
+ %PYTHON% -m pip install python-docs-theme\r
+ if errorlevel 1 exit /B\r
)\r
\r
-if not defined BLURB if defined PYTHON (\r
+if not defined BLURB (\r
%PYTHON% -c "import blurb" > nul 2> nul\r
if errorlevel 1 (\r
echo Installing blurb with %PYTHON%\r
set BLURB=%PYTHON% -m blurb\r
)\r
\r
-if not defined PYTHON set PYTHON=py\r
-if not defined SPHINXBUILD set SPHINXBUILD=sphinx-build\r
-if not defined BLURB set BLURB=blurb\r
-\r
if "%1" NEQ "htmlhelp" goto :skiphhcsearch\r
if exist "%HTMLHELP%" goto :skiphhcsearch\r
\r
\r
if EXIST "%BUILDDIR%\html\index.html" (\r
echo.Opening "%BUILDDIR%\html\index.html" in the default web browser...\r
- start "%BUILDDIR%\html\index.html"\r
+ start "" "%BUILDDIR%\html\index.html"\r
)\r
\r
goto end\r
When a class attribute reference (for class :class:`C`, say) would yield a
class method object, it is transformed into an instance method object whose
- :attr:`__self__` attributes is :class:`C`. When it would yield a static
+ :attr:`__self__` attribute is :class:`C`. When it would yield a static
method object, it is transformed into the object wrapped by the static method
object. See section :ref:`descriptors` for another way in which attributes
retrieved from a class may differ from those actually contained in its
be propagated up to the ``type.__new__`` call in order for the class to be
initialised correctly.
Failing to do so will result in a :exc:`DeprecationWarning` in Python 3.6,
- and a :exc:`RuntimeWarning` in the future.
+ and a :exc:`RuntimeError` in Python 3.8.
When using the default metaclass :class:`type`, or any metaclass that ultimately
calls ``type.__new__``, the following additional customisation steps are
lambda_expr_nocond: "lambda" [`parameter_list`]: `expression_nocond`
Lambda expressions (sometimes called lambda forms) are used to create anonymous
-functions. The expression ``lambda arguments: expression`` yields a function
+functions. The expression ``lambda parameters: expression`` yields a function
object. The unnamed object behaves like a function object defined with:
.. code-block:: none
- def <lambda>(arguments):
+ def <lambda>(parameters):
return expression
See section :ref:`function` for the syntax of parameter lists. Note that
module.__path__
---------------
-By definition, if a module has a ``__path__`` attribute, it is a package,
-regardless of its value.
+By definition, if a module has a ``__path__`` attribute, it is a package.
A package's ``__path__`` attribute is used during imports of its subpackages.
Within the import machinery, it functions much the same as :data:`sys.path`,
--------------
A physical line is a sequence of characters terminated by an end-of-line
-sequence. In source files, any of the standard platform line termination
-sequences can be used - the Unix form using ASCII LF (linefeed), the Windows
-form using the ASCII sequence CR LF (return followed by linefeed), or the old
-Macintosh form using the ASCII CR (return) character. All of these forms can be
-used equally, regardless of platform.
+sequence. In source files and strings, any of the standard platform line
+termination sequences can be used - the Unix form using ASCII LF (linefeed),
+the Windows form using the ASCII sequence CR LF (return followed by linefeed),
+or the old Macintosh form using the ASCII CR (return) character. All of these
+forms can be used equally, regardless of platform. The end of input also serves
+as an implicit terminator for the final physical line.
When embedding Python, source code strings should be passed to Python APIs using
the standard C conventions for newline characters (the ``\n`` character,
from sphinx import addnodes
from sphinx.builders import Builder
from sphinx.locale import translators
+from sphinx.util import status_iterator
from sphinx.util.nodes import split_explicit_title
from sphinx.writers.html import HTMLTranslator
-from sphinx.writers.text import TextWriter
+from sphinx.writers.text import TextWriter, TextTranslator
from sphinx.writers.latex import LaTeXTranslator
from sphinx.domains.python import PyModulelevel, PyClassmember
class PydocTopicsBuilder(Builder):
name = 'pydoc-topics'
+ default_translator_class = TextTranslator
+
def init(self):
self.topics = {}
+ self.secnumbers = {}
def get_outdated_docs(self):
return 'all pydoc topics'
def write(self, *ignored):
writer = TextWriter(self)
- for label in self.status_iterator(pydoc_topic_labels,
- 'building topics... ',
- length=len(pydoc_topic_labels)):
+ for label in status_iterator(pydoc_topic_labels,
+ 'building topics... ',
+ length=len(pydoc_topic_labels)):
if label not in self.env.domaindata['std']['labels']:
self.warn('label %r not in documentation' % label)
continue
var all_versions = {
'3.8': 'dev (3.8)',
- '3.7': 'pre (3.7)',
+ '3.7': '3.7',
'3.6': '3.6',
'3.5': '3.5',
'2.7': '2.7',
'en': 'English',
'fr': 'French',
'ja': 'Japanese',
+ 'ko': 'Korean',
};
function build_version_select(current_version, current_release) {
<h3>{% trans %}Docs for other versions{% endtrans %}</h3>
<ul>
<li><a href="https://docs.python.org/3.8/">{% trans %}Python 3.8 (in development){% endtrans %}</a></li>
- <li><a href="https://docs.python.org/3.7/">{% trans %}Python 3.7 (pre-release){% endtrans %}</a></li>
- <li><a href="https://docs.python.org/3.5/">{% trans %}Python 3.5 (stable){% endtrans %}</a></li>
+ <li><a href="https://docs.python.org/3.7/">{% trans %}Python 3.7 (stable){% endtrans %}</a></li>
+ <li><a href="https://docs.python.org/3.5/">{% trans %}Python 3.5 (security-fixes){% endtrans %}</a></li>
<li><a href="https://docs.python.org/2.7/">{% trans %}Python 2.7 (stable){% endtrans %}</a></li>
<li><a href="https://www.python.org/doc/versions/">{% trans %}Old versions{% endtrans %}</a></li>
</ul>
The script can be given an executable mode, or permission, using the
:program:`chmod` command.
-.. code-block:: bash
+.. code-block:: shell-session
$ chmod +x myscript.py
single: -> (return annotation assignment)
:ref:`Function annotations <function>` are completely optional metadata
-information about the types used by user-defined functions (see :pep:`484`
-for more information).
+information about the types used by user-defined functions (see :pep:`3107` and
+:pep:`484` for more information).
Annotations are stored in the :attr:`__annotations__` attribute of the function
as a dictionary and have no effect on any other part of the function. Parameter
For example, to declare that Windows-1252 encoding is to be used, the first
line of your source code file should be::
- # -*- coding: cp-1252 -*-
+ # -*- coding: cp1252 -*-
One exception to the *first line* rule is when the source code starts with a
:ref:`UNIX "shebang" line <tut-scripts>`. In this case, the encoding
declaration should be added as the second line of the file. For example::
#!/usr/bin/env python3
- # -*- coding: cp-1252 -*-
+ # -*- coding: cp1252 -*-
.. rubric:: Footnotes
named ``B`` in a package named ``A``. Just like the use of modules saves the
authors of different modules from having to worry about each other's global
variable names, the use of dotted module names saves the authors of multi-module
-packages like NumPy or the Python Imaging Library from having to worry about
+packages like NumPy or Pillow from having to worry about
each other's module names.
Suppose you want to design a collection of modules (a "package") for the uniform
names, no direct knowledge or handling of XML is needed.
* The :mod:`email` package is a library for managing email messages, including
- MIME and other RFC 2822-based message documents. Unlike :mod:`smtplib` and
+ MIME and other :rfc:`2822`-based message documents. Unlike :mod:`smtplib` and
:mod:`poplib` which actually send and receive messages, the email package has
a complete toolset for building or decoding complex message structures
(including attachments) and for implementing internet encoding and header
You can install, upgrade, and remove packages using a program called
:program:`pip`. By default ``pip`` will install packages from the Python
-Package Index, <https://pypi.python.org/pypi>. You can browse the Python
+Package Index, <https://pypi.org>. You can browse the Python
Package Index by going to it in your web browser, or you can use ``pip``'s
limited search feature:
* https://docs.python.org: Fast access to Python's documentation.
-* https://pypi.python.org/pypi: The Python Package Index, previously also nicknamed
+* https://pypi.org: The Python Package Index, previously also nicknamed
the Cheese Shop, is an index of user-created Python modules that are available
for download. Once you begin releasing code, you can register it here so that
others can find it.
.. cmdoption:: -V
--version
- Print the Python version number and exit. Example output could be::
+ Print the Python version number and exit. Example output could be:
+
+ .. code-block:: none
Python 3.6.0b2+
- When given twice, print more information about the build, like::
+ When given twice, print more information about the build, like:
+
+ .. code-block:: none
Python 3.6.0b2+ (3.6:84a3c5003510+, Oct 26 2016, 02:33:55)
[GCC 6.2.0 20161005]
Warning control. Python's warning machinery by default prints warning
messages to :data:`sys.stderr`. A typical warning message has the following
- form::
+ form:
+
+ .. code-block:: none
file:line: category: message
=============
To easily use Python scripts on Unix, you need to make them executable,
-e.g. with ::
+e.g. with
+
+.. code-block:: shell-session
$ chmod +x script
alongside the executable. This file specifies a list of options and values.
When a value is provided as an attribute, it will be converted to a number if
possible. Values provided as element text are always left as strings. This
-example file sets the same options and the previous example::
+example file sets the same options as the previous example:
+
+.. code-block:: xml
<Options>
<Option Name="InstallAllUsers" Value="no" />
User level and the System level, or temporarily in a command prompt.
To temporarily set environment variables, open Command Prompt and use the
-:command:`set` command::
+:command:`set` command:
+
+.. code-block:: doscon
C:\>set PATH=C:\Program Files\Python 3.6;%PATH%
C:\>set PYTHONPATH=%PYTHONPATH%;C:\My_python_lib
Let's create a test Python script - create a file called ``hello.py`` with the
following contents
-::
+.. code-block:: python
#! python
import sys
You should notice the version number of your latest Python 2.x installation
is printed. Now try changing the first line to be:
-::
+.. code-block:: python
#! python3
For example, if the first line of your script starts with
-::
+.. code-block:: sh
#! /usr/bin/python
The shebang lines can also specify additional options to be passed to the
Python interpreter. For example, if you have a shebang line:
-::
+.. code-block:: sh
#! /usr/bin/python -v
* Setting ``PY_PYTHON=3.1`` is equivalent to the INI file containing:
-::
+.. code-block:: ini
[defaults]
python=3.1
* Setting ``PY_PYTHON=3`` and ``PY_PYTHON3=3.1`` is equivalent to the INI file
containing:
-::
+.. code-block:: ini
[defaults]
python=3
PyWin32
-------
-The `PyWin32 <https://pypi.python.org/pypi/pywin32>`_ module by Mark Hammond
+The `PyWin32 <https://pypi.org/project/pywin32>`_ module by Mark Hammond
is a collection of modules for advanced Windows-specific support. This includes
utilities for:
Running ``python setup.py register`` will collect the metadata describing a
package, such as its name, version, maintainer, description, &c., and send it to
a central catalog server. The resulting catalog is available from
-https://pypi.python.org/pypi.
+https://pypi.org.
To make the catalog a bit more useful, a new optional *classifiers* keyword
argument has been added to the Distutils :func:`setup` function. A list of
that lets you perform a limited number of passes through the polling loop. The
default is still to loop forever.
-* The :mod:`base64` module now has more complete RFC 3548 support for Base64,
+* The :mod:`base64` module now has more complete :rfc:`3548` support for Base64,
Base32, and Base16 encoding and decoding, including optional case folding and
optional alternative alphabets. (Contributed by Barry Warsaw.)
)
Another new enhancement to the Python package index at
-https://pypi.python.org is storing source and binary archives for a
+https://pypi.org is storing source and binary archives for a
package. The new :command:`upload` Distutils command will upload a package to
the repository.
* The XML-RPC :class:`SimpleXMLRPCServer` and :class:`DocXMLRPCServer`
classes can now be prevented from immediately opening and binding to
- their socket by passing True as the ``bind_and_activate``
+ their socket by passing ``False`` as the *bind_and_activate*
constructor parameter. This can be used to modify the instance's
:attr:`allow_reuse_address` attribute before calling the
:meth:`server_bind` and :meth:`server_activate` methods to
*ciphers* argument that's a string listing the encryption algorithms
to be allowed; the format of the string is described
`in the OpenSSL documentation
- <https://www.openssl.org/docs/apps/ciphers.html#CIPHER-LIST-FORMAT>`__.
+ <https://www.openssl.org/docs/manmaster/man1/ciphers.html#CIPHER-LIST-FORMAT>`__.
(Added by Antoine Pitrou; :issue:`8322`.)
Another change makes the extension load all of OpenSSL's ciphers and
Ttk theme engine, available at
https://www.tcl.tk/man/tcl8.5/TkCmd/ttk_intro.htm. Some
screenshots of the Python/Ttk code in use are at
-http://code.google.com/p/python-ttk/wiki/Screenshots.
+https://code.google.com/archive/p/python-ttk/wikis/Screenshots.wiki.
The :mod:`ttk` module was written by Guilherme Polo and added in
:issue:`2983`. An alternate version called ``Tile.py``, written by
by Michael Foord, unless otherwise noted. The enhanced version of
the module is downloadable separately for use with Python versions 2.4 to 2.6,
packaged as the :mod:`unittest2` package, from
-https://pypi.python.org/pypi/unittest2.
+https://pypi.org/project/unittest2.
When used from the command line, the module can automatically discover
tests. It's not as fancy as `py.test <http://pytest.org>`__ or
-`nose <http://code.google.com/p/python-nose/>`__, but provides a simple way
-to run tests kept within a set of package directories. For example,
+`nose <https://nose.readthedocs.io/>`__, but provides a
+simple way to run tests kept within a set of package directories. For example,
the following command will search the :file:`test/` subdirectory for
any importable test files named ``test*.py``::
installation and a user-installed copy of the same version.
(Changed by Ronald Oussoren; :issue:`4865`.)
+ .. versionchanged:: 2.7.13
+
+ As of 2.7.13, this change was removed.
+ ``/Library/Python/2.7/site-packages``, the site-packages directory
+ used by the Apple-supplied system Python 2.7 is no longer appended to
+ ``sys.path`` for user-installed Pythons such as from the python.org
+ installers. As of macOS 10.12, Apple changed how the system
+ site-packages directory is configured, which could cause installation
+ of pip components, like setuptools, to fail. Packages installed for
+ the system Python will no longer be shared with user-installed
+ Pythons. (:issue:`28440`)
+
Port-Specific Changes: FreeBSD
-----------------------------------
maintenance release.
+Two new environment variables for debug mode
+--------------------------------------------
+
+In debug mode, the ``[xxx refs]`` statistic is not written by default; the
+:envvar:`PYTHONSHOWREFCOUNT` environment variable must now also be set.
+(Contributed by Victor Stinner; :issue:`31733`.)
+
+When Python is compiled with ``COUNT_ALLOC`` defined, allocation counts are no
+longer dumped by default: the :envvar:`PYTHONSHOWALLOCCOUNT` environment
+variable must now also be set. Moreover, allocation counts are now dumped into
+stderr, rather than stdout. (Contributed by Victor Stinner; :issue:`31692`.)
+
+.. versionadded:: 2.7.15
+
+
PEP 434: IDLE Enhancement Exception for All Branches
----------------------------------------------------
certificate store, the :class:`~ssl.SSLContext` class, and other
features. (Contributed by Alex Gaynor and David Reid; :issue:`21308`.)
+ Refer to the "Version added: 2.7.9" notes in the module documentation for
+ specific details.
+
* :func:`os.urandom` was changed to cache a file descriptor to ``/dev/urandom``
instead of reopening ``/dev/urandom`` on every call. (Contributed by Alex
Gaynor; :issue:`21305`.)
+* :data:`hashlib.algorithms_guaranteed` and
+ :data:`hashlib.algorithms_available` were backported from Python 3 to make
+ it easier for Python 2 applications to select the strongest available hash
+ algorithm. (Contributed by Alex Gaynor in :issue:`21307`)
+
+
+PEP 477: Backport ensurepip (PEP 453) to Python 2.7
+---------------------------------------------------
+
+:pep:`477` approves the inclusion of the :pep:`453` ensurepip module and the
+improved documentation that was enabled by it in the Python 2.7 maintenance
+releases, appearing first in the Python 2.7.9 release.
+
+
+Bootstrapping pip By Default
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The new :mod:`ensurepip` module (defined in :pep:`453`) provides a standard
+cross-platform mechanism to bootstrap the pip installer into Python
+installations. The version of ``pip`` included with Python 2.7.9 is ``pip``
+1.5.6, and future 2.7.x maintenance releases will update the bundled version to
+the latest version of ``pip`` that is available at the time of creating the
+release candidate.
+
+By default, the commands ``pip``, ``pipX`` and ``pipX.Y`` will be installed on
+all platforms (where X.Y stands for the version of the Python installation),
+along with the ``pip`` Python package and its dependencies.
+
+For CPython :ref:`source builds on POSIX systems <building-python-on-unix>`,
+the ``make install`` and ``make altinstall`` commands do not bootstrap ``pip``
+by default. This behaviour can be controlled through configure options, and
+overridden through Makefile options.
+
+On Windows and Mac OS X, the CPython installers now default to installing
+``pip`` along with CPython itself (users may opt out of installing it
+during the installation process). Windows users will need to opt in to the
+automatic ``PATH`` modifications to have ``pip`` available from the command
+line by default, otherwise it can still be accessed through the Python
+launcher for Windows as ``py -m pip``.
+
+As `discussed in the PEP`__, platform packagers may choose not to install
+these commands by default, as long as, when invoked, they provide clear and
+simple directions on how to install them on that platform (usually using
+the system package manager).
+
+__ https://www.python.org/dev/peps/pep-0477/#disabling-ensurepip-by-downstream-distributors
+
+
+Documentation Changes
+~~~~~~~~~~~~~~~~~~~~~
+
+As part of this change, the :ref:`installing-index` and
+:ref:`distributing-index` sections of the documentation have been
+completely redesigned as short getting started and FAQ documents. Most
+packaging documentation has now been moved out to the Python Packaging
+Authority maintained `Python Packaging User Guide
+<http://packaging.python.org>`__ and the documentation of the individual
+projects.
+
+However, as this migration is currently still incomplete, the legacy
+versions of those guides remain available as :ref:`install-index`
+and :ref:`distutils-index`.
+
+.. seealso::
+
+ :pep:`453` -- Explicit bootstrapping of pip in Python installations
+ PEP written by Donald Stufft and Nick Coghlan, implemented by
+ Donald Stufft, Nick Coghlan, Martin von Löwis and Ned Deily.
+
+PEP 476: Enabling certificate verification by default for stdlib http clients
+-----------------------------------------------------------------------------
+
+:pep:`476` updated :mod:`httplib` and modules which use it, such as
+:mod:`urllib2` and :mod:`xmlrpclib`, to now verify that the server
+presents a certificate which is signed by a Certificate Authority in the
+platform trust store and whose hostname matches the hostname being requested
+by default, significantly improving security for many applications. This
+change was made in the Python 2.7.9 release.
+
+For applications which require the previous behavior, they can pass an
+alternate context::
+
+ import urllib2
+ import ssl
+
+ # This disables all verification
+ context = ssl._create_unverified_context()
+
+ # This allows using a specific certificate for the host, which doesn't need
+ # to be in the trust store
+ context = ssl.create_default_context(cafile="/path/to/file.crt")
+
+ urllib2.urlopen("https://invalid-cert", context=context)
+
+
+PEP 493: HTTPS verification migration tools for Python 2.7
+----------------------------------------------------------
+
+:pep:`493` provides additional migration tools to support a more incremental
+infrastructure upgrade process for environments containing applications and
+services relying on the historically permissive processing of server
+certificates when establishing client HTTPS connections. These additions were
+made in the Python 2.7.12 release.
+
+These tools are intended for use in cases where affected applications and
+services can't be modified to explicitly pass a more permissive SSL context
+when establishing the connection.
+
+For applications and services which can't be modified at all, the new
+``PYTHONHTTPSVERIFY`` environment variable may be set to ``0`` to revert an
+entire Python process back to the default permissive behaviour of Python 2.7.8
+and earlier.
+
+For cases where the connection establishment code can't be modified, but the
+overall application can be, the new :func:`ssl._https_verify_certificates`
+function can be used to adjust the default behaviour at runtime.
+
+
+New ``make regen-all`` build target
+-----------------------------------
+
+To simplify cross-compilation, and to ensure that CPython can reliably be
+compiled without requiring an existing version of Python to already be
+available, the autotools-based build system no longer attempts to implicitly
+recompile generated files based on file modification times.
+
+Instead, a new ``make regen-all`` command has been added to force regeneration
+of these files when desired (e.g. after an initial version of Python has
+already been built based on the pregenerated versions).
+
+More selective regeneration targets are also defined - see
+:source:`Makefile.pre.in` for details.
+
+(Contributed by Victor Stinner in :issue:`23404`.)
+
+.. versionadded:: 2.7.14
+
+
+Removal of ``make touch`` build target
+--------------------------------------
+
+The ``make touch`` build target previously used to request implicit regeneration
+of generated files by updating their modification times has been removed.
+
+It has been replaced by the new ``make regen-all`` target.
+
+(Contributed by Victor Stinner in :issue:`23404`.)
+
+.. versionchanged:: 2.7.14
.. ======================================================================
(Contributed by Yury Selivanov in :issue:`24184`.)
For earlier Python versions, a backport of the new ABCs is available in an
-external `PyPI package <https://pypi.python.org/pypi/backports_abc>`_.
+external `PyPI package <https://pypi.org/project/backports_abc>`_.
compileall
:c:member:`tp_as_async` slot. Refer to :ref:`coro-objects` for
new types, structures and functions.
+
+Notable changes in Python 3.5.4
+===============================
+
+New ``make regen-all`` build target
+-----------------------------------
+
+To simplify cross-compilation, and to ensure that CPython can reliably be
+compiled without requiring an existing version of Python to already be
+available, the autotools-based build system no longer attempts to implicitly
+recompile generated files based on file modification times.
+
+Instead, a new ``make regen-all`` command has been added to force regeneration
+of these files when desired (e.g. after an initial version of Python has
+already been built based on the pregenerated versions).
+
+More selective regeneration targets are also defined - see
+:source:`Makefile.pre.in` for details.
+
+(Contributed by Victor Stinner in :issue:`23404`.)
+
+.. versionadded:: 3.5.4
+
+
+Removal of ``make touch`` build target
+--------------------------------------
+
+The ``make touch`` build target previously used to request implicit regeneration
+of generated files by updating their modification times has been removed.
+
+It has been replaced by the new ``make regen-all`` target.
+
+(Contributed by Victor Stinner in :issue:`23404`.)
+
+.. versionchanged:: 3.5.4
+
Windows improvements:
-* :ref:`PEP 528 <whatsnew36-pep529>` and :ref:`PEP 529 <whatsnew36-pep529>`,
+* :ref:`PEP 528 <whatsnew36-pep528>` and :ref:`PEP 529 <whatsnew36-pep529>`,
Windows filesystem and console encoding changed to UTF-8.
* The ``py.exe`` launcher, when used interactively, no longer prefers
and Guido van Rossum. Implemented by Ivan Levkivskyi.
Tools that use or will use the new syntax:
- `mypy <http://github.com/python/mypy>`_,
- `pytype <http://github.com/google/pytype>`_, PyCharm, etc.
+ `mypy <http://www.mypy-lang.org/>`_,
+ `pytype <https://github.com/google/pytype>`_, PyCharm, etc.
.. _whatsnew36-pep515:
(all backported to 3.5.x due to the provisional status):
* The :func:`~asyncio.get_event_loop` function has been changed to
- always return the currently running loop when called from couroutines
+ always return the currently running loop when called from coroutines
and callbacks.
(Contributed by Yury Selivanov in :issue:`28613`.)
cmath
-----
-The new :const:`cmath.tau` (τ) constant has been added.
+The new :const:`cmath.tau` (*τ*) constant has been added.
(Contributed by Lisa Roach in :issue:`12345`, see :pep:`628` for details.)
New constants: :const:`cmath.inf` and :const:`cmath.nan` to
any code relying on the presence of ``default_format`` may
need to be adapted. See :issue:`27819` for more details.
-The ``upload`` command now longer tries to change CR end-of-line characters
-to CRLF. This fixes a corruption issue with sdists that ended with a byte
-equivalent to CR.
-(Contributed by Bo Bayles in :issue:`32304`.)
-
email
-----
easier to use, with better APIs and docstrings explaining them. Additional
useful information will be added to idlelib when available.
+New in 3.6.2:
+
+Multiple fixes for autocompletion. (Contributed by Louie Lu in :issue:`15786`.)
+
+New in 3.6.3:
+
+Module Browser (on the File menu, formerly called Class Browser),
+now displays nested functions and classes in addition to top-level
+functions and classes.
+(Contributed by Guilherme Polo, Cheryl Sabella, and Terry Jan Reedy
+in :issue:`1612262`.)
+
+The IDLE features formerly implemented as extensions have been reimplemented
+as normal features. Their settings have been moved from the Extensions tab
+to other dialog tabs.
+(Contributed by Charles Wohlganger and Terry Jan Reedy in :issue:`27099`.)
+
+The Settings dialog (Options, Configure IDLE) has been partly rewritten
+to improve both appearance and function.
+(Contributed by Cheryl Sabella and Terry Jan Reedy in multiple issues.)
+
+New in 3.6.4:
+
+The font sample now includes a selection of non-Latin characters so that
+users can better see the effect of selecting a particular font.
+(Contributed by Terry Jan Reedy in :issue:`13802`.)
+The sample can be edited to include other characters.
+(Contributed by Serhiy Storchaka in :issue:`31860`.)
+
+New in 3.6.6:
+
+Editor code context option revised. Box displays all context lines up to
+maxlines. Clicking on a context line jumps the editor to that line. Context
+colors for custom themes are added to the Highlights tab of the Settings dialog.
+(Contributed by Cheryl Sabella and Terry Jan Reedy in :issue:`33642`,
+:issue:`33768`, and :issue:`33679`)
+
+On Windows, a new API call tells Windows that tk scales for DPI. On Windows
+8.1+ or 10, with DPI compatibility properties of the Python binary
+unchanged, and a monitor resolution greater than 96 DPI, this should
+make text and lines sharper. It should otherwise have no effect.
+(Contributed by Terry Jan Reedy in :issue:`33656`).
+
importlib
---------
math
----
-The tau (τ) constant has been added to the :mod:`math` and :mod:`cmath`
+The tau (*τ*) constant has been added to the :mod:`math` and :mod:`cmath`
modules.
(Contributed by Lisa Roach in :issue:`12345`, see :pep:`628` for details.)
wrapped in ClassVar indicates that a given attribute is intended to be used as
a class variable and should not be set on instances of that class.
(Contributed by Ivan Levkivskyi in `Github #280
-<https://github.com/python/typing/issues/280>`_.)
+<https://github.com/python/typing/pull/280>`_.)
A new :const:`~typing.TYPE_CHECKING` constant that is assumed to be
``True`` by the static type chekers, but is ``False`` at runtime.
* The :c:func:`PyUnicode_FSConverter` and :c:func:`PyUnicode_FSDecoder`
functions will now accept :term:`path-like objects <path-like object>`.
-* The ``PyExc_RecursionErrorInst`` singleton that was part of the public API
- has been removed as its members being never cleared may cause a segfault
- during finalization of the interpreter. Contributed by Xavier de Gaye in
- :issue:`22898` and :issue:`30697`.
-
Other Improvements
==================
platform-specific selection is made.
In environments where distributions are
built on Windows and zip distributions are required, configure
- the project with a ``setup.cfg`` file containing the following::
+ the project with a ``setup.cfg`` file containing the following:
+
+ .. code-block:: ini
[sdist]
formats=zip
direct references from methods to the implicit ``__class__`` closure
variable, the implicit ``__classcell__`` namespace entry must now be passed
up to ``type.__new__`` for initialisation. Failing to do so will result in
- a :exc:`DeprecationWarning` in 3.6 and a :exc:`RuntimeWarning` in the future.
+ a :exc:`DeprecationWarning` in Python 3.6 and a :exc:`RuntimeError` in
+ Python 3.8.
Changes in the C API
--------------------
.. versionchanged:: 3.6.2
+Notable changes in Python 3.6.4
+===============================
+
+The ``PyExc_RecursionErrorInst`` singleton that was part of the public API
+has been removed as its members being never cleared may cause a segfault
+during finalization of the interpreter.
+(Contributed by Xavier de Gaye in :issue:`22898` and :issue:`30697`.)
+
+
Notable changes in Python 3.6.5
===============================
/*--start constants--*/
#define PY_MAJOR_VERSION 3
#define PY_MINOR_VERSION 6
-#define PY_MICRO_VERSION 5
+#define PY_MICRO_VERSION 6
#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL
#define PY_RELEASE_SERIAL 0
/* Version as a string */
-#define PY_VERSION "3.6.5"
+#define PY_VERSION "3.6.6"
/*--end constants--*/
/* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2.
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
- part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
+ part_regexp = (
+ r'\(.*?\)+(?=\s|$)|'
+ r'\[.*?\]+(?=\s|$)|'
+ r'\S+'
+ )
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
if bufsize != 0:
raise ValueError("bufsize must be 0")
protocol = protocol_factory()
+ debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = yield from self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
- if self._debug:
+ if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
"a bytes or text string, not %s"
% type(arg).__name__)
protocol = protocol_factory()
+ debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
transport = yield from self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
- if self._debug:
+ if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
if coro_name is None:
coro_name = events._format_callback(func, (), {})
- try:
- coro_code = coro.gi_code
- except AttributeError:
+ coro_code = None
+ if hasattr(coro, 'cr_code') and coro.cr_code:
coro_code = coro.cr_code
+ elif hasattr(coro, 'gi_code') and coro.gi_code:
+ coro_code = coro.gi_code
- try:
- coro_frame = coro.gi_frame
- except AttributeError:
+ coro_frame = None
+ if hasattr(coro, 'cr_frame') and coro.cr_frame:
coro_frame = coro.cr_frame
+ elif hasattr(coro, 'gi_frame') and coro.gi_frame:
+ coro_frame = coro.gi_frame
+
+ filename = '<empty co_filename>'
+ if coro_code and coro_code.co_filename:
+ filename = coro_code.co_filename
- filename = coro_code.co_filename
lineno = 0
+ coro_repr = coro_name
+
if (isinstance(coro, CoroWrapper) and
not inspect.isgeneratorfunction(coro.func) and
coro.func is not None):
lineno = coro_frame.f_lineno
coro_repr = ('%s running at %s:%s'
% (coro_name, filename, lineno))
- else:
+ elif coro_code:
lineno = coro_code.co_firstlineno
coro_repr = ('%s done, defined at %s:%s'
% (coro_name, filename, lineno))
suffix = _format_args_and_kwargs(args, kwargs) + suffix
return _format_callback(func.func, func.args, func.keywords, suffix)
- if hasattr(func, '__qualname__'):
- func_repr = getattr(func, '__qualname__')
- elif hasattr(func, '__name__'):
- func_repr = getattr(func, '__name__')
+ if hasattr(func, '__qualname__') and func.__qualname__:
+ func_repr = func.__qualname__
+ elif hasattr(func, '__name__') and func.__name__:
+ func_repr = func.__name__
else:
func_repr = repr(func)
source_loop.call_soon_threadsafe(source.cancel)
def _call_set_state(source):
+ if (destination.cancelled() and
+ dest_loop is not None and dest_loop.is_closed()):
+ return
if dest_loop is None or dest_loop is source_loop:
_set_state(destination, source)
else:
extra=None, server=None):
super().__init__(loop, sock, protocol, waiter, extra, server)
self._paused = False
+ self._reschedule_on_resume = False
self._loop.call_soon(self._loop_reading)
def pause_reading(self):
self._paused = False
if self._closing:
return
- self._loop.call_soon(self._loop_reading, self._read_fut)
+ if self._reschedule_on_resume:
+ self._loop.call_soon(self._loop_reading, self._read_fut)
+ self._reschedule_on_resume = False
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _loop_reading(self, fut=None):
if self._paused:
+ self._reschedule_on_resume = True
return
data = None
def get_write_buffer_size(self):
return len(self._buffer)
+ def _add_reader(self, fd, callback, *args):
+ if self._closing:
+ return
+
+ self._loop._add_reader(fd, callback, *args)
+
class _SelectorSocketTransport(_SelectorTransport):
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
- self._loop.call_soon(self._loop._add_reader,
+ self._loop.call_soon(self._add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
if not self._paused:
raise RuntimeError('Not paused')
self._paused = False
- if self._closing:
- return
- self._loop._add_reader(self._sock_fd, self._read_ready)
+ self._add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
self._sock.shutdown(socket.SHUT_WR)
def write_eof(self):
- if self._eof:
+ if self._closing or self._eof:
return
self._eof = True
if not self._buffer:
self._address = address
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
- self._loop.call_soon(self._loop._add_reader,
+ self._loop.call_soon(self._add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
# (b'', 1) is a special value in _process_write_backlog() to do
# the SSL handshake
self._write_backlog.append((b'', 1))
- self._loop.call_soon(self._process_write_backlog)
+ self._process_write_backlog()
def _on_handshake_complete(self, handshake_exc):
self._in_handshake = False
self._step,
RuntimeError(
'yield was used instead of yield from for '
- 'generator in task {!r} with {}'.format(
+ 'generator in task {!r} with {!r}'.format(
self, result)))
else:
# Yielding something else is an error.
def __init__(self, children, *, loop=None):
super().__init__(loop=loop)
self._children = children
+ self._cancel_requested = False
def cancel(self):
if self.done():
for child in self._children:
if child.cancel():
ret = True
+ if ret:
+ # If any child tasks were actually cancelled, we should
+ # propagate the cancellation request regardless of
+ # *return_exceptions* argument. See issue 32684.
+ self._cancel_requested = True
return ret
results[i] = res
nfinished += 1
if nfinished == nchildren:
- outer.set_result(results)
+ if outer._cancel_requested:
+ outer.set_exception(futures.CancelledError())
+ else:
+ outer.set_result(results)
for i, fut in enumerate(children):
fut.add_done_callback(functools.partial(_done_callback, i))
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
- call = 'in ' + strong(func) + \
+ call = 'in ' + strong(pydoc.html.escape(func)) + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.html.repr(value))
if self.display:
if plain:
- doc = doc.replace('&', '&').replace('<', '<')
+ doc = pydoc.html.escape(doc)
self.file.write('<pre>' + doc + '</pre>\n')
else:
self.file.write(doc + '\n')
import unittest
+from test.support import bigmemtest, _2G
+import sys
from ctypes import *
from ctypes.test import need_symbol
_type_ = c_int
_length_ = 1.87
+ @unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
+ @bigmemtest(size=_2G, memuse=1, dry_run=False)
+ def test_large_array(self, size):
+ c_char * size
+
if __name__ == '__main__':
unittest.main()
self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
"hiho")
+ def test_bad_type_arg(self):
+ # The type argument must be a ctypes pointer type.
+ array_type = c_byte * sizeof(c_int)
+ array = array_type()
+ self.assertRaises(TypeError, cast, array, None)
+ self.assertRaises(TypeError, cast, array, array_type)
+ class Struct(Structure):
+ _fields_ = [("a", c_int)]
+ self.assertRaises(TypeError, cast, array, Struct)
+ class MyUnion(Union):
+ _fields_ = [("a", c_int)]
+ self.assertRaises(TypeError, cast, array, MyUnion)
+
if __name__ == "__main__":
unittest.main()
mytz = self.tzinfo
if mytz is None:
mytz = self._local_timezone()
+ myoffset = mytz.utcoffset(self)
+ else:
+ myoffset = mytz.utcoffset(self)
+ if myoffset is None:
+ mytz = self.replace(tzinfo=None)._local_timezone()
+ myoffset = mytz.utcoffset(self)
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
- myoffset = mytz.utcoffset(self)
- if myoffset is None:
- raise ValueError("astimezone() requires an aware datetime")
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
- while(lines_to_write):
- from_line, to_line, found_diff = next(line_pair_iterator)
- # If another change within the context, extend the context
- if found_diff:
- lines_to_write = context-1
- else:
- lines_to_write -= 1
- yield from_line, to_line, found_diff
+ try:
+ while(lines_to_write):
+ from_line, to_line, found_diff = next(line_pair_iterator)
+ # If another change within the context, extend the context
+ if found_diff:
+ lines_to_write = context-1
+ else:
+ lines_to_write -= 1
+ yield from_line, to_line, found_diff
+ except StopIteration:
+ # Catch exception from next() and return normally
+ return
_file_template = """
_SETUPTOOLS_VERSION = "39.0.1"
-_PIP_VERSION = "9.0.3"
+_PIP_VERSION = "10.0.1"
_PROJECTS = [
("setuptools", _SETUPTOOLS_VERSION),
sys.path = additional_paths + sys.path
# Install the bundled software
- import pip
- return pip.main(args)
+ import pip._internal
+ return pip._internal.main(args)
def version():
"--altinstall",
action="store_true",
default=False,
- help=("Make an alternate install, installing only the X.Y versioned"
- "scripts (Default: pipX, pipX.Y, easy_install-X.Y)"),
+ help=("Make an alternate install, installing only the X.Y versioned "
+ "scripts (Default: pipX, pipX.Y, easy_install-X.Y)."),
)
parser.add_argument(
"--default-pip",
action="store_true",
default=False,
help=("Make a default pip install, installing the unqualified pip "
- "and easy_install in addition to the versioned scripts"),
+ "and easy_install in addition to the versioned scripts."),
)
args = parser.parse_args(argv)
raise AttributeError('Cannot reassign members.')
super().__setattr__(name, value)
- def _create_(cls, class_name, names=None, *, module=None, qualname=None, type=None, start=1):
+ def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, start=1):
"""Convenience method to create a new Enum class.
`names` can be:
append("%s=%s" % (self._reserved[key], _getdate(value)))
elif key == "max-age" and isinstance(value, int):
append("%s=%d" % (self._reserved[key], value))
+ elif key == "comment" and isinstance(value, str):
+ append("%s=%s" % (self._reserved[key], _quote(value)))
elif key in self._flags:
if value:
append(str(self._reserved[key]))
+What's New in IDLE 3.6.6
+Released on 2018-06-15?
+======================================
+
+
+bpo-33656: On Windows, add API call saying that tk scales for DPI.
+On Windows 8.1+ or 10, with DPI compatibility properties of the Python
+binary unchanged, and a monitor resolution greater than 96 DPI, this
+should make text and lines sharper and some colors brighter.
+On other systems, it should have no effect. If you have a custom theme,
+you may want to adjust a color or two. If perchance it makes text worse
+on your monitor, you can disable the ctypes.OleDLL call near the top of
+pyshell.py and report the problem on python-list or idle-dev@python.org.
+
+bpo-33768: Clicking on a context line moves that line to the top
+of the editor window.
+
+bpo-33763: Replace the code context label widget with a text widget.
+
+bpo-33664: Scroll IDLE editor text by lines.
+(Previously, the mouse wheel and scrollbar slider moved text by a fixed
+number of pixels, resulting in partial lines at the top of the editor
+box.) This change also applies to the shell and grep output windows,
+but currently not to read-only text views.
+
+bpo-33679: Enable theme-specific color configuration for Code Context.
+(Previously, there was one code context foreground and background font
+color setting, default or custom, on the extensions tab, that applied
+to all themes.) For built-in themes, the foreground is the same as
+normal text and the background is a contrasting gray. Context colors for
+custom themes are set on the Highlights tab along with other colors.
+When one starts IDLE from a console and loads a custom theme without
+definitions for 'context', one will see a warning message on the
+console.
+
+bpo-33642: Display up to maxlines non-blank lines for Code Context.
+If there is no current context, show a single blank line. (Previously,
+the Code Context had numlines lines, usually with some blank.) The use
+of a new option, 'maxlines' (default 15), avoids possible interference
+with user settings of the old option, 'numlines' (default 3).
+
+bpo-33628: Cleanup codecontext.py and its test.
+
+bpo-32831: Add docstrings and tests for codecontext.py.
+Coverage is 100%. Patch by Cheryl Sabella.
+
+bpo-33564: Code context now recognizes async as a block opener.
+
+bpo-29706: IDLE now colors async and await as keywords in 3.6.
+They become full keywords in 3.7.
+
+bpo-21474: Update word/identifier definition from ascii to unicode.
+In text and entry boxes, this affects selection by double-click,
+movement left/right by control-left/right, and deletion left/right
+by control-BACKSPACE/DEL.
+
+bpo-33204: Consistently color invalid string prefixes.
+A 'u' string prefix cannot be paired with either 'r' or 'f'.
+IDLE now consistently colors as much of the prefix, starting at the
+right, as is valid. Revise and extend colorizer test.
+
+
What's New in IDLE 3.6.5
-Released on 2017-03-26?
+Released on 2018-03-28
======================================
+bpo-32984: Set __file__ while running a startup file.
+Like Python, IDLE optionally runs 1 startup file in the Shell window
+before presenting the first interactive input prompt. For IDLE,
+option -s runs a file named in environmental variable IDLESTARTUP or
+PYTHONSTARTUP; -r file runs file. Python sets __file__ to the startup
+file name before running the file and unsets it before the first
+prompt. IDLE now does the same when run normally, without the -n
+option.
+
+bpo-32940: Replace StringTranslatePseudoMapping with faster code.
bpo-32916: Change 'str' to 'code' in idlelib.pyparse and users.
Edits persist while IDLE remains open.
Patch by Serhiy Storchake and Terry Jan Reedy.
-bpo-31858: Restrict shell prompt manipulaton to the shell.
+bpo-31858: Restrict shell prompt manipulation to the shell.
Editor and output windows only see an empty last prompt line. This
simplifies the code and fixes a minor bug when newline is inserted.
Sys.ps1, if present, is read on Shell start-up, but is not set or changed.
determine which block you are in. This extension implements a pane at the top
of each IDLE edit window which provides block structure hints. These hints are
the lines which contain the block opening keywords, e.g. 'if', for the
-enclosing block. The number of hint lines is determined by the numlines
+enclosing block. The number of hint lines is determined by the maxlines
variable in the codecontext section of config-extensions.def. Lines which do
not open blocks are not shown in the context hints pane.
from idlelib.config import idleConf
BLOCKOPENERS = {"class", "def", "elif", "else", "except", "finally", "for",
- "if", "try", "while", "with"}
+ "if", "try", "while", "with", "async"}
UPDATEINTERVAL = 100 # millisec
-FONTUPDATEINTERVAL = 1000 # millisec
+CONFIGUPDATEINTERVAL = 1000 # millisec
-def getspacesfirstword(s, c=re.compile(r"^(\s*)(\w*)")):
- return c.match(s).groups()
+
+def get_spaces_firstword(codeline, c=re.compile(r"^(\s*)(\w*)")):
+ "Extract the beginning whitespace and first word from codeline."
+ return c.match(codeline).groups()
+
+
+def get_line_info(codeline):
+ """Return tuple of (line indent value, codeline, block start keyword).
+
+ The indentation of empty lines (or comment lines) is INFINITY.
+ If the line does not start a block, the keyword value is False.
+ """
+ spaces, firstword = get_spaces_firstword(codeline)
+ indent = len(spaces)
+ if len(codeline) == indent or codeline[indent] == '#':
+ indent = INFINITY
+ opener = firstword in BLOCKOPENERS and firstword
+ return indent, codeline, opener
class CodeContext:
- bgcolor = "LightGray"
- fgcolor = "Black"
+ "Display block context above the edit window."
def __init__(self, editwin):
+ """Initialize settings for context block.
+
+ editwin is the Editor window for the context block.
+ self.text is the editor window text widget.
+ self.textfont is the editor window font.
+
+ self.context displays the code context text above the editor text.
+ Initially None, it is toggled via <<toggle-code-context>>.
+ self.topvisible is the number of the top text line displayed.
+ self.info is a list of (line number, indent level, line text,
+ block keyword) tuples for the block structure above topvisible.
+ self.info[0] is initialized with a 'dummy' line which
+ starts the toplevel 'block' of the module.
+
+ self.t1 and self.t2 are two timer events on the editor text widget to
+ monitor for changes to the context text or editor font.
+ """
self.editwin = editwin
self.text = editwin.text
self.textfont = self.text["font"]
- self.label = None
- # self.info is a list of (line number, indent level, line text, block
- # keyword) tuples providing the block structure associated with
- # self.topvisible (the linenumber of the line displayed at the top of
- # the edit window). self.info[0] is initialized as a 'dummy' line which
- # starts the toplevel 'block' of the module.
- self.info = [(0, -1, "", False)]
+ self.contextcolors = CodeContext.colors
+ self.context = None
self.topvisible = 1
+ self.info = [(0, -1, "", False)]
# Start two update cycles, one for context lines, one for font changes.
self.t1 = self.text.after(UPDATEINTERVAL, self.timer_event)
- self.t2 = self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
+ self.t2 = self.text.after(CONFIGUPDATEINTERVAL, self.config_timer_event)
@classmethod
def reload(cls):
+ "Load class variables from config."
cls.context_depth = idleConf.GetOption("extensions", "CodeContext",
- "numlines", type="int", default=3)
-## cls.bgcolor = idleConf.GetOption("extensions", "CodeContext",
-## "bgcolor", type="str", default="LightGray")
-## cls.fgcolor = idleConf.GetOption("extensions", "CodeContext",
-## "fgcolor", type="str", default="Black")
+ "maxlines", type="int", default=15)
+ cls.colors = idleConf.GetHighlight(idleConf.CurrentTheme(), 'context')
def __del__(self):
+ "Cancel scheduled events."
try:
self.text.after_cancel(self.t1)
self.text.after_cancel(self.t2)
pass
def toggle_code_context_event(self, event=None):
- if not self.label:
+ """Toggle code context display.
+
+ If self.context doesn't exist, create it to match the size of the editor
+ window text (toggle on). If it does exist, destroy it (toggle off).
+ Return 'break' to complete the processing of the binding.
+ """
+ if not self.context:
# Calculate the border width and horizontal padding required to
# align the context with the text in the main Text widget.
#
# All values are passed through getint(), since some
# values may be pixel objects, which can't simply be added to ints.
widgets = self.editwin.text, self.editwin.text_frame
- # Calculate the required vertical padding
+ # Calculate the required horizontal padding and border width.
padx = 0
+ border = 0
for widget in widgets:
padx += widget.tk.getint(widget.pack_info()['padx'])
padx += widget.tk.getint(widget.cget('padx'))
- # Calculate the required border width
- border = 0
- for widget in widgets:
border += widget.tk.getint(widget.cget('border'))
- self.label = tkinter.Label(
- self.editwin.top, text="\n" * (self.context_depth - 1),
- anchor=W, justify=LEFT, font=self.textfont,
- bg=self.bgcolor, fg=self.fgcolor,
- width=1, #don't request more than we get
- padx=padx, border=border, relief=SUNKEN)
- # Pack the label widget before and above the text_frame widget,
- # thus ensuring that it will appear directly above text_frame
- self.label.pack(side=TOP, fill=X, expand=False,
+ self.context = tkinter.Text(
+ self.editwin.top, font=self.textfont,
+ bg=self.contextcolors['background'],
+ fg=self.contextcolors['foreground'],
+ height=1,
+ width=1, # Don't request more than we get.
+ padx=padx, border=border, relief=SUNKEN, state='disabled')
+ self.context.bind('<ButtonRelease-1>', self.jumptoline)
+ # Pack the context widget before and above the text_frame widget,
+ # thus ensuring that it will appear directly above text_frame.
+ self.context.pack(side=TOP, fill=X, expand=False,
before=self.editwin.text_frame)
else:
- self.label.destroy()
- self.label = None
+ self.context.destroy()
+ self.context = None
return "break"
- def get_line_info(self, linenum):
- """Get the line indent value, text, and any block start keyword
-
- If the line does not start a block, the keyword value is False.
- The indentation of empty lines (or comment lines) is INFINITY.
-
- """
- text = self.text.get("%d.0" % linenum, "%d.end" % linenum)
- spaces, firstword = getspacesfirstword(text)
- opener = firstword in BLOCKOPENERS and firstword
- if len(text) == len(spaces) or text[len(spaces)] == '#':
- indent = INFINITY
- else:
- indent = len(spaces)
- return indent, text, opener
-
def get_context(self, new_topvisible, stopline=1, stopindent=0):
- """Get context lines, starting at new_topvisible and working backwards.
-
- Stop when stopline or stopindent is reached. Return a tuple of context
- data and the indent level at the top of the region inspected.
+ """Return a list of block line tuples and the 'last' indent.
+ The tuple fields are (linenum, indent, text, opener).
+ The list represents header lines from new_topvisible back to
+ stopline with successively shorter indents > stopindent.
+ The list is returned ordered by line number.
+ Last indent returned is the smallest indent observed.
"""
assert stopline > 0
lines = []
- # The indentation level we are currently in:
+ # The indentation level we are currently in.
lastindent = INFINITY
# For a line to be interesting, it must begin with a block opening
# keyword, and have less indentation than lastindent.
for linenum in range(new_topvisible, stopline-1, -1):
- indent, text, opener = self.get_line_info(linenum)
+ codeline = self.text.get(f'{linenum}.0', f'{linenum}.end')
+ indent, text, opener = get_line_info(codeline)
if indent < lastindent:
lastindent = indent
if opener in ("else", "elif"):
- # We also show the if statement
+ # Also show the if statement.
lastindent += 1
if opener and linenum < new_topvisible and indent >= stopindent:
lines.append((linenum, indent, text, opener))
def update_code_context(self):
"""Update context information and lines visible in the context pane.
+ No update is done if the text hasn't been scrolled. If the text
+ was scrolled, the lines that should be shown in the context will
+ be retrieved and the context area will be updated with the code,
+ up to the number of maxlines.
"""
new_topvisible = int(self.text.index("@0,0").split('.')[0])
- if self.topvisible == new_topvisible: # haven't scrolled
+ if self.topvisible == new_topvisible: # Haven't scrolled.
return
- if self.topvisible < new_topvisible: # scroll down
+ if self.topvisible < new_topvisible: # Scroll down.
lines, lastindent = self.get_context(new_topvisible,
self.topvisible)
- # retain only context info applicable to the region
- # between topvisible and new_topvisible:
+ # Retain only context info applicable to the region
+ # between topvisible and new_topvisible.
while self.info[-1][1] >= lastindent:
del self.info[-1]
- elif self.topvisible > new_topvisible: # scroll up
+ else: # self.topvisible > new_topvisible: # Scroll up.
stopindent = self.info[-1][1] + 1
- # retain only context info associated
- # with lines above new_topvisible:
+ # Retain only context info associated
+ # with lines above new_topvisible.
while self.info[-1][0] >= new_topvisible:
stopindent = self.info[-1][1]
del self.info[-1]
stopindent)
self.info.extend(lines)
self.topvisible = new_topvisible
- # empty lines in context pane:
- context_strings = [""] * max(0, self.context_depth - len(self.info))
- # followed by the context hint lines:
- context_strings += [x[2] for x in self.info[-self.context_depth:]]
- self.label["text"] = '\n'.join(context_strings)
+ # Last context_depth context lines.
+ context_strings = [x[2] for x in self.info[-self.context_depth:]]
+ showfirst = 0 if context_strings[0] else 1
+ # Update widget.
+ self.context['height'] = len(context_strings) - showfirst
+ self.context['state'] = 'normal'
+ self.context.delete('1.0', 'end')
+ self.context.insert('end', '\n'.join(context_strings[showfirst:]))
+ self.context['state'] = 'disabled'
+
+ def jumptoline(self, event=None):
+ "Show clicked context line at top of editor."
+ lines = len(self.info)
+ if lines == 1: # No context lines are showing.
+ newtop = 1
+ else:
+ # Line number clicked.
+ contextline = int(float(self.context.index('insert')))
+ # Lines not displayed due to maxlines.
+ offset = max(1, lines - self.context_depth) - 1
+ newtop = self.info[offset + contextline][0]
+ self.text.yview(f'{newtop}.0')
+ self.update_code_context()
def timer_event(self):
- if self.label:
+ "Event on editor text widget triggered every UPDATEINTERVAL ms."
+ if self.context:
self.update_code_context()
self.t1 = self.text.after(UPDATEINTERVAL, self.timer_event)
- def font_timer_event(self):
+ def config_timer_event(self):
+ "Event on editor text widget triggered every CONFIGUPDATEINTERVAL ms."
newtextfont = self.text["font"]
- if self.label and newtextfont != self.textfont:
+ if (self.context and (newtextfont != self.textfont or
+ CodeContext.colors != self.contextcolors)):
self.textfont = newtextfont
- self.label["font"] = self.textfont
- self.t2 = self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
+ self.contextcolors = CodeContext.colors
+ self.context["font"] = self.textfont
+ self.context['background'] = self.contextcolors['background']
+ self.context['foreground'] = self.contextcolors['foreground']
+ self.t2 = self.text.after(CONFIGUPDATEINTERVAL, self.config_timer_event)
CodeContext.reload()
+
+
+if __name__ == "__main__": # pragma: no cover
+ import unittest
+ unittest.main('idlelib.idle_test.test_codecontext', verbosity=2, exit=False)
return "(?P<%s>" % name + "|".join(alternates) + ")"
def make_pat():
- kw = r"\b" + any("KEYWORD", keyword.kwlist) + r"\b"
+ kw = r"\b" + any("KEYWORD", keyword.kwlist + ['async', 'await']) + r"\b"
builtinlist = [str(name) for name in dir(builtins)
if not name.startswith('_') and \
name not in keyword.kwlist]
- # self.file = open("file") :
- # 1st 'file' colorized normal, 2nd as builtin, 3rd as string
builtin = r"([^.'\"\\#]\b|^)" + any("BUILTIN", builtinlist) + r"\b"
comment = any("COMMENT", [r"#[^\n]*"])
- stringprefix = r"(?i:\br|u|f|fr|rf|b|br|rb)?"
+ stringprefix = r"(?i:r|u|f|fr|rf|b|br|rb)?"
sqstring = stringprefix + r"'[^'\\\n]*(\\.[^'\\\n]*)*'?"
dqstring = stringprefix + r'"[^"\\\n]*(\\.[^"\\\n]*)*"?'
sq3string = stringprefix + r"'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?"
top.title("Test ColorDelegator")
x, y = map(int, parent.geometry().split('+')[1:])
top.geometry("700x250+%d+%d" % (x + 20, y + 175))
- source = ("# Following has syntax errors\n"
- "if True: then int 1\nelif False: print 0\nelse: float(None)\n"
- "if iF + If + IF: 'keywork matching must respect case'\n"
- "# All valid prefixes for unicode and byte strings should be colored\n"
+ source = (
+ "if True: int ('1') # keyword, builtin, string, comment\n"
+ "elif False: print(0)\n"
+ "else: float(None)\n"
+ "if iF + If + IF: 'keyword matching must respect case'\n"
+ "if'': x or'' # valid string-keyword no-space combinations\n"
+ "async def f(): await g()\n"
+ "# All valid prefixes for unicode and byte strings should be colored.\n"
"'x', '''x''', \"x\", \"\"\"x\"\"\"\n"
- "r'x', u'x', R'x', U'x', f'x', F'x', ur'is invalid'\n"
+ "r'x', u'x', R'x', U'x', f'x', F'x'\n"
"fr'x', Fr'x', fR'x', FR'x', rf'x', rF'x', Rf'x', RF'x'\n"
- "b'x',B'x', br'x',Br'x',bR'x',BR'x', rb'x'.rB'x',Rb'x',RB'x'\n")
+ "b'x',B'x', br'x',Br'x',bR'x',BR'x', rb'x'.rB'x',Rb'x',RB'x'\n"
+ "# Invalid combinations of legal characters should be half colored.\n"
+ "ur'x', ru'x', uf'x', fu'x', UR'x', ufr'x', rfu'x', xf'x', fx'x'\n"
+ )
text = Text(top, background="white")
text.pack(expand=1, fill="both")
text.insert("insert", source)
popupwait= 2000
[CodeContext]
-numlines= 3
-visible= False
-bgcolor= LightGray
-fgcolor= Black
+maxlines= 15
[FormatParagraph]
max-width= 72
stderr-background= #ffffff
console-foreground= #770000
console-background= #ffffff
+context-foreground= #000000
+context-background= lightgray
[IDLE New]
normal-foreground= #000000
stderr-background= #ffffff
console-foreground= #770000
console-background= #ffffff
+context-foreground= #000000
+context-background= lightgray
[IDLE Dark]
comment-foreground = #dd0000
hit-foreground = #002240
comment-background = #002240
break-foreground = #FFFFFF
+context-foreground= #ffffff
+context-background= #454545
'stderr-background':'#ffffff',
'console-foreground':'#000000',
'console-background':'#ffffff',
+ 'context-foreground':'#000000',
+ 'context-background':'#ffffff',
}
for element in theme:
if not cfgParser.has_option(themeName, element):
).grid(row=row, column=1, sticky=W, padx=7)
elif opt['type'] == 'int':
Entry(entry_area, textvariable=var, validate='key',
- validatecommand=(self.is_int, '%P')
+ validatecommand=(self.is_int, '%P'), width=10
).grid(row=row, column=1, sticky=NSEW, padx=7)
- else:
- Entry(entry_area, textvariable=var
+ else: # type == 'str'
+ # Limit size to fit non-expanding space with larger font.
+ Entry(entry_area, textvariable=var, width=15
).grid(row=row, column=1, sticky=NSEW, padx=7)
return
"""
self.theme_elements = {
'Normal Text': ('normal', '00'),
- 'Python Keywords': ('keyword', '01'),
- 'Python Definitions': ('definition', '02'),
- 'Python Builtins': ('builtin', '03'),
- 'Python Comments': ('comment', '04'),
- 'Python Strings': ('string', '05'),
- 'Selected Text': ('hilite', '06'),
- 'Found Text': ('hit', '07'),
- 'Cursor': ('cursor', '08'),
- 'Editor Breakpoint': ('break', '09'),
- 'Shell Normal Text': ('console', '10'),
- 'Shell Error Text': ('error', '11'),
- 'Shell Stdout Text': ('stdout', '12'),
- 'Shell Stderr Text': ('stderr', '13'),
+ 'Code Context': ('context', '01'),
+ 'Python Keywords': ('keyword', '02'),
+ 'Python Definitions': ('definition', '03'),
+ 'Python Builtins': ('builtin', '04'),
+ 'Python Comments': ('comment', '05'),
+ 'Python Strings': ('string', '06'),
+ 'Selected Text': ('hilite', '07'),
+ 'Found Text': ('hit', '08'),
+ 'Cursor': ('cursor', '09'),
+ 'Editor Breakpoint': ('break', '10'),
+ 'Shell Normal Text': ('console', '11'),
+ 'Shell Error Text': ('error', '12'),
+ 'Shell Stdout Text': ('stdout', '13'),
+ 'Shell Stderr Text': ('stderr', '14'),
}
self.builtin_name = tracers.add(
StringVar(self), self.var_changed_builtin_name)
('\n', 'normal'),
('#you can click here', 'comment'), ('\n', 'normal'),
('#to choose items', 'comment'), ('\n', 'normal'),
+ ('code context section', 'context'), ('\n\n', 'normal'),
('def', 'keyword'), (' ', 'normal'),
('func', 'definition'), ('(param):\n ', 'normal'),
('"""string"""', 'string'), ('\n var0 = ', 'normal'),
self.format_width = tracers.add(
StringVar(self), ('extensions', 'FormatParagraph', 'max-width'))
self.context_lines = tracers.add(
- StringVar(self), ('extensions', 'CodeContext', 'numlines'))
+ StringVar(self), ('extensions', 'CodeContext', 'maxlines'))
# Create widgets:
# Section frames.
frame_format, textvariable=self.format_width, width=4)
frame_context = Frame(frame_editor, borderwidth=0)
- context_title = Label(frame_context, text='Context Lines :')
+ context_title = Label(frame_context, text='Max Context Lines :')
self.context_int = Entry(
frame_context, textvariable=self.context_lines, width=3)
self.format_width.set(idleConf.GetOption(
'extensions', 'FormatParagraph', 'max-width', type='int'))
self.context_lines.set(idleConf.GetOption(
- 'extensions', 'CodeContext', 'numlines', type='int'))
+ 'extensions', 'CodeContext', 'maxlines', type='int'))
# Set additional help sources.
self.user_helplist = idleConf.GetAllExtraHelpSourcesList()
'opener' - opener '({[' corresponding to closer; 'parens' - both chars;
'expression' (default) - also everything in between. Flash-delay is how
long to highlight if cursor is not moved (0 means forever).
+
+CodeContext: Maxlines is the maximum number of code context lines to
+display when Code Context is turned on for an editor window.
'''
}
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8
_py_version = ' (%s)' % platform.python_version()
+darwin = sys.platform == 'darwin'
def _sphinx_version():
"Format sys.version_info to produce the Sphinx version string used to install the chm docs"
from idlelib.undo import UndoDelegator
from idlelib.iomenu import IOBinding, encoding
from idlelib import mainmenu
- from tkinter import Toplevel
+ from tkinter import Toplevel, EventType
from idlelib.statusbar import MultiStatusBar
from idlelib.autocomplete import AutoComplete
from idlelib.autoexpand import AutoExpand
else:
# Elsewhere, use right-click for popup menus.
text.bind("<3>",self.right_menu_event)
+ text.bind('<MouseWheel>', self.mousescroll)
+ text.bind('<Button-4>', self.mousescroll)
+ text.bind('<Button-5>', self.mousescroll)
text.bind("<<cut>>", self.cut)
text.bind("<<copy>>", self.copy)
text.bind("<<paste>>", self.paste)
text.bind("<<open-turtle-demo>>", self.open_turtle_demo)
self.set_status_bar()
- vbar['command'] = text.yview
+ vbar['command'] = self.handle_yview
vbar.pack(side=RIGHT, fill=Y)
text['yscrollcommand'] = vbar.set
text['font'] = idleConf.GetFont(self.root, 'main', 'EditorWindow')
menu.delete(self.wmenu_end+1, end)
windows.add_windows_to_menu(menu)
+ def handle_yview(self, event, *args):
+ "Handle scrollbar."
+ if event == 'moveto':
+ fraction = float(args[0])
+ lines = (round(self.getlineno('end') * fraction) -
+ self.getlineno('@0,0'))
+ event = 'scroll'
+ args = (lines, 'units')
+ self.text.yview(event, *args)
+ return 'break'
+
+ def mousescroll(self, event):
+ "Handle scroll wheel."
+ up = {EventType.MouseWheel: event.delta >= 0 == darwin,
+ EventType.Button: event.num == 4}
+ lines = 5
+ if up[event.type]:
+ lines = -lines
+ self.text.yview_scroll(lines, 'units')
+ return 'break'
+
rmenu = None
def right_menu_event(self, event):
def fixwordbreaks(root):
- # Make sure that Tk's double-click and next/previous word
- # operations use our definition of a word (i.e. an identifier)
+ # On Windows, tcl/tk breaks 'words' only on spaces, as in Command Prompt.
+ # We want Motif style everywhere. See #21474, msg218992 and followup.
tk = root.tk
tk.call('tcl_wordBreakAfter', 'a b', 0) # make sure word.tcl is loaded
- tk.call('set', 'tcl_wordchars', '[a-zA-Z0-9_]')
- tk.call('set', 'tcl_nonwordchars', '[^a-zA-Z0-9_]')
+ tk.call('set', 'tcl_wordchars', r'\w')
+ tk.call('set', 'tcl_nonwordchars', r'\W')
def _editor_window(parent): # htest #
+
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
+ <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
-
- <title>25.5. IDLE — Python 3.7.0a0 documentation</title>
-
+ <title>26.5. IDLE — Python 3.8.0a0 documentation</title>
<link rel="stylesheet" href="../_static/pydoctheme.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
-
- <script type="text/javascript">
- var DOCUMENTATION_OPTIONS = {
- URL_ROOT: '../',
- VERSION: '3.7.0a0',
- COLLAPSE_INDEX: false,
- FILE_SUFFIX: '.html',
- HAS_SOURCE: true
- };
- </script>
+ <script type="text/javascript" src="../_static/documentation_options.js"></script>
<script type="text/javascript" src="../_static/jquery.js"></script>
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<script type="text/javascript" src="../_static/sidebar.js"></script>
<link rel="search" type="application/opensearchdescription+xml"
- title="Search within Python 3.7.0a0 documentation"
+ title="Search within Python 3.8.0a0 documentation"
href="../_static/opensearch.xml"/>
<link rel="author" title="About these documents" href="../about.html" />
+ <link rel="index" title="Index" href="../genindex.html" />
+ <link rel="search" title="Search" href="../search.html" />
<link rel="copyright" title="Copyright" href="../copyright.html" />
- <link rel="top" title="Python 3.7.0a0 documentation" href="../contents.html" />
- <link rel="up" title="25. Graphical User Interfaces with Tk" href="tk.html" />
- <link rel="next" title="25.6. Other Graphical User Interface Packages" href="othergui.html" />
- <link rel="prev" title="25.4. tkinter.scrolledtext — Scrolled Text Widget" href="tkinter.scrolledtext.html" />
- <link rel="shortcut icon" type="image/png" href="../_static/py.png" />
+ <link rel="next" title="26.6. Other Graphical User Interface Packages" href="othergui.html" />
+ <link rel="prev" title="26.4. tkinter.scrolledtext — Scrolled Text Widget" href="tkinter.scrolledtext.html" />
<link rel="canonical" href="https://docs.python.org/3/library/idle.html" />
- <script type="text/javascript" src="../_static/copybutton.js"></script>
+ <link rel="shortcut icon" type="image/png" href="../_static/py.png" />
+
+ <script type="text/javascript" src="../_static/copybutton.js"></script>
+
+
+
- </head>
- <body role="document">
+ </head><body>
<div class="related" role="navigation" aria-label="related navigation">
<h3>Navigation</h3>
<ul>
<a href="../py-modindex.html" title="Python Module Index"
>modules</a> |</li>
<li class="right" >
- <a href="othergui.html" title="25.6. Other Graphical User Interface Packages"
+ <a href="othergui.html" title="26.6. Other Graphical User Interface Packages"
accesskey="N">next</a> |</li>
<li class="right" >
- <a href="tkinter.scrolledtext.html" title="25.4. tkinter.scrolledtext — Scrolled Text Widget"
+ <a href="tkinter.scrolledtext.html" title="26.4. tkinter.scrolledtext — Scrolled Text Widget"
accesskey="P">previous</a> |</li>
- <li><img src="../_static/py.png" alt=""
- style="vertical-align: middle; margin-top: -1px"/></li>
- <li><a href="https://www.python.org/">Python</a> »</li>
- <li>
- <a href="../index.html">3.7.0a0 Documentation</a> »
- </li>
-
- <li class="nav-item nav-item-1"><a href="index.html" >The Python Standard Library</a> »</li>
- <li class="nav-item nav-item-2"><a href="tk.html" accesskey="U">25. Graphical User Interfaces with Tk</a> »</li>
+
+ <li><img src="../_static/py.png" alt=""
+ style="vertical-align: middle; margin-top: -1px"/></li>
+ <li><a href="https://www.python.org/">Python</a> »</li>
+
+
+ <li>
+ <a href="../index.html">3.8.0a0 Documentation</a> »
+ </li>
+
+ <li class="nav-item nav-item-1"><a href="index.html" >The Python Standard Library</a> »</li>
+ <li class="nav-item nav-item-2"><a href="tk.html" accesskey="U">26. Graphical User Interfaces with Tk</a> »</li>
<li class="right">
<div class="body" role="main">
<div class="section" id="idle">
-<span id="id1"></span><h1>25.5. IDLE<a class="headerlink" href="#idle" title="Permalink to this headline">¶</a></h1>
+<span id="id1"></span><h1>26.5. IDLE<a class="headerlink" href="#idle" title="Permalink to this headline">¶</a></h1>
<p><strong>Source code:</strong> <a class="reference external" href="https://github.com/python/cpython/tree/master/Lib/idlelib/">Lib/idlelib/</a></p>
<hr class="docutils" id="index-0" />
-<p>IDLE is Python’s Integrated Development and Learning Environment.</p>
+<p>IDLE is Python’s Integrated Development and Learning Environment.</p>
<p>IDLE has the following features:</p>
<ul class="simple">
-<li>coded in 100% pure Python, using the <a class="reference internal" href="tkinter.html#module-tkinter" title="tkinter: Interface to Tcl/Tk for graphical user interfaces"><code class="xref py py-mod docutils literal"><span class="pre">tkinter</span></code></a> GUI toolkit</li>
+<li>coded in 100% pure Python, using the <a class="reference internal" href="tkinter.html#module-tkinter" title="tkinter: Interface to Tcl/Tk for graphical user interfaces"><code class="xref py py-mod docutils literal notranslate"><span class="pre">tkinter</span></code></a> GUI toolkit</li>
<li>cross-platform: works mostly the same on Windows, Unix, and Mac OS X</li>
<li>Python shell window (interactive interpreter) with colorizing
of code input, output, and error messages</li>
<li>configuration, browsers, and other dialogs</li>
</ul>
<div class="section" id="menus">
-<h2>25.5.1. Menus<a class="headerlink" href="#menus" title="Permalink to this headline">¶</a></h2>
+<h2>26.5.1. Menus<a class="headerlink" href="#menus" title="Permalink to this headline">¶</a></h2>
<p>IDLE has two main window types, the Shell window and the Editor window. It is
possible to have multiple editor windows simultaneously. Output windows, such
as used for Edit / Find in Files, are a subtype of edit window. They currently
have the same top menu as Editor windows but a different default title and
context menu.</p>
-<p>IDLE’s menus dynamically change based on which window is currently selected.
+<p>IDLE’s menus dynamically change based on which window is currently selected.
Each menu documented below indicates which window type it is associated with.</p>
<div class="section" id="file-menu-shell-and-editor">
-<h3>25.5.1.1. File menu (Shell and Editor)<a class="headerlink" href="#file-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.1.1. File menu (Shell and Editor)<a class="headerlink" href="#file-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
<dl class="docutils">
<dt>New File</dt>
<dd>Create a new file editing window.</dd>
-<dt>Open...</dt>
+<dt>Open…</dt>
<dd>Open an existing file with an Open dialog.</dd>
<dt>Recent Files</dt>
<dd>Open a list of recent files. Click one to open it.</dd>
-<dt>Open Module...</dt>
+<dt>Open Module…</dt>
<dd>Open an existing module (searches sys.path).</dd>
</dl>
<dl class="docutils" id="index-1">
that have been changed since being opened or last saved have a * before
and after the window title. If there is no associated file,
do Save As instead.</dd>
-<dt>Save As...</dt>
+<dt>Save As…</dt>
<dd>Save the current window with a Save As dialog. The file saved becomes the
new associated file for the window.</dd>
-<dt>Save Copy As...</dt>
+<dt>Save Copy As…</dt>
<dd>Save the current window to different file without changing the associated
file.</dd>
<dt>Print Window</dt>
</dl>
</div>
<div class="section" id="edit-menu-shell-and-editor">
-<h3>25.5.1.2. Edit menu (Shell and Editor)<a class="headerlink" href="#edit-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.1.2. Edit menu (Shell and Editor)<a class="headerlink" href="#edit-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
<dl class="docutils">
<dt>Undo</dt>
<dd>Undo the last change to the current window. A maximum of 1000 changes may
<dl class="docutils">
<dt>Select All</dt>
<dd>Select the entire contents of the current window.</dd>
-<dt>Find...</dt>
+<dt>Find…</dt>
<dd>Open a search dialog with many options.</dd>
<dt>Find Again</dt>
<dd>Repeat the last search, if there is one.</dd>
<dt>Find Selection</dt>
<dd>Search for the currently selected string, if there is one.</dd>
-<dt>Find in Files...</dt>
+<dt>Find in Files…</dt>
<dd>Open a file search dialog. Put results in a new output window.</dd>
-<dt>Replace...</dt>
+<dt>Replace…</dt>
<dd>Open a search-and-replace dialog.</dd>
<dt>Go to Line</dt>
<dd>Move cursor to the line number requested and make that line visible.</dd>
</dl>
</div>
<div class="section" id="format-menu-editor-window-only">
-<h3>25.5.1.3. Format menu (Editor window only)<a class="headerlink" href="#format-menu-editor-window-only" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.1.3. Format menu (Editor window only)<a class="headerlink" href="#format-menu-editor-window-only" title="Permalink to this headline">¶</a></h3>
<dl class="docutils">
<dt>Indent Region</dt>
<dd>Shift selected lines right by the indent width (default 4 spaces).</dd>
</dl>
</div>
<div class="section" id="run-menu-editor-window-only">
-<span id="index-2"></span><h3>25.5.1.4. Run menu (Editor window only)<a class="headerlink" href="#run-menu-editor-window-only" title="Permalink to this headline">¶</a></h3>
+<span id="index-2"></span><h3>26.5.1.4. Run menu (Editor window only)<a class="headerlink" href="#run-menu-editor-window-only" title="Permalink to this headline">¶</a></h3>
<dl class="docutils">
<dt>Python Shell</dt>
<dd>Open or wake up the Python Shell window.</dd>
<dt>Run Module</dt>
<dd>Do Check Module (above). If no error, restart the shell to clean the
environment, then execute the module. Output is displayed in the Shell
-window. Note that output requires use of <code class="docutils literal"><span class="pre">print</span></code> or <code class="docutils literal"><span class="pre">write</span></code>.
+window. Note that output requires use of <code class="docutils literal notranslate"><span class="pre">print</span></code> or <code class="docutils literal notranslate"><span class="pre">write</span></code>.
When execution is complete, the Shell retains focus and displays a prompt.
At this point, one may interactively explore the result of execution.
-This is similar to executing a file with <code class="docutils literal"><span class="pre">python</span> <span class="pre">-i</span> <span class="pre">file</span></code> at a command
+This is similar to executing a file with <code class="docutils literal notranslate"><span class="pre">python</span> <span class="pre">-i</span> <span class="pre">file</span></code> at a command
line.</dd>
</dl>
</div>
<div class="section" id="shell-menu-shell-window-only">
-<h3>25.5.1.5. Shell menu (Shell window only)<a class="headerlink" href="#shell-menu-shell-window-only" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.1.5. Shell menu (Shell window only)<a class="headerlink" href="#shell-menu-shell-window-only" title="Permalink to this headline">¶</a></h3>
<dl class="docutils">
<dt>View Last Restart</dt>
<dd>Scroll the shell window to the last Shell restart.</dd>
</dl>
</div>
<div class="section" id="debug-menu-shell-window-only">
-<h3>25.5.1.6. Debug menu (Shell window only)<a class="headerlink" href="#debug-menu-shell-window-only" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.1.6. Debug menu (Shell window only)<a class="headerlink" href="#debug-menu-shell-window-only" title="Permalink to this headline">¶</a></h3>
<dl class="docutils">
<dt>Go to File/Line</dt>
<dd>Look on the current line, with the cursor, and the line above, for a filename
</dl>
</div>
<div class="section" id="options-menu-shell-and-editor">
-<h3>25.5.1.7. Options menu (Shell and Editor)<a class="headerlink" href="#options-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.1.7. Options menu (Shell and Editor)<a class="headerlink" href="#options-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
<dl class="docutils">
<dt>Configure IDLE</dt>
<dd><p class="first">Open a configuration dialog and change preferences for the following:
open the configuration dialog by selecting Preferences in the application
menu. To use a new built-in color theme (IDLE Dark) with older IDLEs,
save it as a new custom theme.</p>
-<p class="last">Non-default user settings are saved in a .idlerc directory in the user’s
+<p class="last">Non-default user settings are saved in a .idlerc directory in the user’s
home directory. Problems caused by bad user configuration files are solved
by editing or deleting one or more of the files in .idlerc.</p>
</dd>
<dt>Code Context (toggle)(Editor Window only)</dt>
<dd>Open a pane at the top of the edit window which shows the block context
-of the code which has scrolled above the top of the window.</dd>
+of the code which has scrolled above the top of the window. Clicking a
+line in this pane exposes that line at the top of the editor.</dd>
</dl>
</div>
<div class="section" id="window-menu-shell-and-editor">
-<h3>25.5.1.8. Window menu (Shell and Editor)<a class="headerlink" href="#window-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.1.8. Window menu (Shell and Editor)<a class="headerlink" href="#window-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
<dl class="docutils">
<dt>Zoom Height</dt>
<dd>Toggles the window between normal size and maximum height. The initial size
it to the foreground (deiconifying it if necessary).</p>
</div>
<div class="section" id="help-menu-shell-and-editor">
-<h3>25.5.1.9. Help menu (Shell and Editor)<a class="headerlink" href="#help-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.1.9. Help menu (Shell and Editor)<a class="headerlink" href="#help-menu-shell-and-editor" title="Permalink to this headline">¶</a></h3>
<dl class="docutils">
<dt>About IDLE</dt>
<dd>Display version, copyright, license, credits, and more.</dd>
the General tab.</p>
</div>
<div class="section" id="context-menus">
-<span id="index-4"></span><h3>25.5.1.10. Context Menus<a class="headerlink" href="#context-menus" title="Permalink to this headline">¶</a></h3>
+<span id="index-4"></span><h3>26.5.1.10. Context Menus<a class="headerlink" href="#context-menus" title="Permalink to this headline">¶</a></h3>
<p>Open a context menu by right-clicking in a window (Control-click on OS X).
Context menus have the standard clipboard functions also on the Edit menu.</p>
<dl class="docutils">
</dl>
<p>Editor windows also have breakpoint functions. Lines with a breakpoint set are
specially marked. Breakpoints only have an effect when running under the
-debugger. Breakpoints for a file are saved in the user’s .idlerc directory.</p>
+debugger. Breakpoints for a file are saved in the user’s .idlerc directory.</p>
<dl class="docutils">
<dt>Set Breakpoint</dt>
<dd>Set a breakpoint on the current line.</dd>
</div>
</div>
<div class="section" id="editing-and-navigation">
-<h2>25.5.2. Editing and navigation<a class="headerlink" href="#editing-and-navigation" title="Permalink to this headline">¶</a></h2>
-<p>In this section, ‘C’ refers to the <code class="kbd docutils literal"><span class="pre">Control</span></code> key on Windows and Unix and
-the <code class="kbd docutils literal"><span class="pre">Command</span></code> key on Mac OSX.</p>
+<h2>26.5.2. Editing and navigation<a class="headerlink" href="#editing-and-navigation" title="Permalink to this headline">¶</a></h2>
+<p>In this section, ‘C’ refers to the <kbd class="kbd docutils literal notranslate">Control</kbd> key on Windows and Unix and
+the <kbd class="kbd docutils literal notranslate">Command</kbd> key on Mac OSX.</p>
<ul>
-<li><p class="first"><code class="kbd docutils literal"><span class="pre">Backspace</span></code> deletes to the left; <code class="kbd docutils literal"><span class="pre">Del</span></code> deletes to the right</p>
+<li><p class="first"><kbd class="kbd docutils literal notranslate">Backspace</kbd> deletes to the left; <kbd class="kbd docutils literal notranslate">Del</kbd> deletes to the right</p>
</li>
-<li><p class="first"><code class="kbd docutils literal"><span class="pre">C-Backspace</span></code> delete word left; <code class="kbd docutils literal"><span class="pre">C-Del</span></code> delete word to the right</p>
+<li><p class="first"><kbd class="kbd docutils literal notranslate">C-Backspace</kbd> delete word left; <kbd class="kbd docutils literal notranslate">C-Del</kbd> delete word to the right</p>
</li>
-<li><p class="first">Arrow keys and <code class="kbd docutils literal"><span class="pre">Page</span> <span class="pre">Up</span></code>/<code class="kbd docutils literal"><span class="pre">Page</span> <span class="pre">Down</span></code> to move around</p>
+<li><p class="first">Arrow keys and <kbd class="kbd docutils literal notranslate">Page Up</kbd>/<kbd class="kbd docutils literal notranslate">Page Down</kbd> to move around</p>
</li>
-<li><p class="first"><code class="kbd docutils literal"><span class="pre">C-LeftArrow</span></code> and <code class="kbd docutils literal"><span class="pre">C-RightArrow</span></code> moves by words</p>
+<li><p class="first"><kbd class="kbd docutils literal notranslate">C-LeftArrow</kbd> and <kbd class="kbd docutils literal notranslate">C-RightArrow</kbd> moves by words</p>
</li>
-<li><p class="first"><code class="kbd docutils literal"><span class="pre">Home</span></code>/<code class="kbd docutils literal"><span class="pre">End</span></code> go to begin/end of line</p>
+<li><p class="first"><kbd class="kbd docutils literal notranslate">Home</kbd>/<kbd class="kbd docutils literal notranslate">End</kbd> go to begin/end of line</p>
</li>
-<li><p class="first"><code class="kbd docutils literal"><span class="pre">C-Home</span></code>/<code class="kbd docutils literal"><span class="pre">C-End</span></code> go to begin/end of file</p>
+<li><p class="first"><kbd class="kbd docutils literal notranslate">C-Home</kbd>/<kbd class="kbd docutils literal notranslate">C-End</kbd> go to begin/end of file</p>
</li>
<li><p class="first">Some useful Emacs bindings are inherited from Tcl/Tk:</p>
<blockquote>
<div><ul class="simple">
-<li><code class="kbd docutils literal"><span class="pre">C-a</span></code> beginning of line</li>
-<li><code class="kbd docutils literal"><span class="pre">C-e</span></code> end of line</li>
-<li><code class="kbd docutils literal"><span class="pre">C-k</span></code> kill line (but doesn’t put it in clipboard)</li>
-<li><code class="kbd docutils literal"><span class="pre">C-l</span></code> center window around the insertion point</li>
-<li><code class="kbd docutils literal"><span class="pre">C-b</span></code> go backward one character without deleting (usually you can
+<li><kbd class="kbd docutils literal notranslate">C-a</kbd> beginning of line</li>
+<li><kbd class="kbd docutils literal notranslate">C-e</kbd> end of line</li>
+<li><kbd class="kbd docutils literal notranslate">C-k</kbd> kill line (but doesn’t put it in clipboard)</li>
+<li><kbd class="kbd docutils literal notranslate">C-l</kbd> center window around the insertion point</li>
+<li><kbd class="kbd docutils literal notranslate">C-b</kbd> go backward one character without deleting (usually you can
also use the cursor key for this)</li>
-<li><code class="kbd docutils literal"><span class="pre">C-f</span></code> go forward one character without deleting (usually you can
+<li><kbd class="kbd docutils literal notranslate">C-f</kbd> go forward one character without deleting (usually you can
also use the cursor key for this)</li>
-<li><code class="kbd docutils literal"><span class="pre">C-p</span></code> go up one line (usually you can also use the cursor key for
+<li><kbd class="kbd docutils literal notranslate">C-p</kbd> go up one line (usually you can also use the cursor key for
this)</li>
-<li><code class="kbd docutils literal"><span class="pre">C-d</span></code> delete next character</li>
+<li><kbd class="kbd docutils literal notranslate">C-d</kbd> delete next character</li>
</ul>
</div></blockquote>
</li>
</ul>
-<p>Standard keybindings (like <code class="kbd docutils literal"><span class="pre">C-c</span></code> to copy and <code class="kbd docutils literal"><span class="pre">C-v</span></code> to paste)
+<p>Standard keybindings (like <kbd class="kbd docutils literal notranslate">C-c</kbd> to copy and <kbd class="kbd docutils literal notranslate">C-v</kbd> to paste)
may work. Keybindings are selected in the Configure IDLE dialog.</p>
<div class="section" id="automatic-indentation">
-<h3>25.5.2.1. Automatic indentation<a class="headerlink" href="#automatic-indentation" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.2.1. Automatic indentation<a class="headerlink" href="#automatic-indentation" title="Permalink to this headline">¶</a></h3>
<p>After a block-opening statement, the next line is indented by 4 spaces (in the
Python Shell window by one tab). After certain keywords (break, return etc.)
-the next line is dedented. In leading indentation, <code class="kbd docutils literal"><span class="pre">Backspace</span></code> deletes up
-to 4 spaces if they are there. <code class="kbd docutils literal"><span class="pre">Tab</span></code> inserts spaces (in the Python
+the next line is dedented. In leading indentation, <kbd class="kbd docutils literal notranslate">Backspace</kbd> deletes up
+to 4 spaces if they are there. <kbd class="kbd docutils literal notranslate">Tab</kbd> inserts spaces (in the Python
Shell window one tab), number depends on Indent width. Currently, tabs
are restricted to four spaces due to Tcl/Tk limitations.</p>
<p>See also the indent/dedent region commands in the edit menu.</p>
</div>
<div class="section" id="completions">
-<h3>25.5.2.2. Completions<a class="headerlink" href="#completions" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.2.2. Completions<a class="headerlink" href="#completions" title="Permalink to this headline">¶</a></h3>
<p>Completions are supplied for functions, classes, and attributes of classes,
both built-in and user-defined. Completions are also provided for
filenames.</p>
<p>The AutoCompleteWindow (ACW) will open after a predefined delay (default is
-two seconds) after a ‘.’ or (in a string) an os.sep is typed. If after one
+two seconds) after a ‘.’ or (in a string) an os.sep is typed. If after one
of those characters (plus zero or more other characters) a tab is typed
the ACW will open immediately if a possible continuation is found.</p>
<p>If there is only one possible completion for the characters entered, a
-<code class="kbd docutils literal"><span class="pre">Tab</span></code> will supply that completion without opening the ACW.</p>
-<p>‘Show Completions’ will force open a completions window, by default the
-<code class="kbd docutils literal"><span class="pre">C-space</span></code> will open a completions window. In an empty
+<kbd class="kbd docutils literal notranslate">Tab</kbd> will supply that completion without opening the ACW.</p>
+<p>‘Show Completions’ will force open a completions window, by default the
+<kbd class="kbd docutils literal notranslate">C-space</kbd> will open a completions window. In an empty
string, this will contain the files in the current directory. On a
blank line, it will contain the built-in and user-defined functions and
classes in the current namespaces, plus any modules imported. If some
characters have been entered, the ACW will attempt to be more specific.</p>
<p>If a string of characters is typed, the ACW selection will jump to the
-entry most closely matching those characters. Entering a <code class="kbd docutils literal"><span class="pre">tab</span></code> will
+entry most closely matching those characters. Entering a <kbd class="kbd docutils literal notranslate">tab</kbd> will
cause the longest non-ambiguous match to be entered in the Editor window or
-Shell. Two <code class="kbd docutils literal"><span class="pre">tab</span></code> in a row will supply the current ACW selection, as
+Shell. Two <kbd class="kbd docutils literal notranslate">tab</kbd> in a row will supply the current ACW selection, as
will return or a double click. Cursor keys, Page Up/Down, mouse selection,
and the scroll wheel all operate on the ACW.</p>
-<p>“Hidden” attributes can be accessed by typing the beginning of hidden
-name after a ‘.’, e.g. ‘_’. This allows access to modules with
-<code class="docutils literal"><span class="pre">__all__</span></code> set, or to class-private attributes.</p>
-<p>Completions and the ‘Expand Word’ facility can save a lot of typing!</p>
+<p>“Hidden” attributes can be accessed by typing the beginning of hidden
+name after a ‘.’, e.g. ‘_’. This allows access to modules with
+<code class="docutils literal notranslate"><span class="pre">__all__</span></code> set, or to class-private attributes.</p>
+<p>Completions and the ‘Expand Word’ facility can save a lot of typing!</p>
<p>Completions are currently limited to those in the namespaces. Names in
-an Editor window which are not via <code class="docutils literal"><span class="pre">__main__</span></code> and <a class="reference internal" href="sys.html#sys.modules" title="sys.modules"><code class="xref py py-data docutils literal"><span class="pre">sys.modules</span></code></a> will
+an Editor window which are not via <code class="docutils literal notranslate"><span class="pre">__main__</span></code> and <a class="reference internal" href="sys.html#sys.modules" title="sys.modules"><code class="xref py py-data docutils literal notranslate"><span class="pre">sys.modules</span></code></a> will
not be found. Run the module once with your imports to correct this situation.
Note that IDLE itself places quite a few modules in sys.modules, so
much can be found by default, e.g. the re module.</p>
-<p>If you don’t like the ACW popping up unbidden, simply make the delay
+<p>If you don’t like the ACW popping up unbidden, simply make the delay
longer or disable the extension.</p>
</div>
<div class="section" id="calltips">
-<h3>25.5.2.3. Calltips<a class="headerlink" href="#calltips" title="Permalink to this headline">¶</a></h3>
-<p>A calltip is shown when one types <code class="kbd docutils literal"><span class="pre">(</span></code> after the name of an <em>accessible</em>
+<h3>26.5.2.3. Calltips<a class="headerlink" href="#calltips" title="Permalink to this headline">¶</a></h3>
+<p>A calltip is shown when one types <kbd class="kbd docutils literal notranslate">(</kbd> after the name of an <em>accessible</em>
function. A name expression may include dots and subscripts. A calltip
remains until it is clicked, the cursor is moved out of the argument area,
-or <code class="kbd docutils literal"><span class="pre">)</span></code> is typed. When the cursor is in the argument part of a definition,
+or <kbd class="kbd docutils literal notranslate">)</kbd> is typed. When the cursor is in the argument part of a definition,
the menu or shortcut display a calltip.</p>
<p>A calltip consists of the function signature and the first line of the
docstring. For builtins without an accessible signature, the calltip
<p>The set of <em>accessible</em> functions depends on what modules have been imported
into the user process, including those imported by Idle itself,
and what definitions have been run, all since the last restart.</p>
-<p>For example, restart the Shell and enter <code class="docutils literal"><span class="pre">itertools.count(</span></code>. A calltip
+<p>For example, restart the Shell and enter <code class="docutils literal notranslate"><span class="pre">itertools.count(</span></code>. A calltip
appears because Idle imports itertools into the user process for its own use.
-(This could change.) Enter <code class="docutils literal"><span class="pre">turtle.write(</span></code> and nothing appears. Idle does
+(This could change.) Enter <code class="docutils literal notranslate"><span class="pre">turtle.write(</span></code> and nothing appears. Idle does
not import turtle. The menu or shortcut do nothing either. Enter
-<code class="docutils literal"><span class="pre">import</span> <span class="pre">turtle</span></code> and then <code class="docutils literal"><span class="pre">turtle.write(</span></code> will work.</p>
+<code class="docutils literal notranslate"><span class="pre">import</span> <span class="pre">turtle</span></code> and then <code class="docutils literal notranslate"><span class="pre">turtle.write(</span></code> will work.</p>
<p>In an editor, import statements have no effect until one runs the file. One
might want to run a file after writing the import statements at the top,
or immediately run an existing file before editing.</p>
</div>
<div class="section" id="python-shell-window">
-<h3>25.5.2.4. Python Shell window<a class="headerlink" href="#python-shell-window" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.2.4. Python Shell window<a class="headerlink" href="#python-shell-window" title="Permalink to this headline">¶</a></h3>
<ul>
-<li><p class="first"><code class="kbd docutils literal"><span class="pre">C-c</span></code> interrupts executing command</p>
+<li><p class="first"><kbd class="kbd docutils literal notranslate">C-c</kbd> interrupts executing command</p>
</li>
-<li><p class="first"><code class="kbd docutils literal"><span class="pre">C-d</span></code> sends end-of-file; closes window if typed at a <code class="docutils literal"><span class="pre">>>></span></code> prompt</p>
+<li><p class="first"><kbd class="kbd docutils literal notranslate">C-d</kbd> sends end-of-file; closes window if typed at a <code class="docutils literal notranslate"><span class="pre">>>></span></code> prompt</p>
</li>
-<li><p class="first"><code class="kbd docutils literal"><span class="pre">Alt-/</span></code> (Expand word) is also useful to reduce typing</p>
+<li><p class="first"><kbd class="kbd docutils literal notranslate">Alt-/</kbd> (Expand word) is also useful to reduce typing</p>
<p>Command history</p>
<ul class="simple">
-<li><code class="kbd docutils literal"><span class="pre">Alt-p</span></code> retrieves previous command matching what you have typed. On
-OS X use <code class="kbd docutils literal"><span class="pre">C-p</span></code>.</li>
-<li><code class="kbd docutils literal"><span class="pre">Alt-n</span></code> retrieves next. On OS X use <code class="kbd docutils literal"><span class="pre">C-n</span></code>.</li>
-<li><code class="kbd docutils literal"><span class="pre">Return</span></code> while on any previous command retrieves that command</li>
+<li><kbd class="kbd docutils literal notranslate">Alt-p</kbd> retrieves previous command matching what you have typed. On
+OS X use <kbd class="kbd docutils literal notranslate">C-p</kbd>.</li>
+<li><kbd class="kbd docutils literal notranslate">Alt-n</kbd> retrieves next. On OS X use <kbd class="kbd docutils literal notranslate">C-n</kbd>.</li>
+<li><kbd class="kbd docutils literal notranslate">Return</kbd> while on any previous command retrieves that command</li>
</ul>
</li>
</ul>
</div>
<div class="section" id="text-colors">
-<h3>25.5.2.5. Text colors<a class="headerlink" href="#text-colors" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.2.5. Text colors<a class="headerlink" href="#text-colors" title="Permalink to this headline">¶</a></h3>
<p>Idle defaults to black on white text, but colors text with special meanings.
For the shell, these are shell output, shell error, user output, and
user error. For Python code, at the shell prompt or in an editor, these are
-keywords, builtin class and function names, names following <code class="docutils literal"><span class="pre">class</span></code> and
-<code class="docutils literal"><span class="pre">def</span></code>, strings, and comments. For any text window, these are the cursor (when
+keywords, builtin class and function names, names following <code class="docutils literal notranslate"><span class="pre">class</span></code> and
+<code class="docutils literal notranslate"><span class="pre">def</span></code>, strings, and comments. For any text window, these are the cursor (when
present), found text (when possible), and selected text.</p>
<p>Text coloring is done in the background, so uncolorized text is occasionally
visible. To change the color scheme, use the Configure IDLE dialog
</div>
</div>
<div class="section" id="startup-and-code-execution">
-<h2>25.5.3. Startup and code execution<a class="headerlink" href="#startup-and-code-execution" title="Permalink to this headline">¶</a></h2>
-<p>Upon startup with the <code class="docutils literal"><span class="pre">-s</span></code> option, IDLE will execute the file referenced by
-the environment variables <span class="target" id="index-5"></span><code class="xref std std-envvar docutils literal"><span class="pre">IDLESTARTUP</span></code> or <span class="target" id="index-6"></span><a class="reference internal" href="../using/cmdline.html#envvar-PYTHONSTARTUP"><code class="xref std std-envvar docutils literal"><span class="pre">PYTHONSTARTUP</span></code></a>.
-IDLE first checks for <code class="docutils literal"><span class="pre">IDLESTARTUP</span></code>; if <code class="docutils literal"><span class="pre">IDLESTARTUP</span></code> is present the file
-referenced is run. If <code class="docutils literal"><span class="pre">IDLESTARTUP</span></code> is not present, IDLE checks for
-<code class="docutils literal"><span class="pre">PYTHONSTARTUP</span></code>. Files referenced by these environment variables are
+<h2>26.5.3. Startup and code execution<a class="headerlink" href="#startup-and-code-execution" title="Permalink to this headline">¶</a></h2>
+<p>Upon startup with the <code class="docutils literal notranslate"><span class="pre">-s</span></code> option, IDLE will execute the file referenced by
+the environment variables <span class="target" id="index-5"></span><code class="xref std std-envvar docutils literal notranslate"><span class="pre">IDLESTARTUP</span></code> or <span class="target" id="index-6"></span><a class="reference internal" href="../using/cmdline.html#envvar-PYTHONSTARTUP"><code class="xref std std-envvar docutils literal notranslate"><span class="pre">PYTHONSTARTUP</span></code></a>.
+IDLE first checks for <code class="docutils literal notranslate"><span class="pre">IDLESTARTUP</span></code>; if <code class="docutils literal notranslate"><span class="pre">IDLESTARTUP</span></code> is present the file
+referenced is run. If <code class="docutils literal notranslate"><span class="pre">IDLESTARTUP</span></code> is not present, IDLE checks for
+<code class="docutils literal notranslate"><span class="pre">PYTHONSTARTUP</span></code>. Files referenced by these environment variables are
convenient places to store functions that are used frequently from the IDLE
shell, or for executing import statements to import common modules.</p>
-<p>In addition, <code class="docutils literal"><span class="pre">Tk</span></code> also loads a startup file if it is present. Note that the
-Tk file is loaded unconditionally. This additional file is <code class="docutils literal"><span class="pre">.Idle.py</span></code> and is
-looked for in the user’s home directory. Statements in this file will be
+<p>In addition, <code class="docutils literal notranslate"><span class="pre">Tk</span></code> also loads a startup file if it is present. Note that the
+Tk file is loaded unconditionally. This additional file is <code class="docutils literal notranslate"><span class="pre">.Idle.py</span></code> and is
+looked for in the user’s home directory. Statements in this file will be
executed in the Tk namespace, so this file is not useful for importing
-functions to be used from IDLE’s Python shell.</p>
+functions to be used from IDLE’s Python shell.</p>
<div class="section" id="command-line-usage">
-<h3>25.5.3.1. Command line usage<a class="headerlink" href="#command-line-usage" title="Permalink to this headline">¶</a></h3>
-<div class="highlight-none"><div class="highlight"><pre><span></span>idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
+<h3>26.5.3.1. Command line usage<a class="headerlink" href="#command-line-usage" title="Permalink to this headline">¶</a></h3>
+<div class="highlight-none notranslate"><div class="highlight"><pre><span></span>idle.py [-c command] [-d] [-e] [-h] [-i] [-r file] [-s] [-t title] [-] [arg] ...
-c command run command in the shell window
-d enable debugger and open shell window
</div>
<p>If there are arguments:</p>
<ul class="simple">
-<li>If <code class="docutils literal"><span class="pre">-</span></code>, <code class="docutils literal"><span class="pre">-c</span></code>, or <code class="docutils literal"><span class="pre">r</span></code> is used, all arguments are placed in
-<code class="docutils literal"><span class="pre">sys.argv[1:...]</span></code> and <code class="docutils literal"><span class="pre">sys.argv[0]</span></code> is set to <code class="docutils literal"><span class="pre">''</span></code>, <code class="docutils literal"><span class="pre">'-c'</span></code>,
-or <code class="docutils literal"><span class="pre">'-r'</span></code>. No editor window is opened, even if that is the default
+<li>If <code class="docutils literal notranslate"><span class="pre">-</span></code>, <code class="docutils literal notranslate"><span class="pre">-c</span></code>, or <code class="docutils literal notranslate"><span class="pre">r</span></code> is used, all arguments are placed in
+<code class="docutils literal notranslate"><span class="pre">sys.argv[1:...]</span></code> and <code class="docutils literal notranslate"><span class="pre">sys.argv[0]</span></code> is set to <code class="docutils literal notranslate"><span class="pre">''</span></code>, <code class="docutils literal notranslate"><span class="pre">'-c'</span></code>,
+or <code class="docutils literal notranslate"><span class="pre">'-r'</span></code>. No editor window is opened, even if that is the default
set in the Options dialog.</li>
<li>Otherwise, arguments are files opened for editing and
-<code class="docutils literal"><span class="pre">sys.argv</span></code> reflects the arguments passed to IDLE itself.</li>
+<code class="docutils literal notranslate"><span class="pre">sys.argv</span></code> reflects the arguments passed to IDLE itself.</li>
</ul>
</div>
<div class="section" id="startup-failure">
-<h3>25.5.3.2. Startup failure<a class="headerlink" href="#startup-failure" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.3.2. Startup failure<a class="headerlink" href="#startup-failure" title="Permalink to this headline">¶</a></h3>
<p>IDLE uses a socket to communicate between the IDLE GUI process and the user
code execution process. A connection must be established whenever the Shell
starts or restarts. (The latter is indicated by a divider line that says
-‘RESTART’). If the user process fails to connect to the GUI process, it
-displays a <code class="docutils literal"><span class="pre">Tk</span></code> error box with a ‘cannot connect’ message that directs the
+‘RESTART’). If the user process fails to connect to the GUI process, it
+displays a <code class="docutils literal notranslate"><span class="pre">Tk</span></code> error box with a ‘cannot connect’ message that directs the
user here. It then exits.</p>
<p>A common cause of failure is a user-written file with the same name as a
standard library module, such as <em>random.py</em> and <em>tkinter.py</em>. When such a
crash or Keyboard Interrupt (control-C) may fail to connect. Dismissing
the error box or Restart Shell on the Shell menu may fix a temporary problem.</p>
<p>When IDLE first starts, it attempts to read user configuration files in
-~/.idlerc/ (~ is one’s home directory). If there is a problem, an error
+~/.idlerc/ (~ is one’s home directory). If there is a problem, an error
message should be displayed. Leaving aside random disk glitches, this can
be prevented by never editing the files by hand, using the configuration
dialog, under Options, instead Options. Once it happens, the solution may
be to delete one or more of the configuration files.</p>
<p>If IDLE quits with no message, and it was not started from a console, try
-starting from a console (<code class="docutils literal"><span class="pre">python</span> <span class="pre">-m</span> <span class="pre">idlelib)</span></code> and see if a message appears.</p>
+starting from a console (<code class="docutils literal notranslate"><span class="pre">python</span> <span class="pre">-m</span> <span class="pre">idlelib)</span></code> and see if a message appears.</p>
</div>
<div class="section" id="idle-console-differences">
-<h3>25.5.3.3. IDLE-console differences<a class="headerlink" href="#idle-console-differences" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.3.3. IDLE-console differences<a class="headerlink" href="#idle-console-differences" title="Permalink to this headline">¶</a></h3>
<p>With rare exceptions, the result of executing Python code with IDLE is
intended to be the same as executing the same code in a console window.
However, the different interface and operation occasionally affect
-visible results. For instance, <code class="docutils literal"><span class="pre">sys.modules</span></code> starts with more entries.</p>
-<p>IDLE also replaces <code class="docutils literal"><span class="pre">sys.stdin</span></code>, <code class="docutils literal"><span class="pre">sys.stdout</span></code>, and <code class="docutils literal"><span class="pre">sys.stderr</span></code> with
+visible results. For instance, <code class="docutils literal notranslate"><span class="pre">sys.modules</span></code> starts with more entries.</p>
+<p>IDLE also replaces <code class="docutils literal notranslate"><span class="pre">sys.stdin</span></code>, <code class="docutils literal notranslate"><span class="pre">sys.stdout</span></code>, and <code class="docutils literal notranslate"><span class="pre">sys.stderr</span></code> with
objects that get input from and send output to the Shell window.
When Shell has the focus, it controls the keyboard and screen. This is
normally transparent, but functions that directly access the keyboard
-and screen will not work. If <code class="docutils literal"><span class="pre">sys</span></code> is reset with <code class="docutils literal"><span class="pre">importlib.reload(sys)</span></code>,
-IDLE’s changes are lost and things like <code class="docutils literal"><span class="pre">input</span></code>, <code class="docutils literal"><span class="pre">raw_input</span></code>, and
-<code class="docutils literal"><span class="pre">print</span></code> will not work correctly.</p>
-<p>With IDLE’s Shell, one enters, edits, and recalls complete statements.
+and screen will not work. If <code class="docutils literal notranslate"><span class="pre">sys</span></code> is reset with <code class="docutils literal notranslate"><span class="pre">importlib.reload(sys)</span></code>,
+IDLE’s changes are lost and things like <code class="docutils literal notranslate"><span class="pre">input</span></code>, <code class="docutils literal notranslate"><span class="pre">raw_input</span></code>, and
+<code class="docutils literal notranslate"><span class="pre">print</span></code> will not work correctly.</p>
+<p>With IDLE’s Shell, one enters, edits, and recalls complete statements.
Some consoles only work with a single physical line at a time. IDLE uses
-<code class="docutils literal"><span class="pre">exec</span></code> to run each statement. As a result, <code class="docutils literal"><span class="pre">'__builtins__'</span></code> is always
+<code class="docutils literal notranslate"><span class="pre">exec</span></code> to run each statement. As a result, <code class="docutils literal notranslate"><span class="pre">'__builtins__'</span></code> is always
defined for each statement.</p>
</div>
<div class="section" id="developing-tkinter-applications">
-<h3>25.5.3.4. Developing tkinter applications<a class="headerlink" href="#developing-tkinter-applications" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.3.4. Developing tkinter applications<a class="headerlink" href="#developing-tkinter-applications" title="Permalink to this headline">¶</a></h3>
<p>IDLE is intentionally different from standard Python in order to
-facilitate development of tkinter programs. Enter <code class="docutils literal"><span class="pre">import</span> <span class="pre">tkinter</span> <span class="pre">as</span> <span class="pre">tk;</span>
+facilitate development of tkinter programs. Enter <code class="docutils literal notranslate"><span class="pre">import</span> <span class="pre">tkinter</span> <span class="pre">as</span> <span class="pre">tk;</span>
<span class="pre">root</span> <span class="pre">=</span> <span class="pre">tk.Tk()</span></code> in standard Python and nothing appears. Enter the same
in IDLE and a tk window appears. In standard Python, one must also enter
-<code class="docutils literal"><span class="pre">root.update()</span></code> to see the window. IDLE does the equivalent in the
+<code class="docutils literal notranslate"><span class="pre">root.update()</span></code> to see the window. IDLE does the equivalent in the
background, about 20 times a second, which is about every 50 milleseconds.
-Next enter <code class="docutils literal"><span class="pre">b</span> <span class="pre">=</span> <span class="pre">tk.Button(root,</span> <span class="pre">text='button');</span> <span class="pre">b.pack()</span></code>. Again,
-nothing visibly changes in standard Python until one enters <code class="docutils literal"><span class="pre">root.update()</span></code>.</p>
-<p>Most tkinter programs run <code class="docutils literal"><span class="pre">root.mainloop()</span></code>, which usually does not
+Next enter <code class="docutils literal notranslate"><span class="pre">b</span> <span class="pre">=</span> <span class="pre">tk.Button(root,</span> <span class="pre">text='button');</span> <span class="pre">b.pack()</span></code>. Again,
+nothing visibly changes in standard Python until one enters <code class="docutils literal notranslate"><span class="pre">root.update()</span></code>.</p>
+<p>Most tkinter programs run <code class="docutils literal notranslate"><span class="pre">root.mainloop()</span></code>, which usually does not
return until the tk app is destroyed. If the program is run with
-<code class="docutils literal"><span class="pre">python</span> <span class="pre">-i</span></code> or from an IDLE editor, a <code class="docutils literal"><span class="pre">>>></span></code> shell prompt does not
-appear until <code class="docutils literal"><span class="pre">mainloop()</span></code> returns, at which time there is nothing left
+<code class="docutils literal notranslate"><span class="pre">python</span> <span class="pre">-i</span></code> or from an IDLE editor, a <code class="docutils literal notranslate"><span class="pre">>>></span></code> shell prompt does not
+appear until <code class="docutils literal notranslate"><span class="pre">mainloop()</span></code> returns, at which time there is nothing left
to interact with.</p>
<p>When running a tkinter program from an IDLE editor, one can comment out
the mainloop call. One then gets a shell prompt immediately and can
re-enable the mainloop call when running in standard Python.</p>
</div>
<div class="section" id="running-without-a-subprocess">
-<h3>25.5.3.5. Running without a subprocess<a class="headerlink" href="#running-without-a-subprocess" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.3.5. Running without a subprocess<a class="headerlink" href="#running-without-a-subprocess" title="Permalink to this headline">¶</a></h3>
<p>By default, IDLE executes user code in a separate subprocess via a socket,
which uses the internal loopback interface. This connection is not
externally visible and no data is sent to or received from the Internet.
</div>
</div>
<div class="section" id="help-and-preferences">
-<h2>25.5.4. Help and preferences<a class="headerlink" href="#help-and-preferences" title="Permalink to this headline">¶</a></h2>
+<h2>26.5.4. Help and preferences<a class="headerlink" href="#help-and-preferences" title="Permalink to this headline">¶</a></h2>
<div class="section" id="additional-help-sources">
-<h3>25.5.4.1. Additional help sources<a class="headerlink" href="#additional-help-sources" title="Permalink to this headline">¶</a></h3>
-<p>IDLE includes a help menu entry called “Python Docs” that will open the
+<h3>26.5.4.1. Additional help sources<a class="headerlink" href="#additional-help-sources" title="Permalink to this headline">¶</a></h3>
+<p>IDLE includes a help menu entry called “Python Docs” that will open the
extensive sources of help, including tutorials, available at docs.python.org.
Selected URLs can be added or removed from the help menu at any time using the
Configure IDLE dialog. See the IDLE help option in the help menu of IDLE for
more information.</p>
</div>
<div class="section" id="setting-preferences">
-<h3>25.5.4.2. Setting preferences<a class="headerlink" href="#setting-preferences" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.4.2. Setting preferences<a class="headerlink" href="#setting-preferences" title="Permalink to this headline">¶</a></h3>
<p>The font preferences, highlighting, keys, and general preferences can be
changed via Configure IDLE on the Option menu. Keys can be user defined;
IDLE ships with four built-in key sets. In addition, a user can create a
custom key set in the Configure IDLE dialog under the keys tab.</p>
</div>
<div class="section" id="extensions">
-<h3>25.5.4.3. Extensions<a class="headerlink" href="#extensions" title="Permalink to this headline">¶</a></h3>
+<h3>26.5.4.3. Extensions<a class="headerlink" href="#extensions" title="Permalink to this headline">¶</a></h3>
<p>IDLE contains an extension facility. Preferences for extensions can be
changed with the Extensions tab of the preferences dialog. See the
beginning of config-extensions.def in the idlelib directory for further
<div class="sphinxsidebarwrapper">
<h3><a href="../contents.html">Table Of Contents</a></h3>
<ul>
-<li><a class="reference internal" href="#">25.5. IDLE</a><ul>
-<li><a class="reference internal" href="#menus">25.5.1. Menus</a><ul>
-<li><a class="reference internal" href="#file-menu-shell-and-editor">25.5.1.1. File menu (Shell and Editor)</a></li>
-<li><a class="reference internal" href="#edit-menu-shell-and-editor">25.5.1.2. Edit menu (Shell and Editor)</a></li>
-<li><a class="reference internal" href="#format-menu-editor-window-only">25.5.1.3. Format menu (Editor window only)</a></li>
-<li><a class="reference internal" href="#run-menu-editor-window-only">25.5.1.4. Run menu (Editor window only)</a></li>
-<li><a class="reference internal" href="#shell-menu-shell-window-only">25.5.1.5. Shell menu (Shell window only)</a></li>
-<li><a class="reference internal" href="#debug-menu-shell-window-only">25.5.1.6. Debug menu (Shell window only)</a></li>
-<li><a class="reference internal" href="#options-menu-shell-and-editor">25.5.1.7. Options menu (Shell and Editor)</a></li>
-<li><a class="reference internal" href="#window-menu-shell-and-editor">25.5.1.8. Window menu (Shell and Editor)</a></li>
-<li><a class="reference internal" href="#help-menu-shell-and-editor">25.5.1.9. Help menu (Shell and Editor)</a></li>
-<li><a class="reference internal" href="#context-menus">25.5.1.10. Context Menus</a></li>
+<li><a class="reference internal" href="#">26.5. IDLE</a><ul>
+<li><a class="reference internal" href="#menus">26.5.1. Menus</a><ul>
+<li><a class="reference internal" href="#file-menu-shell-and-editor">26.5.1.1. File menu (Shell and Editor)</a></li>
+<li><a class="reference internal" href="#edit-menu-shell-and-editor">26.5.1.2. Edit menu (Shell and Editor)</a></li>
+<li><a class="reference internal" href="#format-menu-editor-window-only">26.5.1.3. Format menu (Editor window only)</a></li>
+<li><a class="reference internal" href="#run-menu-editor-window-only">26.5.1.4. Run menu (Editor window only)</a></li>
+<li><a class="reference internal" href="#shell-menu-shell-window-only">26.5.1.5. Shell menu (Shell window only)</a></li>
+<li><a class="reference internal" href="#debug-menu-shell-window-only">26.5.1.6. Debug menu (Shell window only)</a></li>
+<li><a class="reference internal" href="#options-menu-shell-and-editor">26.5.1.7. Options menu (Shell and Editor)</a></li>
+<li><a class="reference internal" href="#window-menu-shell-and-editor">26.5.1.8. Window menu (Shell and Editor)</a></li>
+<li><a class="reference internal" href="#help-menu-shell-and-editor">26.5.1.9. Help menu (Shell and Editor)</a></li>
+<li><a class="reference internal" href="#context-menus">26.5.1.10. Context Menus</a></li>
</ul>
</li>
-<li><a class="reference internal" href="#editing-and-navigation">25.5.2. Editing and navigation</a><ul>
-<li><a class="reference internal" href="#automatic-indentation">25.5.2.1. Automatic indentation</a></li>
-<li><a class="reference internal" href="#completions">25.5.2.2. Completions</a></li>
-<li><a class="reference internal" href="#calltips">25.5.2.3. Calltips</a></li>
-<li><a class="reference internal" href="#python-shell-window">25.5.2.4. Python Shell window</a></li>
-<li><a class="reference internal" href="#text-colors">25.5.2.5. Text colors</a></li>
+<li><a class="reference internal" href="#editing-and-navigation">26.5.2. Editing and navigation</a><ul>
+<li><a class="reference internal" href="#automatic-indentation">26.5.2.1. Automatic indentation</a></li>
+<li><a class="reference internal" href="#completions">26.5.2.2. Completions</a></li>
+<li><a class="reference internal" href="#calltips">26.5.2.3. Calltips</a></li>
+<li><a class="reference internal" href="#python-shell-window">26.5.2.4. Python Shell window</a></li>
+<li><a class="reference internal" href="#text-colors">26.5.2.5. Text colors</a></li>
</ul>
</li>
-<li><a class="reference internal" href="#startup-and-code-execution">25.5.3. Startup and code execution</a><ul>
-<li><a class="reference internal" href="#command-line-usage">25.5.3.1. Command line usage</a></li>
-<li><a class="reference internal" href="#startup-failure">25.5.3.2. Startup failure</a></li>
-<li><a class="reference internal" href="#idle-console-differences">25.5.3.3. IDLE-console differences</a></li>
-<li><a class="reference internal" href="#developing-tkinter-applications">25.5.3.4. Developing tkinter applications</a></li>
-<li><a class="reference internal" href="#running-without-a-subprocess">25.5.3.5. Running without a subprocess</a></li>
+<li><a class="reference internal" href="#startup-and-code-execution">26.5.3. Startup and code execution</a><ul>
+<li><a class="reference internal" href="#command-line-usage">26.5.3.1. Command line usage</a></li>
+<li><a class="reference internal" href="#startup-failure">26.5.3.2. Startup failure</a></li>
+<li><a class="reference internal" href="#idle-console-differences">26.5.3.3. IDLE-console differences</a></li>
+<li><a class="reference internal" href="#developing-tkinter-applications">26.5.3.4. Developing tkinter applications</a></li>
+<li><a class="reference internal" href="#running-without-a-subprocess">26.5.3.5. Running without a subprocess</a></li>
</ul>
</li>
-<li><a class="reference internal" href="#help-and-preferences">25.5.4. Help and preferences</a><ul>
-<li><a class="reference internal" href="#additional-help-sources">25.5.4.1. Additional help sources</a></li>
-<li><a class="reference internal" href="#setting-preferences">25.5.4.2. Setting preferences</a></li>
-<li><a class="reference internal" href="#extensions">25.5.4.3. Extensions</a></li>
+<li><a class="reference internal" href="#help-and-preferences">26.5.4. Help and preferences</a><ul>
+<li><a class="reference internal" href="#additional-help-sources">26.5.4.1. Additional help sources</a></li>
+<li><a class="reference internal" href="#setting-preferences">26.5.4.2. Setting preferences</a></li>
+<li><a class="reference internal" href="#extensions">26.5.4.3. Extensions</a></li>
</ul>
</li>
</ul>
<h4>Previous topic</h4>
<p class="topless"><a href="tkinter.scrolledtext.html"
- title="previous chapter">25.4. <code class="docutils literal"><span class="pre">tkinter.scrolledtext</span></code> — Scrolled Text Widget</a></p>
+ title="previous chapter">26.4. <code class="docutils literal notranslate"><span class="pre">tkinter.scrolledtext</span></code> — Scrolled Text Widget</a></p>
<h4>Next topic</h4>
<p class="topless"><a href="othergui.html"
- title="next chapter">25.6. Other Graphical User Interface Packages</a></p>
+ title="next chapter">26.6. Other Graphical User Interface Packages</a></p>
<div role="note" aria-label="source link">
<h3>This Page</h3>
<ul class="this-page-menu">
<li><a href="../bugs.html">Report a Bug</a></li>
<li>
- <a href="https://github.com/python/cpython/blob/master/Doc/library/idle.txt"
+ <a href="https://github.com/python/cpython/blob/master/Doc/library/idle.rst"
rel="nofollow">Show Source
</a>
</li>
<a href="../py-modindex.html" title="Python Module Index"
>modules</a> |</li>
<li class="right" >
- <a href="othergui.html" title="25.6. Other Graphical User Interface Packages"
+ <a href="othergui.html" title="26.6. Other Graphical User Interface Packages"
>next</a> |</li>
<li class="right" >
- <a href="tkinter.scrolledtext.html" title="25.4. tkinter.scrolledtext — Scrolled Text Widget"
+ <a href="tkinter.scrolledtext.html" title="26.4. tkinter.scrolledtext — Scrolled Text Widget"
>previous</a> |</li>
- <li><img src="../_static/py.png" alt=""
- style="vertical-align: middle; margin-top: -1px"/></li>
- <li><a href="https://www.python.org/">Python</a> »</li>
- <li>
- <a href="../index.html">3.7.0a0 Documentation</a> »
- </li>
-
- <li class="nav-item nav-item-1"><a href="index.html" >The Python Standard Library</a> »</li>
- <li class="nav-item nav-item-2"><a href="tk.html" >25. Graphical User Interfaces with Tk</a> »</li>
+
+ <li><img src="../_static/py.png" alt=""
+ style="vertical-align: middle; margin-top: -1px"/></li>
+ <li><a href="https://www.python.org/">Python</a> »</li>
+
+
+ <li>
+ <a href="../index.html">3.8.0a0 Documentation</a> »
+ </li>
+
+ <li class="nav-item nav-item-1"><a href="index.html" >The Python Standard Library</a> »</li>
+ <li class="nav-item nav-item-2"><a href="tk.html" >26. Graphical User Interfaces with Tk</a> »</li>
<li class="right">
</ul>
</div>
<div class="footer">
- © <a href="../copyright.html">Copyright</a> 2001-2017, Python Software Foundation.
+ © <a href="../copyright.html">Copyright</a> 2001-2018, Python Software Foundation.
<br />
+
The Python Software Foundation is a non-profit corporation.
- <a href="https://www.python.org/psf/donations/">Please donate.</a>
+<a href="https://www.python.org/psf/donations/">Please donate.</a>
+<br />
<br />
- Last updated on Sep 15, 2017.
- <a href="../bugs.html">Found a bug</a>?
+
+ Last updated on Jun 10, 2018.
+ <a href="https://docs.python.org/3/bugs.html">Found a bug</a>?
<br />
- Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.3.6.
+
+ Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.7.4.
</div>
</body>
--- /dev/null
+"""Test idlelib.codecontext.
+
+Coverage: 100%
+"""
+
+import re
+
+import unittest
+from unittest import mock
+from test.support import requires
+from tkinter import Tk, Frame, Text, TclError
+
+import idlelib.codecontext as codecontext
+from idlelib import config
+
+
+usercfg = codecontext.idleConf.userCfg
+testcfg = {
+ 'main': config.IdleUserConfParser(''),
+ 'highlight': config.IdleUserConfParser(''),
+ 'keys': config.IdleUserConfParser(''),
+ 'extensions': config.IdleUserConfParser(''),
+}
+code_sample = """\
+
+class C1():
+ # Class comment.
+ def __init__(self, a, b):
+ self.a = a
+ self.b = b
+ def compare(self):
+ if a > b:
+ return a
+ elif a < b:
+ return b
+ else:
+ return None
+"""
+
+
+class DummyEditwin:
+ def __init__(self, root, frame, text):
+ self.root = root
+ self.top = root
+ self.text_frame = frame
+ self.text = text
+
+
+class CodeContextTest(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ requires('gui')
+ root = cls.root = Tk()
+ root.withdraw()
+ frame = cls.frame = Frame(root)
+ text = cls.text = Text(frame)
+ text.insert('1.0', code_sample)
+ # Need to pack for creation of code context text widget.
+ frame.pack(side='left', fill='both', expand=1)
+ text.pack(side='top', fill='both', expand=1)
+ cls.editor = DummyEditwin(root, frame, text)
+ codecontext.idleConf.userCfg = testcfg
+
+ @classmethod
+ def tearDownClass(cls):
+ codecontext.idleConf.userCfg = usercfg
+ cls.editor.text.delete('1.0', 'end')
+ del cls.editor, cls.frame, cls.text
+ cls.root.update_idletasks()
+ cls.root.destroy()
+ del cls.root
+
+ def setUp(self):
+ self.text.yview(0)
+ self.cc = codecontext.CodeContext(self.editor)
+
+ def tearDown(self):
+ if self.cc.context:
+ self.cc.context.destroy()
+ # Explicitly call __del__ to remove scheduled scripts.
+ self.cc.__del__()
+ del self.cc.context, self.cc
+
+ def test_init(self):
+ eq = self.assertEqual
+ ed = self.editor
+ cc = self.cc
+
+ eq(cc.editwin, ed)
+ eq(cc.text, ed.text)
+ eq(cc.textfont, ed.text['font'])
+ self.assertIsNone(cc.context)
+ eq(cc.info, [(0, -1, '', False)])
+ eq(cc.topvisible, 1)
+ eq(self.root.tk.call('after', 'info', self.cc.t1)[1], 'timer')
+ eq(self.root.tk.call('after', 'info', self.cc.t2)[1], 'timer')
+
+ def test_del(self):
+ self.cc.__del__()
+ with self.assertRaises(TclError) as msg:
+ self.root.tk.call('after', 'info', self.cc.t1)
+ self.assertIn("doesn't exist", msg)
+ with self.assertRaises(TclError) as msg:
+ self.root.tk.call('after', 'info', self.cc.t2)
+ self.assertIn("doesn't exist", msg)
+ # For coverage on the except. Have to delete because the
+ # above Tcl error is caught by after_cancel.
+ del self.cc.t1, self.cc.t2
+ self.cc.__del__()
+
+ def test_reload(self):
+ codecontext.CodeContext.reload()
+ self.assertEqual(self.cc.colors, {'background': 'lightgray',
+ 'foreground': '#000000'})
+ self.assertEqual(self.cc.context_depth, 15)
+
+ def test_toggle_code_context_event(self):
+ eq = self.assertEqual
+ cc = self.cc
+ toggle = cc.toggle_code_context_event
+
+ # Make sure code context is off.
+ if cc.context:
+ toggle()
+
+ # Toggle on.
+ eq(toggle(), 'break')
+ self.assertIsNotNone(cc.context)
+ eq(cc.context['font'], cc.textfont)
+ eq(cc.context['fg'], cc.colors['foreground'])
+ eq(cc.context['bg'], cc.colors['background'])
+ eq(cc.context.get('1.0', 'end-1c'), '')
+
+ # Toggle off.
+ eq(toggle(), 'break')
+ self.assertIsNone(cc.context)
+
+ def test_get_context(self):
+ eq = self.assertEqual
+ gc = self.cc.get_context
+
+ # stopline must be greater than 0.
+ with self.assertRaises(AssertionError):
+ gc(1, stopline=0)
+
+ eq(gc(3), ([(2, 0, 'class C1():', 'class')], 0))
+
+ # Don't return comment.
+ eq(gc(4), ([(2, 0, 'class C1():', 'class')], 0))
+
+ # Two indentation levels and no comment.
+ eq(gc(5), ([(2, 0, 'class C1():', 'class'),
+ (4, 4, ' def __init__(self, a, b):', 'def')], 0))
+
+ # Only one 'def' is returned, not both at the same indent level.
+ eq(gc(10), ([(2, 0, 'class C1():', 'class'),
+ (7, 4, ' def compare(self):', 'def'),
+ (8, 8, ' if a > b:', 'if')], 0))
+
+ # With 'elif', also show the 'if' even though it's at the same level.
+ eq(gc(11), ([(2, 0, 'class C1():', 'class'),
+ (7, 4, ' def compare(self):', 'def'),
+ (8, 8, ' if a > b:', 'if'),
+ (10, 8, ' elif a < b:', 'elif')], 0))
+
+ # Set stop_line to not go back to first line in source code.
+ # Return includes stop_line.
+ eq(gc(11, stopline=2), ([(2, 0, 'class C1():', 'class'),
+ (7, 4, ' def compare(self):', 'def'),
+ (8, 8, ' if a > b:', 'if'),
+ (10, 8, ' elif a < b:', 'elif')], 0))
+ eq(gc(11, stopline=3), ([(7, 4, ' def compare(self):', 'def'),
+ (8, 8, ' if a > b:', 'if'),
+ (10, 8, ' elif a < b:', 'elif')], 4))
+ eq(gc(11, stopline=8), ([(8, 8, ' if a > b:', 'if'),
+ (10, 8, ' elif a < b:', 'elif')], 8))
+
+ # Set stop_indent to test indent level to stop at.
+ eq(gc(11, stopindent=4), ([(7, 4, ' def compare(self):', 'def'),
+ (8, 8, ' if a > b:', 'if'),
+ (10, 8, ' elif a < b:', 'elif')], 4))
+ # Check that the 'if' is included.
+ eq(gc(11, stopindent=8), ([(8, 8, ' if a > b:', 'if'),
+ (10, 8, ' elif a < b:', 'elif')], 8))
+
+ def test_update_code_context(self):
+ eq = self.assertEqual
+ cc = self.cc
+ # Ensure code context is active.
+ if not cc.context:
+ cc.toggle_code_context_event()
+
+ # Invoke update_code_context without scrolling - nothing happens.
+ self.assertIsNone(cc.update_code_context())
+ eq(cc.info, [(0, -1, '', False)])
+ eq(cc.topvisible, 1)
+
+ # Scroll down to line 1.
+ cc.text.yview(1)
+ cc.update_code_context()
+ eq(cc.info, [(0, -1, '', False)])
+ eq(cc.topvisible, 2)
+ eq(cc.context.get('1.0', 'end-1c'), '')
+
+ # Scroll down to line 2.
+ cc.text.yview(2)
+ cc.update_code_context()
+ eq(cc.info, [(0, -1, '', False), (2, 0, 'class C1():', 'class')])
+ eq(cc.topvisible, 3)
+ eq(cc.context.get('1.0', 'end-1c'), 'class C1():')
+
+ # Scroll down to line 3. Since it's a comment, nothing changes.
+ cc.text.yview(3)
+ cc.update_code_context()
+ eq(cc.info, [(0, -1, '', False), (2, 0, 'class C1():', 'class')])
+ eq(cc.topvisible, 4)
+ eq(cc.context.get('1.0', 'end-1c'), 'class C1():')
+
+ # Scroll down to line 4.
+ cc.text.yview(4)
+ cc.update_code_context()
+ eq(cc.info, [(0, -1, '', False),
+ (2, 0, 'class C1():', 'class'),
+ (4, 4, ' def __init__(self, a, b):', 'def')])
+ eq(cc.topvisible, 5)
+ eq(cc.context.get('1.0', 'end-1c'), 'class C1():\n'
+ ' def __init__(self, a, b):')
+
+ # Scroll down to line 11. Last 'def' is removed.
+ cc.text.yview(11)
+ cc.update_code_context()
+ eq(cc.info, [(0, -1, '', False),
+ (2, 0, 'class C1():', 'class'),
+ (7, 4, ' def compare(self):', 'def'),
+ (8, 8, ' if a > b:', 'if'),
+ (10, 8, ' elif a < b:', 'elif')])
+ eq(cc.topvisible, 12)
+ eq(cc.context.get('1.0', 'end-1c'), 'class C1():\n'
+ ' def compare(self):\n'
+ ' if a > b:\n'
+ ' elif a < b:')
+
+ # No scroll. No update, even though context_depth changed.
+ cc.update_code_context()
+ cc.context_depth = 1
+ eq(cc.info, [(0, -1, '', False),
+ (2, 0, 'class C1():', 'class'),
+ (7, 4, ' def compare(self):', 'def'),
+ (8, 8, ' if a > b:', 'if'),
+ (10, 8, ' elif a < b:', 'elif')])
+ eq(cc.topvisible, 12)
+ eq(cc.context.get('1.0', 'end-1c'), 'class C1():\n'
+ ' def compare(self):\n'
+ ' if a > b:\n'
+ ' elif a < b:')
+
+ # Scroll up.
+ cc.text.yview(5)
+ cc.update_code_context()
+ eq(cc.info, [(0, -1, '', False),
+ (2, 0, 'class C1():', 'class'),
+ (4, 4, ' def __init__(self, a, b):', 'def')])
+ eq(cc.topvisible, 6)
+ # context_depth is 1.
+ eq(cc.context.get('1.0', 'end-1c'), ' def __init__(self, a, b):')
+
+ def test_jumptoline(self):
+ eq = self.assertEqual
+ cc = self.cc
+ jump = cc.jumptoline
+
+ if not cc.context:
+ cc.toggle_code_context_event()
+
+ # Empty context.
+ cc.text.yview(f'{2}.0')
+ cc.update_code_context()
+ eq(cc.topvisible, 2)
+ cc.context.mark_set('insert', '1.5')
+ jump()
+ eq(cc.topvisible, 1)
+
+ # 4 lines of context showing.
+ cc.text.yview(f'{12}.0')
+ cc.update_code_context()
+ eq(cc.topvisible, 12)
+ cc.context.mark_set('insert', '3.0')
+ jump()
+ eq(cc.topvisible, 8)
+
+ # More context lines than limit.
+ cc.context_depth = 2
+ cc.text.yview(f'{12}.0')
+ cc.update_code_context()
+ eq(cc.topvisible, 12)
+ cc.context.mark_set('insert', '1.0')
+ jump()
+ eq(cc.topvisible, 8)
+
+ @mock.patch.object(codecontext.CodeContext, 'update_code_context')
+ def test_timer_event(self, mock_update):
+ # Ensure code context is not active.
+ if self.cc.context:
+ self.cc.toggle_code_context_event()
+ self.cc.timer_event()
+ mock_update.assert_not_called()
+
+ # Activate code context.
+ self.cc.toggle_code_context_event()
+ self.cc.timer_event()
+ mock_update.assert_called()
+
+ def test_config_timer_event(self):
+ eq = self.assertEqual
+ cc = self.cc
+ save_font = cc.text['font']
+ save_colors = codecontext.CodeContext.colors
+ test_font = 'FakeFont'
+ test_colors = {'background': '#222222', 'foreground': '#ffff00'}
+
+ # Ensure code context is not active.
+ if cc.context:
+ cc.toggle_code_context_event()
+
+ # Nothing updates on inactive code context.
+ cc.text['font'] = test_font
+ codecontext.CodeContext.colors = test_colors
+ cc.config_timer_event()
+ eq(cc.textfont, save_font)
+ eq(cc.contextcolors, save_colors)
+
+ # Activate code context, but no change to font or color.
+ cc.toggle_code_context_event()
+ cc.text['font'] = save_font
+ codecontext.CodeContext.colors = save_colors
+ cc.config_timer_event()
+ eq(cc.textfont, save_font)
+ eq(cc.contextcolors, save_colors)
+ eq(cc.context['font'], save_font)
+ eq(cc.context['background'], save_colors['background'])
+ eq(cc.context['foreground'], save_colors['foreground'])
+
+ # Active code context, change font.
+ cc.text['font'] = test_font
+ cc.config_timer_event()
+ eq(cc.textfont, test_font)
+ eq(cc.contextcolors, save_colors)
+ eq(cc.context['font'], test_font)
+ eq(cc.context['background'], save_colors['background'])
+ eq(cc.context['foreground'], save_colors['foreground'])
+
+ # Active code context, change color.
+ cc.text['font'] = save_font
+ codecontext.CodeContext.colors = test_colors
+ cc.config_timer_event()
+ eq(cc.textfont, save_font)
+ eq(cc.contextcolors, test_colors)
+ eq(cc.context['font'], save_font)
+ eq(cc.context['background'], test_colors['background'])
+ eq(cc.context['foreground'], test_colors['foreground'])
+ codecontext.CodeContext.colors = save_colors
+ cc.config_timer_event()
+
+
+class HelperFunctionText(unittest.TestCase):
+
+ def test_get_spaces_firstword(self):
+ get = codecontext.get_spaces_firstword
+ test_lines = (
+ (' first word', (' ', 'first')),
+ ('\tfirst word', ('\t', 'first')),
+ (' \u19D4\u19D2: ', (' ', '\u19D4\u19D2')),
+ ('no spaces', ('', 'no')),
+ ('', ('', '')),
+ ('# TEST COMMENT', ('', '')),
+ (' (continuation)', (' ', ''))
+ )
+ for line, expected_output in test_lines:
+ self.assertEqual(get(line), expected_output)
+
+ # Send the pattern in the call.
+ self.assertEqual(get(' (continuation)',
+ c=re.compile(r'^(\s*)([^\s]*)')),
+ (' ', '(continuation)'))
+
+ def test_get_line_info(self):
+ eq = self.assertEqual
+ gli = codecontext.get_line_info
+ lines = code_sample.splitlines()
+
+ # Line 1 is not a BLOCKOPENER.
+ eq(gli(lines[0]), (codecontext.INFINITY, '', False))
+ # Line 2 is a BLOCKOPENER without an indent.
+ eq(gli(lines[1]), (0, 'class C1():', 'class'))
+ # Line 3 is not a BLOCKOPENER and does not return the indent level.
+ eq(gli(lines[2]), (codecontext.INFINITY, ' # Class comment.', False))
+ # Line 4 is a BLOCKOPENER and is indented.
+ eq(gli(lines[3]), (4, ' def __init__(self, a, b):', 'def'))
+ # Line 8 is a different BLOCKOPENER and is indented.
+ eq(gli(lines[7]), (8, ' if a > b:', 'if'))
+ # Test tab.
+ eq(gli('\tif a == b:'), (1, '\tif a == b:', 'if'))
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
def test_context(self):
self.page.context_int.delete(0, 'end')
self.page.context_int.insert(0, '1')
- self.assertEqual(extpage, {'CodeContext': {'numlines': '1'}})
+ self.assertEqual(extpage, {'CodeContext': {'maxlines': '1'}})
def test_source_selected(self):
d = self.page
'''Test (selected) IDLE Edit menu items.
-Edit modules have their own test files files
+Edit modules have their own test files
'''
from test.support import requires
requires('gui')
class TextTest(object):
"Define items common to both sets of tests."
- hw = 'hello\nworld' # Several tests insert this after after initialization.
+ hw = 'hello\nworld' # Several tests insert this after initialization.
hwn = hw+'\n' # \n present at initialization, before insert
# setUpClass defines cls.Text and maybe cls.root.
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
raise SystemExit(1)
+
+# Valid arguments for the ...Awareness call below are defined in the following.
+# https://msdn.microsoft.com/en-us/library/windows/desktop/dn280512(v=vs.85).aspx
+if sys.platform == 'win32':
+ import ctypes
+ PROCESS_SYSTEM_DPI_AWARE = 1
+ try:
+ ctypes.OleDLL('shcore').SetProcessDpiAwareness(PROCESS_SYSTEM_DPI_AWARE)
+ except (AttributeError, OSError):
+ pass
+
import tkinter.messagebox as tkMessageBox
if TkVersion < 8.5:
root = Tk() # otherwise create root in main
_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY
_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD
+_PARAM_NAME_MAPPING = {
+ _POSITIONAL_ONLY: 'positional-only',
+ _POSITIONAL_OR_KEYWORD: 'positional or keyword',
+ _VAR_POSITIONAL: 'variadic positional',
+ _KEYWORD_ONLY: 'keyword-only',
+ _VAR_KEYWORD: 'variadic keyword'
+}
+
+_get_paramkind_descr = _PARAM_NAME_MAPPING.__getitem__
+
class Parameter:
"""Represents a parameter in a function signature.
empty = _empty
def __init__(self, name, kind, *, default=_empty, annotation=_empty):
-
- if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
- _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
- raise ValueError("invalid value for 'Parameter.kind' attribute")
- self._kind = kind
-
+ try:
+ self._kind = _ParameterKind(kind)
+ except ValueError:
+ raise ValueError(f'value {kind!r} is not a valid Parameter.kind')
if default is not _empty:
- if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
- msg = '{} parameters cannot have default values'.format(kind)
+ if self._kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
+ msg = '{} parameters cannot have default values'
+ msg = msg.format(_get_paramkind_descr(self._kind))
raise ValueError(msg)
self._default = default
self._annotation = annotation
raise ValueError('name is a required attribute for Parameter')
if not isinstance(name, str):
- raise TypeError("name must be a str, not a {!r}".format(name))
+ msg = 'name must be a str, not a {}'.format(type(name).__name__)
+ raise TypeError(msg)
if name[0] == '.' and name[1:].isdigit():
# These are implicit arguments generated by comprehensions. In
# order to provide a friendlier interface to users, we recast
# their name as "implicitN" and treat them as positional-only.
# See issue 19611.
- if kind != _POSITIONAL_OR_KEYWORD:
- raise ValueError(
- 'implicit arguments must be passed in as {}'.format(
- _POSITIONAL_OR_KEYWORD
- )
+ if self._kind != _POSITIONAL_OR_KEYWORD:
+ msg = (
+ 'implicit arguments must be passed as '
+ 'positional or keyword arguments, not {}'
)
+ msg = msg.format(_get_paramkind_descr(self._kind))
+ raise ValueError(msg)
self._kind = _POSITIONAL_ONLY
name = 'implicit{}'.format(name[1:])
name = param.name
if kind < top_kind:
- msg = 'wrong parameter order: {!r} before {!r}'
- msg = msg.format(top_kind, kind)
+ msg = (
+ 'wrong parameter order: {} parameter before {} '
+ 'parameter'
+ )
+ msg = msg.format(_get_paramkind_descr(top_kind),
+ _get_paramkind_descr(kind))
raise ValueError(msg)
elif kind > top_kind:
kind_defaults = False
# Constructing from a packed address or integer
if isinstance(address, (int, bytes)):
- self.network_address = IPv4Address(address)
- self.netmask, self._prefixlen = self._make_netmask(self._max_prefixlen)
- #fixme: address/network test here.
- return
-
- if isinstance(address, tuple):
- if len(address) > 1:
- arg = address[1]
- else:
- # We weren't given an address[1]
- arg = self._max_prefixlen
- self.network_address = IPv4Address(address[0])
- self.netmask, self._prefixlen = self._make_netmask(arg)
- packed = int(self.network_address)
- if packed & int(self.netmask) != packed:
- if strict:
- raise ValueError('%s has host bits set' % self)
- else:
- self.network_address = IPv4Address(packed &
- int(self.netmask))
- return
-
+ addr = address
+ mask = self._max_prefixlen
+ # Constructing from a tuple (addr, [mask])
+ elif isinstance(address, tuple):
+ addr = address[0]
+ mask = address[1] if len(address) > 1 else self._max_prefixlen
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
- addr = _split_optional_netmask(address)
- self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
-
- if len(addr) == 2:
- arg = addr[1]
else:
- arg = self._max_prefixlen
- self.netmask, self._prefixlen = self._make_netmask(arg)
-
- if strict:
- if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
- self.network_address):
+ args = _split_optional_netmask(address)
+ addr = self._ip_int_from_string(args[0])
+ mask = args[1] if len(args) == 2 else self._max_prefixlen
+
+ self.network_address = IPv4Address(addr)
+ self.netmask, self._prefixlen = self._make_netmask(mask)
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
raise ValueError('%s has host bits set' % self)
- self.network_address = IPv4Address(int(self.network_address) &
- int(self.netmask))
+ else:
+ self.network_address = IPv4Address(packed &
+ int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
"""
_BaseNetwork.__init__(self, address)
- # Efficient constructor from integer or packed address
- if isinstance(address, (bytes, int)):
- self.network_address = IPv6Address(address)
- self.netmask, self._prefixlen = self._make_netmask(self._max_prefixlen)
- return
-
- if isinstance(address, tuple):
- if len(address) > 1:
- arg = address[1]
- else:
- arg = self._max_prefixlen
- self.netmask, self._prefixlen = self._make_netmask(arg)
- self.network_address = IPv6Address(address[0])
- packed = int(self.network_address)
- if packed & int(self.netmask) != packed:
- if strict:
- raise ValueError('%s has host bits set' % self)
- else:
- self.network_address = IPv6Address(packed &
- int(self.netmask))
- return
-
+ # Constructing from a packed address or integer
+ if isinstance(address, (int, bytes)):
+ addr = address
+ mask = self._max_prefixlen
+ # Constructing from a tuple (addr, [mask])
+ elif isinstance(address, tuple):
+ addr = address[0]
+ mask = address[1] if len(address) > 1 else self._max_prefixlen
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
- addr = _split_optional_netmask(address)
-
- self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
-
- if len(addr) == 2:
- arg = addr[1]
else:
- arg = self._max_prefixlen
- self.netmask, self._prefixlen = self._make_netmask(arg)
-
- if strict:
- if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
- self.network_address):
+ args = _split_optional_netmask(address)
+ addr = self._ip_int_from_string(args[0])
+ mask = args[1] if len(args) == 2 else self._max_prefixlen
+
+ self.network_address = IPv6Address(addr)
+ self.netmask, self._prefixlen = self._make_netmask(mask)
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
raise ValueError('%s has host bits set' % self)
- self.network_address = IPv6Address(int(self.network_address) &
- int(self.netmask))
+ else:
+ self.network_address = IPv6Address(packed &
+ int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
finally:
memo.clear()
- return _scan_once
+ return scan_once
make_scanner = c_make_scanner or py_make_scanner
filename += self._append_suffix
if orig_filename != filename:
output_dir = os.path.dirname(filename)
- if not os.path.isdir(output_dir):
+ if not os.path.isdir(output_dir) and output_dir:
os.makedirs(output_dir)
self.log_message('Writing converted %s to %s.', orig_filename,
filename)
#
# SS 2014-10-01:
# Updated alias mapping with glibc 2.19 supported locales.
+#
+# SS 2018-05-05:
+# Updated alias mapping with glibc 2.27 supported locales.
+#
+# These are the differences compared to the old mapping (Python 3.6.5
+# and older):
+#
+# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia'
+# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154'
+# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R'
locale_alias = {
'a3': 'az_AZ.KOI8-C',
'aa_et': 'aa_ET.UTF-8',
'af': 'af_ZA.ISO8859-1',
'af_za': 'af_ZA.ISO8859-1',
+ 'agr_pe': 'agr_PE.UTF-8',
+ 'ak_gh': 'ak_GH.UTF-8',
'am': 'am_ET.UTF-8',
'am_et': 'am_ET.UTF-8',
'american': 'en_US.ISO8859-1',
'an_es': 'an_ES.ISO8859-15',
+ 'anp_in': 'anp_IN.UTF-8',
'ar': 'ar_AA.ISO8859-6',
'ar_aa': 'ar_AA.ISO8859-6',
'ar_ae': 'ar_AE.ISO8859-6',
'ar_qa': 'ar_QA.ISO8859-6',
'ar_sa': 'ar_SA.ISO8859-6',
'ar_sd': 'ar_SD.ISO8859-6',
+ 'ar_ss': 'ar_SS.UTF-8',
'ar_sy': 'ar_SY.ISO8859-6',
'ar_tn': 'ar_TN.ISO8859-6',
'ar_ye': 'ar_YE.ISO8859-6',
'az': 'az_AZ.ISO8859-9E',
'az_az': 'az_AZ.ISO8859-9E',
'az_az.iso88599e': 'az_AZ.ISO8859-9E',
+ 'az_ir': 'az_IR.UTF-8',
'be': 'be_BY.CP1251',
'be@latin': 'be_BY.UTF-8@latin',
'be_bg.utf8': 'bg_BG.UTF-8',
'ber_ma': 'ber_MA.UTF-8',
'bg': 'bg_BG.CP1251',
'bg_bg': 'bg_BG.CP1251',
+ 'bhb_in.utf8': 'bhb_IN.UTF-8',
'bho_in': 'bho_IN.UTF-8',
+ 'bho_np': 'bho_NP.UTF-8',
+ 'bi_vu': 'bi_VU.UTF-8',
'bn_bd': 'bn_BD.UTF-8',
'bn_in': 'bn_IN.UTF-8',
'bo_cn': 'bo_CN.UTF-8',
'ca': 'ca_ES.ISO8859-1',
'ca_ad': 'ca_AD.ISO8859-1',
'ca_es': 'ca_ES.ISO8859-1',
- 'ca_es@valencia': 'ca_ES.ISO8859-15@valencia',
+ 'ca_es@valencia': 'ca_ES.UTF-8@valencia',
'ca_fr': 'ca_FR.ISO8859-1',
'ca_it': 'ca_IT.ISO8859-1',
'catalan': 'ca_ES.ISO8859-1',
+ 'ce_ru': 'ce_RU.UTF-8',
'cextend': 'en_US.ISO8859-1',
'chinese-s': 'zh_CN.eucCN',
'chinese-t': 'zh_TW.eucTW',
+ 'chr_us': 'chr_US.UTF-8',
+ 'ckb_iq': 'ckb_IQ.UTF-8',
+ 'cmn_tw': 'cmn_TW.UTF-8',
'crh_ua': 'crh_UA.UTF-8',
'croatian': 'hr_HR.ISO8859-2',
'cs': 'cs_CZ.ISO8859-2',
'de_be': 'de_BE.ISO8859-1',
'de_ch': 'de_CH.ISO8859-1',
'de_de': 'de_DE.ISO8859-1',
+ 'de_it': 'de_IT.ISO8859-1',
'de_li.utf8': 'de_LI.UTF-8',
'de_lu': 'de_LU.ISO8859-1',
'deutsch': 'de_DE.ISO8859-1',
'en_gb': 'en_GB.ISO8859-1',
'en_hk': 'en_HK.ISO8859-1',
'en_ie': 'en_IE.ISO8859-1',
+ 'en_il': 'en_IL.UTF-8',
'en_in': 'en_IN.ISO8859-1',
'en_ng': 'en_NG.UTF-8',
'en_nz': 'en_NZ.ISO8859-1',
'en_ph': 'en_PH.ISO8859-1',
+ 'en_sc.utf8': 'en_SC.UTF-8',
'en_sg': 'en_SG.ISO8859-1',
'en_uk': 'en_GB.ISO8859-1',
'en_us': 'en_US.ISO8859-1',
'en_zw.utf8': 'en_ZS.UTF-8',
'eng_gb': 'en_GB.ISO8859-1',
'english': 'en_EN.ISO8859-1',
+ 'english.iso88591': 'en_US.ISO8859-1',
'english_uk': 'en_GB.ISO8859-1',
'english_united-states': 'en_US.ISO8859-1',
'english_united-states.437': 'C',
'gv': 'gv_GB.ISO8859-1',
'gv_gb': 'gv_GB.ISO8859-1',
'ha_ng': 'ha_NG.UTF-8',
+ 'hak_tw': 'hak_TW.UTF-8',
'he': 'he_IL.ISO8859-8',
'he_il': 'he_IL.ISO8859-8',
'hebrew': 'he_IL.ISO8859-8',
'hi': 'hi_IN.ISCII-DEV',
'hi_in': 'hi_IN.ISCII-DEV',
'hi_in.isciidev': 'hi_IN.ISCII-DEV',
+ 'hif_fj': 'hif_FJ.UTF-8',
'hne': 'hne_IN.UTF-8',
'hne_in': 'hne_IN.UTF-8',
'hr': 'hr_HR.ISO8859-2',
'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
- 'kk_kz': 'kk_KZ.RK1048',
+ 'kab_dz': 'kab_DZ.UTF-8',
+ 'kk_kz': 'kk_KZ.ptcp154',
'kl': 'kl_GL.ISO8859-1',
'kl_gl': 'kl_GL.ISO8859-1',
'km_kh': 'km_KH.UTF-8',
'li_nl': 'li_NL.UTF-8',
'lij_it': 'lij_IT.UTF-8',
'lithuanian': 'lt_LT.ISO8859-13',
+ 'ln_cd': 'ln_CD.UTF-8',
'lo': 'lo_LA.MULELAO-1',
'lo_la': 'lo_LA.MULELAO-1',
'lo_la.cp1133': 'lo_LA.IBM-CP1133',
'lt_lt': 'lt_LT.ISO8859-13',
'lv': 'lv_LV.ISO8859-13',
'lv_lv': 'lv_LV.ISO8859-13',
+ 'lzh_tw': 'lzh_TW.UTF-8',
'mag_in': 'mag_IN.UTF-8',
'mai': 'mai_IN.UTF-8',
'mai_in': 'mai_IN.UTF-8',
+ 'mai_np': 'mai_NP.UTF-8',
+ 'mfe_mu': 'mfe_MU.UTF-8',
'mg_mg': 'mg_MG.ISO8859-15',
'mhr_ru': 'mhr_RU.UTF-8',
'mi': 'mi_NZ.ISO8859-1',
'mi_nz': 'mi_NZ.ISO8859-1',
+ 'miq_ni': 'miq_NI.UTF-8',
+ 'mjw_in': 'mjw_IN.UTF-8',
'mk': 'mk_MK.ISO8859-5',
'mk_mk': 'mk_MK.ISO8859-5',
'ml': 'ml_IN.UTF-8',
'mt': 'mt_MT.ISO8859-3',
'mt_mt': 'mt_MT.ISO8859-3',
'my_mm': 'my_MM.UTF-8',
- 'nan_tw@latin': 'nan_TW.UTF-8@latin',
+ 'nan_tw': 'nan_TW.UTF-8',
'nb': 'nb_NO.ISO8859-1',
'nb_no': 'nb_NO.ISO8859-1',
'nds_de': 'nds_DE.UTF-8',
'pa_in': 'pa_IN.UTF-8',
'pa_pk': 'pa_PK.UTF-8',
'pap_an': 'pap_AN.UTF-8',
+ 'pap_aw': 'pap_AW.UTF-8',
+ 'pap_cw': 'pap_CW.UTF-8',
'pd': 'pd_US.ISO8859-1',
'pd_de': 'pd_DE.ISO8859-1',
'pd_us': 'pd_US.ISO8859-1',
'pt': 'pt_PT.ISO8859-1',
'pt_br': 'pt_BR.ISO8859-1',
'pt_pt': 'pt_PT.ISO8859-1',
+ 'quz_pe': 'quz_PE.UTF-8',
+ 'raj_in': 'raj_IN.UTF-8',
'ro': 'ro_RO.ISO8859-2',
'ro_ro': 'ro_RO.ISO8859-2',
'romanian': 'ro_RO.ISO8859-2',
'ru_ru': 'ru_RU.UTF-8',
'ru_ua': 'ru_UA.KOI8-U',
'rumanian': 'ro_RO.ISO8859-2',
- 'russian': 'ru_RU.ISO8859-5',
+ 'russian': 'ru_RU.KOI8-R',
'rw': 'rw_RW.ISO8859-1',
'rw_rw': 'rw_RW.ISO8859-1',
'sa_in': 'sa_IN.UTF-8',
'sd_pk': 'sd_PK.UTF-8',
'se_no': 'se_NO.UTF-8',
'serbocroatian': 'sr_RS.UTF-8@latin',
+ 'sgs_lt': 'sgs_LT.UTF-8',
'sh': 'sr_RS.UTF-8@latin',
'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2',
'sh_hr': 'sh_HR.ISO8859-2',
'sh_hr.iso88592': 'hr_HR.ISO8859-2',
'sh_sp': 'sr_CS.ISO8859-2',
'sh_yu': 'sr_RS.UTF-8@latin',
+ 'shn_mm': 'shn_MM.UTF-8',
'shs_ca': 'shs_CA.UTF-8',
'si': 'si_LK.UTF-8',
'si_lk': 'si_LK.UTF-8',
'slovak': 'sk_SK.ISO8859-2',
'slovene': 'sl_SI.ISO8859-2',
'slovenian': 'sl_SI.ISO8859-2',
+ 'sm_ws': 'sm_WS.UTF-8',
'so_dj': 'so_DJ.ISO8859-1',
'so_et': 'so_ET.UTF-8',
'so_ke': 'so_KE.ISO8859-1',
'ta_in.tscii': 'ta_IN.TSCII-0',
'ta_in.tscii0': 'ta_IN.TSCII-0',
'ta_lk': 'ta_LK.UTF-8',
+ 'tcy_in.utf8': 'tcy_IN.UTF-8',
'te': 'te_IN.UTF-8',
'te_in': 'te_IN.UTF-8',
'tg': 'tg_TJ.KOI8-C',
'th_th.tactis': 'th_TH.TIS620',
'th_th.tis620': 'th_TH.TIS620',
'thai': 'th_TH.ISO8859-11',
+ 'the_np': 'the_NP.UTF-8',
'ti_er': 'ti_ER.UTF-8',
'ti_et': 'ti_ET.UTF-8',
'tig_er': 'tig_ER.UTF-8',
'tl_ph': 'tl_PH.ISO8859-1',
'tn': 'tn_ZA.ISO8859-15',
'tn_za': 'tn_ZA.ISO8859-15',
+ 'to_to': 'to_TO.UTF-8',
+ 'tpi_pg': 'tpi_PG.UTF-8',
'tr': 'tr_TR.ISO8859-9',
'tr_cy': 'tr_CY.ISO8859-9',
'tr_tr': 'tr_TR.ISO8859-9',
'yi_us': 'yi_US.CP1255',
'yo_ng': 'yo_NG.UTF-8',
'yue_hk': 'yue_HK.UTF-8',
+ 'yuw_pg': 'yuw_PG.UTF-8',
'zh': 'zh_CN.eucCN',
'zh_cn': 'zh_CN.gb2312',
'zh_cn.big5': 'zh_TW.big5',
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
- the optional datefmt argument (if omitted, you get the ISO8601 format).
+ the optional datefmt argument. If datefmt is omitted, you get an
+ ISO8601-like (or RFC 3339-like) format.
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
- record. Otherwise, the ISO8601 format is used. The resulting
- string is returned. This function uses a user-configurable function
- to convert the creation time to a tuple. By default, time.localtime()
- is used; to change this for a particular formatter instance, set the
- 'converter' attribute to a function with the same signature as
- time.localtime() or time.gmtime(). To change it for all formatters,
- for example if you want all logging times to be shown in GMT,
+ record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used.
+ The resulting string is returned. This function uses a user-configurable
+ function to convert the creation time to a tuple. By default,
+ time.localtime() is used; to change this for a particular formatter
+ instance, set the 'converter' attribute to a function with the same
+ signature as time.localtime() or time.gmtime(). To change it for all
+ formatters, for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
+ _strprefixes = [p + q for p in ('b', 'f', 'r', 'u') for q in ("'", '"')]
_symbols_inverse = {
- 'STRINGS' : ("'", "'''", "r'", "b'", '"""', '"', 'r"', 'b"'),
+ 'STRINGS' : ("'", "'''", '"', '"""', *_strprefixes),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
if not request: break
except (KeyboardInterrupt, EOFError):
break
- request = replace(request, '"', '', "'", '').strip()
+ request = request.strip()
+
+ # Make sure significant trailing quoting marks of literals don't
+ # get deleted while cleaning input
+ if (len(request) > 2 and request[0] == request[-1] in ("'", '"')
+ and request[0] not in request[1:-1]):
+ request = request[1:-1]
if request.lower() in ('q', 'quit'): break
if request == 'help':
self.intro()
def stop(self):
"""Stop the server and this thread nicely"""
self.docserver.quit = True
+ self.join()
+ # explicitly break a reference cycle: DocServer.callback
+ # has indirectly a reference to ServerThread.
+ self.docserver = None
self.serving = False
self.url = None
# -*- coding: utf-8 -*-
-# Autogenerated by Sphinx on Tue Mar 13 21:13:16 2018
+# Autogenerated by Sphinx on Tue Jun 12 00:16:23 2018
topics = {'assert': 'The "assert" statement\n'
'**********************\n'
'\n'
' corresponding targets.\n'
'\n'
' * If the target list contains one target prefixed with an\n'
- ' asterisk, called a "starred" target: The object must be '
+ ' asterisk, called a “starred” target: The object must be '
'an\n'
' iterable with at least as many items as there are targets '
'in the\n'
' If the primary is a mutable sequence object (such as a '
'list), the\n'
' subscript must yield an integer. If it is negative, the '
- "sequence's\n"
+ 'sequence’s\n'
' length is added to it. The resulting value must be a '
'nonnegative\n'
- " integer less than the sequence's length, and the sequence is "
+ ' integer less than the sequence’s length, and the sequence is '
'asked\n'
' to assign the assigned object to its item with that index. '
'If the\n'
'\n'
' If the primary is a mapping object (such as a dictionary), '
'the\n'
- " subscript must have a type compatible with the mapping's key "
+ ' subscript must have a type compatible with the mapping’s key '
'type,\n'
' and the mapping is then asked to create a key/datum pair '
'which maps\n'
'expressions are\n'
' evaluated, insofar they are present; defaults are zero and '
'the\n'
- " sequence's length. The bounds should evaluate to integers. "
+ ' sequence’s length. The bounds should evaluate to integers. '
'If\n'
- " either bound is negative, the sequence's length is added to "
+ ' either bound is negative, the sequence’s length is added to '
'it. The\n'
' resulting bounds are clipped to lie between zero and the '
- "sequence's\n"
+ 'sequence’s\n'
' length, inclusive. Finally, the sequence object is asked to '
'replace\n'
' the slice with the items of the assigned sequence. The '
'\n'
'Although the definition of assignment implies that overlaps '
'between\n'
- "the left-hand side and the right-hand side are 'simultaneous' "
+ 'the left-hand side and the right-hand side are ‘simultaneous’ '
'(for\n'
'example "a, b = b, a" swaps two variables), overlaps *within* '
'the\n'
'\n'
'All literals correspond to immutable data types, and hence '
'the\n'
- "object's identity is less important than its value. "
+ 'object’s identity is less important than its value. '
'Multiple\n'
'evaluations of literals with the same value (either the '
'same\n'
'\n'
'Note: Setting module "__class__" only affects lookups '
'made using the\n'
- ' attribute access syntax -- directly accessing the '
+ ' attribute access syntax – directly accessing the '
'module globals\n'
' (whether by code within the module, or via a reference '
'to the\n'
- " module's globals dictionary) is unaffected.\n"
+ ' module’s globals dictionary) is unaffected.\n'
'\n'
'Changed in version 3.5: "__class__" module attribute is '
'now writable.\n'
'containing the method (a so-called *descriptor* class) '
'appears in an\n'
'*owner* class (the descriptor must be in either the '
- "owner's class\n"
+ 'owner’s class\n'
'dictionary or in the class dictionary for one of its '
'parents). In the\n'
- 'examples below, "the attribute" refers to the attribute '
+ 'examples below, “the attribute” refers to the attribute '
'whose name is\n'
- "the key of the property in the owner class' "
- '"__dict__".\n'
+ 'the key of the property in the owner class’ "__dict__".\n'
'\n'
'object.__get__(self, instance, owner)\n'
'\n'
'====================\n'
'\n'
'In general, a descriptor is an object attribute with '
- '"binding\n'
- 'behavior", one whose attribute access has been '
+ '“binding\n'
+ 'behavior”, one whose attribute access has been '
'overridden by methods\n'
'in the descriptor protocol: "__get__()", "__set__()", '
'and\n'
'\n'
'The default behavior for attribute access is to get, '
'set, or delete\n'
- "the attribute from an object's dictionary. For instance, "
+ 'the attribute from an object’s dictionary. For instance, '
'"a.x" has a\n'
'lookup chain starting with "a.__dict__[\'x\']", then\n'
'"type(a).__dict__[\'x\']", and continuing through the '
'does not define "__get__()", then accessing the '
'attribute will return\n'
'the descriptor object itself unless there is a value in '
- "the object's\n"
+ 'the object’s\n'
'instance dictionary. If the descriptor defines '
'"__set__()" and/or\n'
'"__delete__()", it is a data descriptor; if it defines '
'\n'
'* Nonempty *__slots__* does not work for classes derived '
'from\n'
- ' "variable-length" built-in types such as "int", '
+ ' “variable-length” built-in types such as "int", '
'"bytes" and "tuple".\n'
'\n'
'* Any non-string iterable may be assigned to '
'while\n'
'floor division of integers results in an integer; the result is '
'that\n'
- "of mathematical division with the 'floor' function applied to the\n"
+ 'of mathematical division with the ‘floor’ function applied to the\n'
'result. Division by zero raises the "ZeroDivisionError" '
'exception.\n'
'\n'
'************\n'
'\n'
'Code objects are used by the implementation to '
- 'represent "pseudo-\n'
- 'compiled" executable Python code such as a function '
+ 'represent “pseudo-\n'
+ 'compiled” executable Python code such as a function '
'body. They differ\n'
- "from function objects because they don't contain a "
+ 'from function objects because they don’t contain a '
'reference to their\n'
'global execution environment. Code objects are '
'returned by the built-\n'
'bltin-null-object': 'The Null Object\n'
'***************\n'
'\n'
- "This object is returned by functions that don't "
+ 'This object is returned by functions that don’t '
'explicitly return a\n'
'value. It supports no special operations. There is '
'exactly one null\n'
'************\n'
'\n'
'Type objects represent the various object types. An '
- "object's type is\n"
+ 'object’s type is\n'
'accessed by the built-in function "type()". There are '
'no special\n'
'operations on types. The standard module "types" '
'\n'
'object.__call__(self[, args...])\n'
'\n'
- ' Called when the instance is "called" as a function; if '
+ ' Called when the instance is “called” as a function; if '
'this method\n'
' is defined, "x(arg1, arg2, ...)" is a shorthand for\n'
' "x.__call__(arg1, arg2, ...)".\n',
'values are calculated, once, when the function is defined; thus, a\n'
'mutable object such as a list or dictionary used as default value '
'will\n'
- "be shared by all calls that don't specify an argument value for "
+ 'be shared by all calls that don’t specify an argument value for '
'the\n'
'corresponding slot; this should usually be avoided.) If there are '
'any\n'
'**CPython implementation detail:** An implementation may provide\n'
'built-in functions whose positional parameters do not have names, '
'even\n'
- "if they are 'named' for the purpose of documentation, and which\n"
+ 'if they are ‘named’ for the purpose of documentation, and which\n'
'therefore cannot be supplied by keyword. In CPython, this is the '
'case\n'
'for functions implemented in C that use "PyArg_ParseTuple()" to '
'must evaluate to an *iterable*. Elements from these iterables are\n'
'treated as if they were additional positional arguments. For the '
'call\n'
- '"f(x1, x2, *y, x3, x4)", if *y* evaluates to a sequence *y1*, ...,\n'
- '*yM*, this is equivalent to a call with M+4 positional arguments '
- '*x1*,\n'
- '*x2*, *y1*, ..., *yM*, *x3*, *x4*.\n'
+ '"f(x1, x2, *y, x3, x4)", if *y* evaluates to a sequence *y1*, …, '
+ '*yM*,\n'
+ 'this is equivalent to a call with M+4 positional arguments *x1*, '
+ '*x2*,\n'
+ '*y1*, …, *yM*, *x3*, *x4*.\n'
'\n'
'A consequence of this is that although the "*expression" syntax '
'may\n'
'appear *after* explicit keyword arguments, it is processed '
'*before*\n'
- 'the keyword arguments (and any "**expression" arguments -- see '
+ 'the keyword arguments (and any "**expression" arguments – see '
'below).\n'
'So:\n'
'\n'
'exception. How this value is computed depends on the type of the\n'
'callable object.\n'
'\n'
- 'If it is---\n'
+ 'If it is—\n'
'\n'
'a user-defined function:\n'
' The code block for the function is executed, passing it the\n'
' class Foo(object):\n'
' pass\n'
'\n'
- "The class's suite is then executed in a new execution frame (see\n"
+ 'The class’s suite is then executed in a new execution frame (see\n'
'Naming and binding), using a newly created local namespace and the\n'
'original global namespace. (Usually, the suite contains mostly\n'
- "function definitions.) When the class's suite finishes execution, "
+ 'function definitions.) When the class’s suite finishes execution, '
'its\n'
'execution frame is discarded but its local namespace is saved. [4] '
'A\n'
'namespace.\n'
'\n'
'The order in which attributes are defined in the class body is\n'
- 'preserved in the new class\'s "__dict__". Note that this is '
+ 'preserved in the new class’s "__dict__". Note that this is '
'reliable\n'
'only right after the class is created and only for classes that '
'were\n'
'for\n'
'function decorators. The result is then bound to the class name.\n'
'\n'
- "**Programmer's note:** Variables defined in the class definition "
+ '**Programmer’s note:** Variables defined in the class definition '
'are\n'
'class attributes; they are shared by instances. Instance '
'attributes\n'
'can be set in a method with "self.name = value". Both class and\n'
'instance attributes are accessible through the notation '
- '""self.name"",\n'
+ '“"self.name"”,\n'
'and an instance attribute hides a class attribute with the same '
'name\n'
'when accessed in this way. Class attributes can be used as '
'y" is\n'
'found to be false).\n'
'\n'
- 'Formally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and '
+ 'Formally, if *a*, *b*, *c*, …, *y*, *z* are expressions and '
'*op1*,\n'
- '*op2*, ..., *opN* are comparison operators, then "a op1 b op2 '
- 'c ... y\n'
+ '*op2*, …, *opN* are comparison operators, then "a op1 b op2 c '
+ '... y\n'
'opN z" is equivalent to "a op1 b and b op2 c and ... y opN '
'z", except\n'
'that each expression is evaluated at most once.\n'
'\n'
- 'Note that "a op1 b op2 c" doesn\'t imply any kind of '
+ 'Note that "a op1 b op2 c" doesn’t imply any kind of '
'comparison between\n'
'*a* and *c*, so that, e.g., "x < y > z" is perfectly legal '
'(though\n'
'rather\n'
'abstract notion in Python: For example, there is no canonical '
'access\n'
- "method for an object's value. Also, there is no requirement "
+ 'method for an object’s value. Also, there is no requirement '
'that the\n'
'value of an object should be constructed in a particular way, '
'e.g.\n'
'most\n'
'important built-in types.\n'
'\n'
- '* Numbers of built-in numeric types (Numeric Types --- int, '
+ '* Numbers of built-in numeric types (Numeric Types — int, '
'float,\n'
' complex) and of the standard library types '
'"fractions.Fraction" and\n'
'compound\n'
'statements.\n'
'\n'
- "A compound statement consists of one or more 'clauses.' A "
+ 'A compound statement consists of one or more ‘clauses.’ A '
'clause\n'
- "consists of a header and a 'suite.' The clause headers of a\n"
+ 'consists of a header and a ‘suite.’ The clause headers of a\n'
'particular compound statement are all at the same indentation '
'level.\n'
'Each clause header begins with a uniquely identifying keyword '
'with a colon. A suite is a group of statements controlled by a\n'
'clause. A suite can be one or more semicolon-separated simple\n'
'statements on the same line as the header, following the '
- "header's\n"
+ 'header’s\n'
'colon, or it can be one or more indented statements on '
'subsequent\n'
'lines. Only the latter form of a suite can contain nested '
'compound\n'
- "statements; the following is illegal, mostly because it wouldn't "
+ 'statements; the following is illegal, mostly because it wouldn’t '
'be\n'
'clear to which "if" clause a following "else" clause would '
'belong:\n'
'"DEDENT". Also note that optional continuation clauses always '
'begin\n'
'with a keyword that cannot start a statement, thus there are no\n'
- 'ambiguities (the \'dangling "else"\' problem is solved in Python '
+ 'ambiguities (the ‘dangling "else"’ problem is solved in Python '
'by\n'
'requiring nested "if" statements to be indented).\n'
'\n'
'\n'
'A "break" statement executed in the first suite terminates the '
'loop\n'
- 'without executing the "else" clause\'s suite. A "continue" '
+ 'without executing the "else" clause’s suite. A "continue" '
'statement\n'
'executed in the first suite skips the rest of the suite and goes '
'back\n'
'\n'
'A "break" statement executed in the first suite terminates the '
'loop\n'
- 'without executing the "else" clause\'s suite. A "continue" '
+ 'without executing the "else" clause’s suite. A "continue" '
'statement\n'
'executed in the first suite skips the rest of the suite and '
'continues\n'
'to at\n'
'all by the loop. Hint: the built-in function "range()" returns '
'an\n'
- "iterator of integers suitable to emulate the effect of Pascal's "
+ 'iterator of integers suitable to emulate the effect of Pascal’s '
'"for i\n'
':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, '
'2]".\n'
'expression\n'
'is evaluated, and the clause matches the exception if the '
'resulting\n'
- 'object is "compatible" with the exception. An object is '
+ 'object is “compatible” with the exception. An object is '
'compatible\n'
'with an exception if it is the class or a base class of the '
'exception\n'
'assigned to\n'
'the target specified after the "as" keyword in that except '
'clause, if\n'
- "present, and the except clause's suite is executed. All except\n"
+ 'present, and the except clause’s suite is executed. All except\n'
'clauses must have an executable block. When the end of this '
'block is\n'
'reached, execution continues normally after the entire try '
'alive\n'
'until the next garbage collection occurs.\n'
'\n'
- "Before an except clause's suite is executed, details about the\n"
+ 'Before an except clause’s suite is executed, details about the\n'
'exception are stored in the "sys" module and can be accessed '
'via\n'
'"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting '
'are\n'
'not handled by the preceding "except" clauses.\n'
'\n'
- 'If "finally" is present, it specifies a \'cleanup\' handler. '
- 'The "try"\n'
+ 'If "finally" is present, it specifies a ‘cleanup’ handler. The '
+ '"try"\n'
'clause is executed, including any "except" and "else" clauses. '
'If an\n'
'exception occurs in any of the clauses and is not handled, the\n'
'\n'
'When a "return", "break" or "continue" statement is executed in '
'the\n'
- '"try" suite of a "try"..."finally" statement, the "finally" '
- 'clause is\n'
- 'also executed \'on the way out.\' A "continue" statement is '
+ '"try" suite of a "try"…"finally" statement, the "finally" clause '
+ 'is\n'
+ 'also executed ‘on the way out.’ A "continue" statement is '
'illegal in\n'
'the "finally" clause. (The reason is a problem with the current\n'
- 'implementation --- this restriction may be lifted in the '
- 'future).\n'
+ 'implementation — this restriction may be lifted in the future).\n'
'\n'
'The return value of a function is determined by the last '
'"return"\n'
'with\n'
'methods defined by a context manager (see section With '
'Statement\n'
- 'Context Managers). This allows common '
- '"try"..."except"..."finally"\n'
- 'usage patterns to be encapsulated for convenient reuse.\n'
+ 'Context Managers). This allows common "try"…"except"…"finally" '
+ 'usage\n'
+ 'patterns to be encapsulated for convenient reuse.\n'
'\n'
' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n'
' with_item ::= expression ["as" target]\n'
'\n'
- 'The execution of the "with" statement with one "item" proceeds '
+ 'The execution of the "with" statement with one “item” proceeds '
'as\n'
'follows:\n'
'\n'
'"with_item")\n'
' is evaluated to obtain a context manager.\n'
'\n'
- '2. The context manager\'s "__exit__()" is loaded for later use.\n'
+ '2. The context manager’s "__exit__()" is loaded for later use.\n'
'\n'
- '3. The context manager\'s "__enter__()" method is invoked.\n'
+ '3. The context manager’s "__enter__()" method is invoked.\n'
'\n'
'4. If a target was included in the "with" statement, the return\n'
' value from "__enter__()" is assigned to it.\n'
'\n'
'5. The suite is executed.\n'
'\n'
- '6. The context manager\'s "__exit__()" method is invoked. If '
- 'an\n'
+ '6. The context manager’s "__exit__()" method is invoked. If an\n'
' exception caused the suite to be exited, its type, value, '
'and\n'
' traceback are passed as arguments to "__exit__()". Otherwise, '
'\n'
'See also:\n'
'\n'
- ' **PEP 343** - The "with" statement\n'
+ ' **PEP 343** - The “with” statement\n'
' The specification, background, and examples for the Python '
'"with"\n'
' statement.\n'
'"func".\n'
'\n'
'When one or more *parameters* have the form *parameter* "="\n'
- '*expression*, the function is said to have "default parameter '
- 'values."\n'
+ '*expression*, the function is said to have “default parameter '
+ 'values.”\n'
'For a parameter with a default value, the corresponding '
'*argument* may\n'
- "be omitted from a call, in which case the parameter's default "
+ 'be omitted from a call, in which case the parameter’s default '
'value is\n'
'substituted. If a parameter has a default value, all following\n'
- 'parameters up until the ""*"" must also have a default value --- '
- 'this\n'
- 'is a syntactic restriction that is not expressed by the '
- 'grammar.\n'
+ 'parameters up until the “"*"” must also have a default value — '
+ 'this is\n'
+ 'a syntactic restriction that is not expressed by the grammar.\n'
'\n'
'**Default parameter values are evaluated from left to right when '
'the\n'
'function definition is executed.** This means that the '
'expression is\n'
'evaluated once, when the function is defined, and that the same '
- '"pre-\n'
- 'computed" value is used for each call. This is especially '
+ '“pre-\n'
+ 'computed” value is used for each call. This is especially '
'important\n'
'to understand when a default parameter is a mutable object, such '
'as a\n'
'mentioned in\n'
'the parameter list, either from position arguments, from '
'keyword\n'
- 'arguments, or from default values. If the form ""*identifier"" '
+ 'arguments, or from default values. If the form “"*identifier"” '
'is\n'
'present, it is initialized to a tuple receiving any excess '
'positional\n'
'parameters, defaulting to the empty tuple. If the form\n'
- '""**identifier"" is present, it is initialized to a new ordered\n'
+ '“"**identifier"” is present, it is initialized to a new ordered\n'
'mapping receiving any excess keyword arguments, defaulting to a '
'new\n'
- 'empty mapping of the same type. Parameters after ""*"" or\n'
- '""*identifier"" are keyword-only parameters and may only be '
+ 'empty mapping of the same type. Parameters after “"*"” or\n'
+ '“"*identifier"” are keyword-only parameters and may only be '
'passed\n'
'used keyword arguments.\n'
'\n'
- 'Parameters may have annotations of the form "": expression"" '
+ 'Parameters may have annotations of the form “": expression"” '
'following\n'
'the parameter name. Any parameter may have an annotation even '
'those\n'
'of the form "*identifier" or "**identifier". Functions may '
'have\n'
- '"return" annotation of the form ""-> expression"" after the '
+ '“return” annotation of the form “"-> expression"” after the '
'parameter\n'
'list. These annotations can be any valid Python expression and '
'are\n'
'code.\n'
'The presence of annotations does not change the semantics of a\n'
'function. The annotation values are available as values of a\n'
- "dictionary keyed by the parameters' names in the "
+ 'dictionary keyed by the parameters’ names in the '
'"__annotations__"\n'
'attribute of the function object.\n'
'\n'
'lambda\n'
'expression is merely a shorthand for a simplified function '
'definition;\n'
- 'a function defined in a ""def"" statement can be passed around '
+ 'a function defined in a “"def"” statement can be passed around '
'or\n'
'assigned to another name just like a function defined by a '
'lambda\n'
- 'expression. The ""def"" form is actually more powerful since '
+ 'expression. The “"def"” form is actually more powerful since '
'it\n'
'allows the execution of multiple statements and annotations.\n'
'\n'
- "**Programmer's note:** Functions are first-class objects. A "
- '""def""\n'
+ '**Programmer’s note:** Functions are first-class objects. A '
+ '“"def"”\n'
'statement executed inside a function definition defines a local\n'
'function that can be returned or passed around. Free variables '
'used\n'
' class Foo(object):\n'
' pass\n'
'\n'
- "The class's suite is then executed in a new execution frame "
+ 'The class’s suite is then executed in a new execution frame '
'(see\n'
'Naming and binding), using a newly created local namespace and '
'the\n'
'original global namespace. (Usually, the suite contains mostly\n'
- "function definitions.) When the class's suite finishes "
+ 'function definitions.) When the class’s suite finishes '
'execution, its\n'
'execution frame is discarded but its local namespace is saved. '
'[4] A\n'
'namespace.\n'
'\n'
'The order in which attributes are defined in the class body is\n'
- 'preserved in the new class\'s "__dict__". Note that this is '
+ 'preserved in the new class’s "__dict__". Note that this is '
'reliable\n'
'only right after the class is created and only for classes that '
'were\n'
'function decorators. The result is then bound to the class '
'name.\n'
'\n'
- "**Programmer's note:** Variables defined in the class definition "
+ '**Programmer’s note:** Variables defined in the class definition '
'are\n'
'class attributes; they are shared by instances. Instance '
'attributes\n'
'can be set in a method with "self.name = value". Both class '
'and\n'
'instance attributes are accessible through the notation '
- '""self.name"",\n'
+ '“"self.name"”,\n'
'and an instance attribute hides a class attribute with the same '
'name\n'
'when accessed in this way. Class attributes can be used as '
' exception. That new exception causes the old one to be '
'lost.\n'
'\n'
- '[2] Currently, control "flows off the end" except in the case '
+ '[2] Currently, control “flows off the end” except in the case '
'of\n'
' an exception or the execution of a "return", "continue", or\n'
' "break" statement.\n'
'\n'
'[3] A string literal appearing as the first statement in the\n'
- ' function body is transformed into the function\'s "__doc__"\n'
- " attribute and therefore the function's *docstring*.\n"
+ ' function body is transformed into the function’s "__doc__"\n'
+ ' attribute and therefore the function’s *docstring*.\n'
'\n'
'[4] A string literal appearing as the first statement in the '
'class\n'
- ' body is transformed into the namespace\'s "__doc__" item '
- 'and\n'
- " therefore the class's *docstring*.\n",
+ ' body is transformed into the namespace’s "__doc__" item and\n'
+ ' therefore the class’s *docstring*.\n',
'context-managers': 'With Statement Context Managers\n'
'*******************************\n'
'\n'
'\n'
' Enter the runtime context related to this object. The '
'"with"\n'
- " statement will bind this method's return value to the "
+ ' statement will bind this method’s return value to the '
'target(s)\n'
' specified in the "as" clause of the statement, if '
'any.\n'
'\n'
' Note that "__exit__()" methods should not reraise the '
'passed-in\n'
- " exception; this is the caller's responsibility.\n"
+ ' exception; this is the caller’s responsibility.\n'
'\n'
'See also:\n'
'\n'
- ' **PEP 343** - The "with" statement\n'
+ ' **PEP 343** - The “with” statement\n'
' The specification, background, and examples for the '
'Python "with"\n'
' statement.\n',
'\n'
'When a description of an arithmetic operator below uses the '
'phrase\n'
- '"the numeric arguments are converted to a common type," this '
+ '“the numeric arguments are converted to a common type,” this '
'means\n'
'that the operator implementation for built-in types works as '
'follows:\n'
'\n'
'Some additional rules apply for certain operators (e.g., a '
'string as a\n'
- "left argument to the '%' operator). Extensions must define "
+ 'left argument to the ‘%’ operator). Extensions must define '
'their own\n'
'conversion behavior.\n',
'customization': 'Basic customization\n'
'\n'
' Typical implementations create a new instance of the '
'class by\n'
- ' invoking the superclass\'s "__new__()" method using\n'
+ ' invoking the superclass’s "__new__()" method using\n'
' "super().__new__(cls[, ...])" with appropriate arguments '
'and then\n'
' modifying the newly-created instance as necessary before '
'\n'
' If "__new__()" returns an instance of *cls*, then the '
'new\n'
- ' instance\'s "__init__()" method will be invoked like\n'
+ ' instance’s "__init__()" method will be invoked like\n'
' "__init__(self[, ...])", where *self* is the new '
'instance and the\n'
' remaining arguments are the same as were passed to '
'\n'
' If "__new__()" does not return an instance of *cls*, '
'then the new\n'
- ' instance\'s "__init__()" method will not be invoked.\n'
+ ' instance’s "__init__()" method will not be invoked.\n'
'\n'
' "__new__()" is intended mainly to allow subclasses of '
'immutable\n'
'those\n'
' passed to the class constructor expression. If a base '
'class has an\n'
- ' "__init__()" method, the derived class\'s "__init__()" '
+ ' "__init__()" method, the derived class’s "__init__()" '
'method, if\n'
' any, must explicitly call it to ensure proper '
'initialization of the\n'
'is also\n'
' called a finalizer or (improperly) a destructor. If a '
'base class\n'
- ' has a "__del__()" method, the derived class\'s '
+ ' has a "__del__()" method, the derived class’s '
'"__del__()" method,\n'
' if any, must explicitly call it to ensure proper '
'deletion of the\n'
'for\n'
' objects that still exist when the interpreter exits.\n'
'\n'
- ' Note: "del x" doesn\'t directly call "x.__del__()" --- '
- 'the former\n'
+ ' Note: "del x" doesn’t directly call "x.__del__()" — the '
+ 'former\n'
' decrements the reference count for "x" by one, and the '
'latter is\n'
- ' only called when "x"\'s reference count reaches zero.\n'
+ ' only called when "x"’s reference count reaches zero.\n'
'\n'
' **CPython implementation detail:** It is possible for a '
'reference\n'
'reference\n'
' cycles is when an exception has been caught in a local '
'variable.\n'
- " The frame's locals then reference the exception, which "
+ ' The frame’s locals then reference the exception, which '
'references\n'
' its own traceback, which references the locals of all '
'frames caught\n'
'object.__repr__(self)\n'
'\n'
' Called by the "repr()" built-in function to compute the '
- '"official"\n'
+ '“official”\n'
' string representation of an object. If at all possible, '
'this\n'
' should look like a valid Python expression that could be '
' value must be a string object. If a class defines '
'"__repr__()" but\n'
' not "__str__()", then "__repr__()" is also used when an '
- '"informal"\n'
+ '“informal”\n'
' string representation of instances of that class is '
'required.\n'
'\n'
'\n'
' Called by "str(object)" and the built-in functions '
'"format()" and\n'
- ' "print()" to compute the "informal" or nicely printable '
+ ' "print()" to compute the “informal” or nicely printable '
'string\n'
' representation of an object. The return value must be a '
'string\n'
'extension,\n'
' evaluation of formatted string literals and the '
'"str.format()"\n'
- ' method, to produce a "formatted" string representation '
+ ' method, to produce a “formatted” string representation '
'of an\n'
' object. The "format_spec" argument is a string that '
'contains a\n'
'object.__gt__(self, other)\n'
'object.__ge__(self, other)\n'
'\n'
- ' These are the so-called "rich comparison" methods. The\n'
+ ' These are the so-called “rich comparison” methods. The\n'
' correspondence between operator symbols and method names '
'is as\n'
' follows: "x<y" calls "x.__lt__(y)", "x<=y" calls '
' when the left argument does not support the operation '
'but the right\n'
' argument does); rather, "__lt__()" and "__gt__()" are '
- "each other's\n"
- ' reflection, "__le__()" and "__ge__()" are each other\'s '
+ 'each other’s\n'
+ ' reflection, "__le__()" and "__ge__()" are each other’s '
'reflection,\n'
' and "__eq__()" and "__ne__()" are their own reflection. '
'If the\n'
- " operands are of different types, and right operand's "
+ ' operands are of different types, and right operand’s '
'type is a\n'
- " direct or indirect subclass of the left operand's type, "
+ ' direct or indirect subclass of the left operand’s type, '
'the\n'
' reflected method of the right operand has priority, '
'otherwise the\n'
- " left operand's method has priority. Virtual subclassing "
+ ' left operand’s method has priority. Virtual subclassing '
'is not\n'
' considered.\n'
'\n'
' return hash((self.name, self.nick, self.color))\n'
'\n'
' Note: "hash()" truncates the value returned from an '
- "object's\n"
+ 'object’s\n'
' custom "__hash__()" method to the size of a '
'"Py_ssize_t". This\n'
' is typically 8 bytes on 64-bit builds and 4 bytes on '
'32-bit\n'
- ' builds. If an object\'s "__hash__()" must '
+ ' builds. If an object’s "__hash__()" must '
'interoperate on builds\n'
' of different bit sizes, be sure to check the width on '
'all\n'
'implement\n'
' "__hash__()", since the implementation of hashable '
'collections\n'
- " requires that a key's hash value is immutable (if the "
- "object's hash\n"
+ ' requires that a key’s hash value is immutable (if the '
+ 'object’s hash\n'
' value changes, it will be in the wrong hash bucket).\n'
'\n'
' User-defined classes have "__eq__()" and "__hash__()" '
'\n'
' Note: By default, the "__hash__()" values of str, bytes '
'and\n'
- ' datetime objects are "salted" with an unpredictable '
+ ' datetime objects are “salted” with an unpredictable '
'random value.\n'
' Although they remain constant within an individual '
'Python\n'
' neither "__len__()" nor "__bool__()", all its instances '
'are\n'
' considered true.\n',
- 'debugger': '"pdb" --- The Python Debugger\n'
- '*****************************\n'
+ 'debugger': '"pdb" — The Python Debugger\n'
+ '***************************\n'
'\n'
'**Source code:** Lib/pdb.py\n'
'\n'
'debugging\n'
'and can be called under program control.\n'
'\n'
- 'The debugger is extensible -- it is actually defined as the '
+ 'The debugger is extensible – it is actually defined as the '
'class\n'
'"Pdb". This is currently undocumented but easily understood by '
'reading\n'
'the source. The extension interface uses the modules "bdb" and '
'"cmd".\n'
'\n'
- 'The debugger\'s prompt is "(Pdb)". Typical usage to run a '
- 'program under\n'
+ 'The debugger’s prompt is "(Pdb)". Typical usage to run a program '
+ 'under\n'
'control of the debugger is:\n'
'\n'
' >>> import pdb\n'
'post-\n'
'mortem debugging (or after normal exit of the program), pdb '
'will\n'
- "restart the program. Automatic restarting preserves pdb's state "
+ 'restart the program. Automatic restarting preserves pdb’s state '
'(such\n'
'as breakpoints) and in most cases is more useful than quitting '
'the\n'
- "debugger upon program's exit.\n"
+ 'debugger upon program’s exit.\n'
'\n'
'New in version 3.2: "pdb.py" now accepts a "-c" option that '
'executes\n'
'the last command was a "list" command, the next 11 lines are '
'listed.\n'
'\n'
- "Commands that the debugger doesn't recognize are assumed to be "
+ 'Commands that the debugger doesn’t recognize are assumed to be '
'Python\n'
'statements and are executed in the context of the program being\n'
'debugged. Python statements can also be prefixed with an '
'function.\n'
'When an exception occurs in such a statement, the exception name '
'is\n'
- "printed but the debugger's state is not changed.\n"
+ 'printed but the debugger’s state is not changed.\n'
'\n'
'The debugger supports aliases. Aliases can have parameters '
'which\n'
'first\n'
'";;" pair, even if it is in the middle of a quoted string.\n'
'\n'
- 'If a file ".pdbrc" exists in the user\'s home directory or in '
+ 'If a file ".pdbrc" exists in the user’s home directory or in '
'the\n'
'current directory, it is read in and executed as if it had been '
'typed\n'
'prefixed\n'
' with a filename and a colon, to specify a breakpoint in '
'another\n'
- " file (probably one that hasn't been loaded yet). The file "
+ ' file (probably one that hasn’t been loaded yet). The file '
'is\n'
' searched on "sys.path". Note that each breakpoint is '
'assigned a\n'
'which\n'
' list to execute.\n'
'\n'
- " If you use the 'silent' command in the command list, the "
+ ' If you use the ‘silent’ command in the command list, the '
'usual\n'
' message about stopping at a breakpoint is not printed. This '
'may be\n'
'the\n'
' bottom-most frame. This lets you jump back and execute code '
'again,\n'
- " or jump forward to skip code that you don't want to run.\n"
+ ' or jump forward to skip code that you don’t want to run.\n'
'\n'
- ' It should be noted that not all jumps are allowed -- for '
- 'instance\n'
- ' it is not possible to jump into the middle of a "for" loop or '
- 'out\n'
- ' of a "finally" clause.\n'
+ ' It should be noted that not all jumps are allowed – for '
+ 'instance it\n'
+ ' is not possible to jump into the middle of a "for" loop or '
+ 'out of a\n'
+ ' "finally" clause.\n'
'\n'
'l(ist) [first[, last]]\n'
'\n'
' value.\n'
'\n'
' Note: "print()" can also be used, but is not a debugger '
- 'command\n'
- ' --- this executes the Python "print()" function.\n'
+ 'command —\n'
+ ' this executes the Python "print()" function.\n'
'\n'
'pp expression\n'
'\n'
'dictionary:\n'
'each key object is used as a key into the dictionary to store the\n'
'corresponding datum. This means that you can specify the same key\n'
- "multiple times in the key/datum list, and the final dictionary's "
+ 'multiple times in the key/datum list, and the final dictionary’s '
'value\n'
'for that key will be the last one given.\n'
'\n'
'\n'
'A dict comprehension, in contrast to list and set comprehensions,\n'
'needs two expressions separated with a colon followed by the usual\n'
- '"for" and "if" clauses. When the comprehension is run, the '
+ '“for” and “if” clauses. When the comprehension is run, the '
'resulting\n'
'key and value elements are inserted in the new dictionary in the '
'order\n'
'error (such as division by zero). A Python program can also\n'
'explicitly raise an exception with the "raise" statement. '
'Exception\n'
- 'handlers are specified with the "try" ... "except" statement. '
+ 'handlers are specified with the "try" … "except" statement. '
'The\n'
'"finally" clause of such a statement can be used to specify '
'cleanup\n'
'whether an\n'
'exception occurred or not in the preceding code.\n'
'\n'
- 'Python uses the "termination" model of error handling: an '
+ 'Python uses the “termination” model of error handling: an '
'exception\n'
'handler can find out what happened and continue execution at '
'an outer\n'
'argument to the interpreter) is a code block. A script command '
'(a\n'
'command specified on the interpreter command line with the '
- "'**-c**'\n"
+ '‘**-c**’\n'
'option) is a code block. The string argument passed to the '
'built-in\n'
'functions "eval()" and "exec()" is a code block.\n'
'contains\n'
'some administrative information (used for debugging) and '
'determines\n'
- "where and how execution continues after the code block's "
+ 'where and how execution continues after the code block’s '
'execution has\n'
'completed.\n'
'\n'
'nearest\n'
'enclosing scope. The set of all such scopes visible to a code '
'block\n'
- "is called the block's *environment*.\n"
+ 'is called the block’s *environment*.\n'
'\n'
'When a name is not found at all, a "NameError" exception is '
'raised. If\n'
'the class. The scope of names defined in a class block is '
'limited to\n'
'the class block; it does not extend to the code blocks of '
- 'methods --\n'
+ 'methods –\n'
'this includes comprehensions and generator expressions since '
'they are\n'
'implemented using a function scope. This means that the '
'global\n'
'namespace; this should be a dictionary or a module (in the '
'latter case\n'
- "the module's dictionary is used). By default, when in the "
+ 'the module’s dictionary is used). By default, when in the '
'"__main__"\n'
'module, "__builtins__" is the built-in module "builtins"; when '
'in any\n'
'error (such as division by zero). A Python program can also\n'
'explicitly raise an exception with the "raise" statement. '
'Exception\n'
- 'handlers are specified with the "try" ... "except" statement. '
+ 'handlers are specified with the "try" … "except" statement. '
'The\n'
'"finally" clause of such a statement can be used to specify '
'cleanup\n'
'whether an\n'
'exception occurred or not in the preceding code.\n'
'\n'
- 'Python uses the "termination" model of error handling: an '
+ 'Python uses the “termination” model of error handling: an '
'exception\n'
'handler can find out what happened and continue execution at an '
'outer\n'
'(a.k.a. a\n'
'*singleton*); it is optional in all other cases. A single '
'expression\n'
- "without a trailing comma doesn't create a tuple, but rather "
+ 'without a trailing comma doesn’t create a tuple, but rather '
'yields the\n'
'value of that expression. (To create an empty tuple, use an '
'empty pair\n'
'terminates.\n'
'\n'
'A "break" statement executed in the first suite terminates the loop\n'
- 'without executing the "else" clause\'s suite. A "continue" '
- 'statement\n'
+ 'without executing the "else" clause’s suite. A "continue" statement\n'
'executed in the first suite skips the rest of the suite and '
'continues\n'
'with the next item, or with the "else" clause if there is no next\n'
'Names in the target list are not deleted when the loop is finished,\n'
'but if the sequence is empty, they will not have been assigned to at\n'
'all by the loop. Hint: the built-in function "range()" returns an\n'
- 'iterator of integers suitable to emulate the effect of Pascal\'s "for '
+ 'iterator of integers suitable to emulate the effect of Pascal’s "for '
'i\n'
':= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n'
'\n'
'are\n'
'differences.\n'
'\n'
- 'Format strings contain "replacement fields" surrounded by '
+ 'Format strings contain “replacement fields” surrounded by '
'curly braces\n'
'"{}". Anything that is not contained in braces is '
'considered literal\n'
'\n'
'The *field_name* itself begins with an *arg_name* that is '
'either a\n'
- "number or a keyword. If it's a number, it refers to a "
+ 'number or a keyword. If it’s a number, it refers to a '
'positional\n'
- "argument, and if it's a keyword, it refers to a named "
+ 'argument, and if it’s a keyword, it refers to a named '
'keyword\n'
'argument. If the numerical arg_names in a format string '
'are 0, 1, 2,\n'
- '... in sequence, they can all be omitted (not just some) '
- 'and the\n'
- 'numbers 0, 1, 2, ... will be automatically inserted in that '
- 'order.\n'
- 'Because *arg_name* is not quote-delimited, it is not '
- 'possible to\n'
- 'specify arbitrary dictionary keys (e.g., the strings '
- '"\'10\'" or\n'
- '"\':-]\'") within a format string. The *arg_name* can be '
- 'followed by any\n'
- 'number of index or attribute expressions. An expression of '
- 'the form\n'
- '"\'.name\'" selects the named attribute using "getattr()", '
- 'while an\n'
- 'expression of the form "\'[index]\'" does an index lookup '
- 'using\n'
- '"__getitem__()".\n'
+ '… in sequence, they can all be omitted (not just some) and '
+ 'the numbers\n'
+ '0, 1, 2, … will be automatically inserted in that order. '
+ 'Because\n'
+ '*arg_name* is not quote-delimited, it is not possible to '
+ 'specify\n'
+ 'arbitrary dictionary keys (e.g., the strings "\'10\'" or '
+ '"\':-]\'") within\n'
+ 'a format string. The *arg_name* can be followed by any '
+ 'number of index\n'
+ 'or attribute expressions. An expression of the form '
+ '"\'.name\'" selects\n'
+ 'the named attribute using "getattr()", while an expression '
+ 'of the form\n'
+ '"\'[index]\'" does an index lookup using "__getitem__()".\n'
'\n'
'Changed in version 3.1: The positional argument specifiers '
'can be\n'
'alignment,\n'
'padding, decimal precision and so on. Each value type can '
'define its\n'
- 'own "formatting mini-language" or interpretation of the '
+ 'own “formatting mini-language” or interpretation of the '
'*format_spec*.\n'
'\n'
'Most built-in types support a common formatting '
'Format Specification Mini-Language\n'
'==================================\n'
'\n'
- '"Format specifications" are used within replacement fields '
+ '“Format specifications” are used within replacement fields '
'contained\n'
'within a format string to define how individual values are '
'presented\n'
'character that can be any character and defaults to a space '
'if\n'
'omitted. It is not possible to use a literal curly brace '
- '(""{"" or\n'
- '""}"") as the *fill* character in a formatted string '
+ '(“"{"” or\n'
+ '“"}"”) as the *fill* character in a formatted string '
'literal or when\n'
'using the "str.format()" method. However, it is possible '
'to insert a\n'
'curly brace with a nested replacement field. This '
- "limitation doesn't\n"
+ 'limitation doesn’t\n'
'affect the "format()" function.\n'
'\n'
'The meaning of the various alignment options is as '
'the sign (if any) |\n'
' | | but before the digits. This is used for '
'printing fields |\n'
- " | | in the form '+000000120'. This alignment "
+ ' | | in the form ‘+000000120’. This alignment '
'option is only |\n'
' | | valid for numeric types. It becomes the '
- "default when '0' |\n"
+ 'default when ‘0’ |\n'
' | | immediately precedes the field '
'width. |\n'
' '
' '
'+-----------+------------------------------------------------------------+\n'
'\n'
- 'The "\'#\'" option causes the "alternate form" to be used '
+ 'The "\'#\'" option causes the “alternate form” to be used '
'for the\n'
'conversion. The alternate form is defined differently for '
'different\n'
'+===========+============================================================+\n'
' | "\'e\'" | Exponent notation. Prints the number in '
'scientific |\n'
- " | | notation using the letter 'e' to indicate "
+ ' | | notation using the letter ‘e’ to indicate '
'the exponent. |\n'
' | | The default precision is '
'"6". |\n'
'+-----------+------------------------------------------------------------+\n'
' | "\'E\'" | Exponent notation. Same as "\'e\'" '
'except it uses an upper |\n'
- " | | case 'E' as the separator "
+ ' | | case ‘E’ as the separator '
'character. |\n'
' '
'+-----------+------------------------------------------------------------+\n'
"{longitude}'.format(**coord)\n"
" 'Coordinates: 37.24N, -115.81W'\n"
'\n'
- "Accessing arguments' attributes:\n"
+ 'Accessing arguments’ attributes:\n'
'\n'
' >>> c = 3-5j\n'
" >>> ('The complex number {0} is formed from the real "
' >>> str(Point(4, 2))\n'
" 'Point(4, 2)'\n"
'\n'
- "Accessing arguments' items:\n"
+ 'Accessing arguments’ items:\n'
'\n'
' >>> coord = (3, 5)\n'
" >>> 'X: {0[0]}; Y: {0[1]}'.format(coord)\n"
'"func".\n'
'\n'
'When one or more *parameters* have the form *parameter* "="\n'
- '*expression*, the function is said to have "default parameter '
- 'values."\n'
+ '*expression*, the function is said to have “default parameter '
+ 'values.”\n'
'For a parameter with a default value, the corresponding '
'*argument* may\n'
- "be omitted from a call, in which case the parameter's default "
+ 'be omitted from a call, in which case the parameter’s default '
'value is\n'
'substituted. If a parameter has a default value, all following\n'
- 'parameters up until the ""*"" must also have a default value --- '
- 'this\n'
- 'is a syntactic restriction that is not expressed by the '
- 'grammar.\n'
+ 'parameters up until the “"*"” must also have a default value — '
+ 'this is\n'
+ 'a syntactic restriction that is not expressed by the grammar.\n'
'\n'
'**Default parameter values are evaluated from left to right when '
'the\n'
'function definition is executed.** This means that the '
'expression is\n'
'evaluated once, when the function is defined, and that the same '
- '"pre-\n'
- 'computed" value is used for each call. This is especially '
+ '“pre-\n'
+ 'computed” value is used for each call. This is especially '
'important\n'
'to understand when a default parameter is a mutable object, such '
'as a\n'
'mentioned in\n'
'the parameter list, either from position arguments, from '
'keyword\n'
- 'arguments, or from default values. If the form ""*identifier"" '
+ 'arguments, or from default values. If the form “"*identifier"” '
'is\n'
'present, it is initialized to a tuple receiving any excess '
'positional\n'
'parameters, defaulting to the empty tuple. If the form\n'
- '""**identifier"" is present, it is initialized to a new ordered\n'
+ '“"**identifier"” is present, it is initialized to a new ordered\n'
'mapping receiving any excess keyword arguments, defaulting to a '
'new\n'
- 'empty mapping of the same type. Parameters after ""*"" or\n'
- '""*identifier"" are keyword-only parameters and may only be '
+ 'empty mapping of the same type. Parameters after “"*"” or\n'
+ '“"*identifier"” are keyword-only parameters and may only be '
'passed\n'
'used keyword arguments.\n'
'\n'
- 'Parameters may have annotations of the form "": expression"" '
+ 'Parameters may have annotations of the form “": expression"” '
'following\n'
'the parameter name. Any parameter may have an annotation even '
'those\n'
'of the form "*identifier" or "**identifier". Functions may '
'have\n'
- '"return" annotation of the form ""-> expression"" after the '
+ '“return” annotation of the form “"-> expression"” after the '
'parameter\n'
'list. These annotations can be any valid Python expression and '
'are\n'
'code.\n'
'The presence of annotations does not change the semantics of a\n'
'function. The annotation values are available as values of a\n'
- "dictionary keyed by the parameters' names in the "
+ 'dictionary keyed by the parameters’ names in the '
'"__annotations__"\n'
'attribute of the function object.\n'
'\n'
'lambda\n'
'expression is merely a shorthand for a simplified function '
'definition;\n'
- 'a function defined in a ""def"" statement can be passed around '
+ 'a function defined in a “"def"” statement can be passed around '
'or\n'
'assigned to another name just like a function defined by a '
'lambda\n'
- 'expression. The ""def"" form is actually more powerful since '
+ 'expression. The “"def"” form is actually more powerful since '
'it\n'
'allows the execution of multiple statements and annotations.\n'
'\n'
- "**Programmer's note:** Functions are first-class objects. A "
- '""def""\n'
+ '**Programmer’s note:** Functions are first-class objects. A '
+ '“"def"”\n'
'statement executed inside a function definition defines a local\n'
'function that can be returned or passed around. Free variables '
'used\n'
'change\n'
'the meaning of the program.\n'
'\n'
- '**Programmer\'s note:** "global" is a directive to the parser. '
- 'It\n'
+ '**Programmer’s note:** "global" is a directive to the parser. It\n'
'applies only to code parsed at the same time as the "global"\n'
'statement. In particular, a "global" statement contained in a '
'string\n'
'within the\n'
' context of a class definition, are re-written to use a '
'mangled form\n'
- ' to help avoid name clashes between "private" attributes of '
+ ' to help avoid name clashes between “private” attributes of '
'base and\n'
' derived classes. See section Identifiers (Names).\n',
'identifiers': 'Identifiers and keywords\n'
'within the\n'
' context of a class definition, are re-written to use a '
'mangled form\n'
- ' to help avoid name clashes between "private" attributes of '
+ ' to help avoid name clashes between “private” attributes of '
'base and\n'
' derived classes. See section Identifiers (Names).\n',
'if': 'The "if" statement\n'
'either\n'
'that the module could not be located, *or* that an error occurred\n'
'while initializing the module, which includes execution of the\n'
- "module's code.\n"
+ 'module’s code.\n'
'\n'
'If the requested module is retrieved successfully, it will be '
'made\n'
'\n'
'* If no other name is specified, and the module being imported is '
'a\n'
- " top level module, the module's name is bound in the local "
+ ' top level module, the module’s name is bound in the local '
'namespace\n'
' as a reference to the imported module\n'
'\n'
'\n'
'The *public names* defined by a module are determined by checking '
'the\n'
- 'module\'s namespace for a variable named "__all__"; if defined, it '
+ 'module’s namespace for a variable named "__all__"; if defined, it '
'must\n'
'be a sequence of strings which are names defined or imported by '
'that\n'
'and\n'
'are required to exist. If "__all__" is not defined, the set of '
'public\n'
- "names includes all names found in the module's namespace which do "
+ 'names includes all names found in the module’s namespace which do '
'not\n'
'begin with an underscore character ("\'_\'"). "__all__" should '
'contain\n'
'were\n'
'imported and used within the module).\n'
'\n'
- 'The wild card form of import --- "from module import *" --- is '
- 'only\n'
+ 'The wild card form of import — "from module import *" — is only\n'
'allowed at the module level. Attempting to use it in class or\n'
'function definitions will raise a "SyntaxError".\n'
'\n'
'\n'
' import __future__ [as name]\n'
'\n'
- "That is not a future statement; it's an ordinary import statement "
+ 'That is not a future statement; it’s an ordinary import statement '
'with\n'
'no special semantics or syntax restrictions.\n'
'\n'
'the\n'
'future statement. This can be controlled by optional arguments '
'to\n'
- '"compile()" --- see the documentation of that function for '
- 'details.\n'
+ '"compile()" — see the documentation of that function for details.\n'
'\n'
'A future statement typed at an interactive interpreter prompt '
'will\n'
'\n'
'Lambda expressions (sometimes called lambda forms) are used to '
'create\n'
- 'anonymous functions. The expression "lambda arguments: '
+ 'anonymous functions. The expression "lambda parameters: '
'expression"\n'
'yields a function object. The unnamed object behaves like a '
'function\n'
'object defined with:\n'
'\n'
- ' def <lambda>(arguments):\n'
+ ' def <lambda>(parameters):\n'
' return expression\n'
'\n'
'See section Function definitions for the syntax of parameter '
'nearest\n'
'enclosing scope. The set of all such scopes visible to a code '
'block\n'
- "is called the block's *environment*.\n"
+ 'is called the block’s *environment*.\n'
'\n'
'When a name is not found at all, a "NameError" exception is '
'raised. If\n'
'the class. The scope of names defined in a class block is limited '
'to\n'
'the class block; it does not extend to the code blocks of methods '
- '--\n'
+ '–\n'
'this includes comprehensions and generator expressions since they '
'are\n'
'implemented using a function scope. This means that the '
'global\n'
'namespace; this should be a dictionary or a module (in the latter '
'case\n'
- "the module's dictionary is used). By default, when in the "
+ 'the module’s dictionary is used). By default, when in the '
'"__main__"\n'
'module, "__builtins__" is the built-in module "builtins"; when in '
'any\n'
'\n'
'Note that numeric literals do not include a sign; a phrase like '
'"-1"\n'
- 'is actually an expression composed of the unary operator \'"-"\' '
+ 'is actually an expression composed of the unary operator ‘"-"‘ '
'and the\n'
'literal "1".\n',
'numeric-types': 'Emulating numeric types\n'
'"__rpow__()" (the\n'
' coercion rules would become too complicated).\n'
'\n'
- " Note: If the right operand's type is a subclass of the "
+ ' Note: If the right operand’s type is a subclass of the '
'left\n'
- " operand's type and that subclass provides the "
+ ' operand’s type and that subclass provides the '
'reflected method\n'
' for the operation, this method will be called before '
'the left\n'
- " operand's non-reflected method. This behavior allows "
+ ' operand’s non-reflected method. This behavior allows '
'subclasses\n'
- " to override their ancestors' operations.\n"
+ ' to override their ancestors’ operations.\n'
'\n'
'object.__iadd__(self, other)\n'
'object.__isub__(self, other)\n'
'certain\n'
' situations, augmented assignment can result in '
'unexpected errors\n'
- " (see Why does a_tuple[i] += ['item'] raise an exception "
+ ' (see Why does a_tuple[i] += [‘item’] raise an exception '
'when the\n'
' addition works?), but this behavior is in fact part of '
'the data\n'
'objects': 'Objects, values and types\n'
'*************************\n'
'\n'
- "*Objects* are Python's abstraction for data. All data in a "
+ '*Objects* are Python’s abstraction for data. All data in a '
'Python\n'
'program is represented by objects or by relations between '
'objects. (In\n'
- 'a sense, and in conformance to Von Neumann\'s model of a "stored\n'
- 'program computer," code is also represented by objects.)\n'
+ 'a sense, and in conformance to Von Neumann’s model of a “stored\n'
+ 'program computer,” code is also represented by objects.)\n'
'\n'
- "Every object has an identity, a type and a value. An object's\n"
+ 'Every object has an identity, a type and a value. An object’s\n'
'*identity* never changes once it has been created; you may think '
'of it\n'
- 'as the object\'s address in memory. The \'"is"\' operator '
- 'compares the\n'
+ 'as the object’s address in memory. The ‘"is"’ operator compares '
+ 'the\n'
'identity of two objects; the "id()" function returns an integer\n'
'representing its identity.\n'
'\n'
'memory\n'
'address where "x" is stored.\n'
'\n'
- "An object's type determines the operations that the object "
+ 'An object’s type determines the operations that the object '
'supports\n'
- '(e.g., "does it have a length?") and also defines the possible '
+ '(e.g., “does it have a length?”) and also defines the possible '
'values\n'
'for objects of that type. The "type()" function returns an '
- "object's\n"
+ 'object’s\n'
'type (which is an object itself). Like its identity, an '
- "object's\n"
+ 'object’s\n'
'*type* is also unchangeable. [1]\n'
'\n'
'The *value* of some objects can change. Objects whose value can\n'
'once they are created are called *immutable*. (The value of an\n'
'immutable container object that contains a reference to a '
'mutable\n'
- "object can change when the latter's value is changed; however "
+ 'object can change when the latter’s value is changed; however '
'the\n'
'container is still considered immutable, because the collection '
'of\n'
'objects it contains cannot be changed. So, immutability is not\n'
'strictly the same as having an unchangeable value, it is more '
'subtle.)\n'
- "An object's mutability is determined by its type; for instance,\n"
+ 'An object’s mutability is determined by its type; for instance,\n'
'numbers, strings and tuples are immutable, while dictionaries '
'and\n'
'lists are mutable.\n'
'Objects are never explicitly destroyed; however, when they '
'become\n'
'unreachable they may be garbage-collected. An implementation is\n'
- 'allowed to postpone garbage collection or omit it altogether --- '
- 'it is\n'
- 'a matter of implementation quality how garbage collection is\n'
+ 'allowed to postpone garbage collection or omit it altogether — it '
+ 'is a\n'
+ 'matter of implementation quality how garbage collection is\n'
'implemented, as long as no objects are collected that are still\n'
'reachable.\n'
'\n'
'(so\n'
'you should always close files explicitly).\n'
'\n'
- "Note that the use of the implementation's tracing or debugging\n"
+ 'Note that the use of the implementation’s tracing or debugging\n'
'facilities may keep objects alive that would normally be '
'collectable.\n'
- 'Also note that catching an exception with a \'"try"..."except"\'\n'
- 'statement may keep objects alive.\n'
+ 'Also note that catching an exception with a ‘"try"…"except"’ '
+ 'statement\n'
+ 'may keep objects alive.\n'
'\n'
- 'Some objects contain references to "external" resources such as '
+ 'Some objects contain references to “external” resources such as '
'open\n'
'files or windows. It is understood that these resources are '
'freed\n'
'release the external resource, usually a "close()" method. '
'Programs\n'
'are strongly recommended to explicitly close such objects. The\n'
- '\'"try"..."finally"\' statement and the \'"with"\' statement '
- 'provide\n'
+ '‘"try"…"finally"’ statement and the ‘"with"’ statement provide\n'
'convenient ways to do this.\n'
'\n'
'Some objects contain references to other objects; these are '
'called\n'
'*containers*. Examples of containers are tuples, lists and\n'
- "dictionaries. The references are part of a container's value. "
+ 'dictionaries. The references are part of a container’s value. '
'In\n'
'most cases, when we talk about the value of a container, we imply '
'the\n'
'| "lambda" | '
'Lambda expression |\n'
'+-------------------------------------------------+---------------------------------------+\n'
- '| "if" -- "else" | '
+ '| "if" – "else" | '
'Conditional expression |\n'
'+-------------------------------------------------+---------------------------------------+\n'
'| "or" | '
'application.\n'
'\n'
'[2] If x is very close to an exact integer multiple of '
- "y, it's\n"
+ 'y, it’s\n'
' possible for "x//y" to be one larger than '
'"(x-x%y)//y" due to\n'
' rounding. In such cases, Python returns the latter '
'\n'
'[3] The Unicode standard distinguishes between *code '
'points* (e.g.\n'
- ' U+0041) and *abstract characters* (e.g. "LATIN '
- 'CAPITAL LETTER A").\n'
+ ' U+0041) and *abstract characters* (e.g. “LATIN '
+ 'CAPITAL LETTER A”).\n'
' While most abstract characters in Unicode are only '
'represented\n'
' using one code point, there is a number of abstract '
' that can in addition be represented using a sequence '
'of more than\n'
' one code point. For example, the abstract character '
- '"LATIN\n'
- ' CAPITAL LETTER C WITH CEDILLA" can be represented as '
+ '“LATIN\n'
+ ' CAPITAL LETTER C WITH CEDILLA” can be represented as '
'a single\n'
' *precomposed character* at code position U+00C7, or '
'as a sequence\n'
'to humans. For\n'
' example, ""\\u00C7" == "\\u0043\\u0327"" is "False", '
'even though both\n'
- ' strings represent the same abstract character "LATIN '
+ ' strings represent the same abstract character “LATIN '
'CAPITAL\n'
- ' LETTER C WITH CEDILLA".\n'
+ ' LETTER C WITH CEDILLA”.\n'
'\n'
' To compare strings at the level of abstract '
'characters (that is,\n'
'\n'
' pass_stmt ::= "pass"\n'
'\n'
- '"pass" is a null operation --- when it is executed, nothing '
- 'happens.\n'
- 'It is useful as a placeholder when a statement is required\n'
- 'syntactically, but no code needs to be executed, for example:\n'
+ '"pass" is a null operation — when it is executed, nothing happens. '
+ 'It\n'
+ 'is useful as a placeholder when a statement is required '
+ 'syntactically,\n'
+ 'but no code needs to be executed, for example:\n'
'\n'
' def f(arg): pass # a function that does nothing (yet)\n'
'\n'
'"BaseException". If it is a class, the exception instance will be\n'
'obtained when needed by instantiating the class with no arguments.\n'
'\n'
- "The *type* of the exception is the exception instance's class, the\n"
+ 'The *type* of the exception is the exception instance’s class, the\n'
'*value* is the instance itself.\n'
'\n'
'A traceback object is normally created automatically when an '
'inside\n'
'an exception handler or a "finally" clause: the previous exception '
'is\n'
- 'then attached as the new exception\'s "__context__" attribute:\n'
+ 'then attached as the new exception’s "__context__" attribute:\n'
'\n'
' >>> try:\n'
' ... print(1 / 0)\n'
'"clear()",\n'
'"setdefault()", "pop()", "popitem()", "copy()", and '
'"update()"\n'
- "behaving similar to those for Python's standard dictionary "
+ 'behaving similar to those for Python’s standard dictionary '
'objects.\n'
'The "collections" module provides a "MutableMapping" '
'abstract base\n'
'the\n'
'"__contains__()" method to allow efficient use of the "in" '
'operator;\n'
- 'for mappings, "in" should search the mapping\'s keys; for '
+ 'for mappings, "in" should search the mapping’s keys; for '
'sequences, it\n'
'should search through the values. It is further '
'recommended that both\n'
'Should return\n'
' the length of the object, an integer ">=" 0. Also, an '
'object that\n'
- ' doesn\'t define a "__bool__()" method and whose '
+ ' doesn’t define a "__bool__()" method and whose '
'"__len__()" method\n'
' returns zero is considered to be false in a Boolean '
'context.\n'
'values or\n'
' the key-item pairs.\n'
'\n'
- ' For objects that don\'t define "__contains__()", the '
+ ' For objects that don’t define "__contains__()", the '
'membership test\n'
' first tries iteration via "__iter__()", then the old '
'sequence\n'
'object.__dict__\n'
'\n'
' A dictionary or other mapping object used to store an '
- "object's\n"
+ 'object’s\n'
' (writable) attributes.\n'
'\n'
'instance.__class__\n'
'to\n'
' "[1.0, 2.0]", and similarly for tuples.\n'
'\n'
- "[3] They must have since the parser can't tell the type of "
+ '[3] They must have since the parser can’t tell the type of '
'the\n'
' operands.\n'
'\n'
'[4] Cased characters are those with general category '
'property\n'
- ' being one of "Lu" (Letter, uppercase), "Ll" (Letter, '
+ ' being one of “Lu” (Letter, uppercase), “Ll” (Letter, '
'lowercase),\n'
- ' or "Lt" (Letter, titlecase).\n'
+ ' or “Lt” (Letter, titlecase).\n'
'\n'
'[5] To format only a tuple you should therefore provide a\n'
' singleton tuple whose only element is the tuple to be '
'special\n'
'syntax (such as arithmetic operations or subscripting and '
'slicing) by\n'
- "defining methods with special names. This is Python's "
+ 'defining methods with special names. This is Python’s '
'approach to\n'
'*operator overloading*, allowing classes to define their own '
'behavior\n'
'elements, but\n'
'extracting a slice may not make sense. (One example of this '
'is the\n'
- '"NodeList" interface in the W3C\'s Document Object Model.)\n'
+ '"NodeList" interface in the W3C’s Document Object Model.)\n'
'\n'
'\n'
'Basic customization\n'
'\n'
' Typical implementations create a new instance of the '
'class by\n'
- ' invoking the superclass\'s "__new__()" method using\n'
+ ' invoking the superclass’s "__new__()" method using\n'
' "super().__new__(cls[, ...])" with appropriate arguments '
'and then\n'
' modifying the newly-created instance as necessary before '
'\n'
' If "__new__()" returns an instance of *cls*, then the '
'new\n'
- ' instance\'s "__init__()" method will be invoked like\n'
+ ' instance’s "__init__()" method will be invoked like\n'
' "__init__(self[, ...])", where *self* is the new instance '
'and the\n'
' remaining arguments are the same as were passed to '
'\n'
' If "__new__()" does not return an instance of *cls*, then '
'the new\n'
- ' instance\'s "__init__()" method will not be invoked.\n'
+ ' instance’s "__init__()" method will not be invoked.\n'
'\n'
' "__new__()" is intended mainly to allow subclasses of '
'immutable\n'
'those\n'
' passed to the class constructor expression. If a base '
'class has an\n'
- ' "__init__()" method, the derived class\'s "__init__()" '
+ ' "__init__()" method, the derived class’s "__init__()" '
'method, if\n'
' any, must explicitly call it to ensure proper '
'initialization of the\n'
'is also\n'
' called a finalizer or (improperly) a destructor. If a '
'base class\n'
- ' has a "__del__()" method, the derived class\'s '
- '"__del__()" method,\n'
+ ' has a "__del__()" method, the derived class’s "__del__()" '
+ 'method,\n'
' if any, must explicitly call it to ensure proper deletion '
'of the\n'
' base class part of the instance.\n'
'for\n'
' objects that still exist when the interpreter exits.\n'
'\n'
- ' Note: "del x" doesn\'t directly call "x.__del__()" --- '
- 'the former\n'
+ ' Note: "del x" doesn’t directly call "x.__del__()" — the '
+ 'former\n'
' decrements the reference count for "x" by one, and the '
'latter is\n'
- ' only called when "x"\'s reference count reaches zero.\n'
+ ' only called when "x"’s reference count reaches zero.\n'
'\n'
' **CPython implementation detail:** It is possible for a '
'reference\n'
'reference\n'
' cycles is when an exception has been caught in a local '
'variable.\n'
- " The frame's locals then reference the exception, which "
+ ' The frame’s locals then reference the exception, which '
'references\n'
' its own traceback, which references the locals of all '
'frames caught\n'
'object.__repr__(self)\n'
'\n'
' Called by the "repr()" built-in function to compute the '
- '"official"\n'
+ '“official”\n'
' string representation of an object. If at all possible, '
'this\n'
' should look like a valid Python expression that could be '
' value must be a string object. If a class defines '
'"__repr__()" but\n'
' not "__str__()", then "__repr__()" is also used when an '
- '"informal"\n'
+ '“informal”\n'
' string representation of instances of that class is '
'required.\n'
'\n'
'\n'
' Called by "str(object)" and the built-in functions '
'"format()" and\n'
- ' "print()" to compute the "informal" or nicely printable '
+ ' "print()" to compute the “informal” or nicely printable '
'string\n'
' representation of an object. The return value must be a '
'string\n'
'extension,\n'
' evaluation of formatted string literals and the '
'"str.format()"\n'
- ' method, to produce a "formatted" string representation of '
+ ' method, to produce a “formatted” string representation of '
'an\n'
' object. The "format_spec" argument is a string that '
'contains a\n'
'object.__gt__(self, other)\n'
'object.__ge__(self, other)\n'
'\n'
- ' These are the so-called "rich comparison" methods. The\n'
+ ' These are the so-called “rich comparison” methods. The\n'
' correspondence between operator symbols and method names '
'is as\n'
' follows: "x<y" calls "x.__lt__(y)", "x<=y" calls '
' when the left argument does not support the operation but '
'the right\n'
' argument does); rather, "__lt__()" and "__gt__()" are '
- "each other's\n"
- ' reflection, "__le__()" and "__ge__()" are each other\'s '
+ 'each other’s\n'
+ ' reflection, "__le__()" and "__ge__()" are each other’s '
'reflection,\n'
' and "__eq__()" and "__ne__()" are their own reflection. '
'If the\n'
- " operands are of different types, and right operand's type "
+ ' operands are of different types, and right operand’s type '
'is a\n'
- " direct or indirect subclass of the left operand's type, "
+ ' direct or indirect subclass of the left operand’s type, '
'the\n'
' reflected method of the right operand has priority, '
'otherwise the\n'
- " left operand's method has priority. Virtual subclassing "
+ ' left operand’s method has priority. Virtual subclassing '
'is not\n'
' considered.\n'
'\n'
' return hash((self.name, self.nick, self.color))\n'
'\n'
' Note: "hash()" truncates the value returned from an '
- "object's\n"
+ 'object’s\n'
' custom "__hash__()" method to the size of a '
'"Py_ssize_t". This\n'
' is typically 8 bytes on 64-bit builds and 4 bytes on '
'32-bit\n'
- ' builds. If an object\'s "__hash__()" must '
- 'interoperate on builds\n'
+ ' builds. If an object’s "__hash__()" must interoperate '
+ 'on builds\n'
' of different bit sizes, be sure to check the width on '
'all\n'
' supported builds. An easy way to do this is with '
' implements an "__eq__()" method, it should not implement\n'
' "__hash__()", since the implementation of hashable '
'collections\n'
- " requires that a key's hash value is immutable (if the "
- "object's hash\n"
+ ' requires that a key’s hash value is immutable (if the '
+ 'object’s hash\n'
' value changes, it will be in the wrong hash bucket).\n'
'\n'
' User-defined classes have "__eq__()" and "__hash__()" '
'\n'
' Note: By default, the "__hash__()" values of str, bytes '
'and\n'
- ' datetime objects are "salted" with an unpredictable '
+ ' datetime objects are “salted” with an unpredictable '
'random value.\n'
' Although they remain constant within an individual '
'Python\n'
'\n'
'Note: Setting module "__class__" only affects lookups made '
'using the\n'
- ' attribute access syntax -- directly accessing the module '
+ ' attribute access syntax – directly accessing the module '
'globals\n'
' (whether by code within the module, or via a reference to '
'the\n'
- " module's globals dictionary) is unaffected.\n"
+ ' module’s globals dictionary) is unaffected.\n'
'\n'
'Changed in version 3.5: "__class__" module attribute is now '
'writable.\n'
'class\n'
'containing the method (a so-called *descriptor* class) '
'appears in an\n'
- "*owner* class (the descriptor must be in either the owner's "
+ '*owner* class (the descriptor must be in either the owner’s '
'class\n'
'dictionary or in the class dictionary for one of its '
'parents). In the\n'
- 'examples below, "the attribute" refers to the attribute '
+ 'examples below, “the attribute” refers to the attribute '
'whose name is\n'
- 'the key of the property in the owner class\' "__dict__".\n'
+ 'the key of the property in the owner class’ "__dict__".\n'
'\n'
'object.__get__(self, instance, owner)\n'
'\n'
'--------------------\n'
'\n'
'In general, a descriptor is an object attribute with '
- '"binding\n'
- 'behavior", one whose attribute access has been overridden by '
+ '“binding\n'
+ 'behavior”, one whose attribute access has been overridden by '
'methods\n'
'in the descriptor protocol: "__get__()", "__set__()", and\n'
'"__delete__()". If any of those methods are defined for an '
'\n'
'The default behavior for attribute access is to get, set, or '
'delete\n'
- "the attribute from an object's dictionary. For instance, "
+ 'the attribute from an object’s dictionary. For instance, '
'"a.x" has a\n'
'lookup chain starting with "a.__dict__[\'x\']", then\n'
'"type(a).__dict__[\'x\']", and continuing through the base '
'does not define "__get__()", then accessing the attribute '
'will return\n'
'the descriptor object itself unless there is a value in the '
- "object's\n"
+ 'object’s\n'
'instance dictionary. If the descriptor defines "__set__()" '
'and/or\n'
'"__delete__()", it is a data descriptor; if it defines '
'\n'
'* Nonempty *__slots__* does not work for classes derived '
'from\n'
- ' "variable-length" built-in types such as "int", "bytes" '
+ ' “variable-length” built-in types such as "int", "bytes" '
'and "tuple".\n'
'\n'
'* Any non-string iterable may be assigned to *__slots__*. '
'to class\n'
'decorators, but where class decorators only affect the '
'specific class\n'
- 'they\'re applied to, "__init_subclass__" solely applies to '
+ 'they’re applied to, "__init_subclass__" solely applies to '
'future\n'
'subclasses of the class defining the method.\n'
'\n'
'\n'
' Keyword arguments which are given to a new class are '
'passed to the\n'
- ' parent\'s class "__init_subclass__". For compatibility '
+ ' parent’s class "__init_subclass__". For compatibility '
'with other\n'
' classes using "__init_subclass__", one should take out '
'the needed\n'
'initialised\n'
'correctly. Failing to do so will result in a '
'"DeprecationWarning" in\n'
- 'Python 3.6, and a "RuntimeWarning" in the future.\n'
+ 'Python 3.6, and a "RuntimeError" in Python 3.8.\n'
'\n'
'When using the default metaclass "type", or any metaclass '
'that\n'
'\n'
'When the class definition for *A* gets executed, the process '
'begins\n'
- 'with calling the metaclass\'s "__prepare__()" method which '
+ 'with calling the metaclass’s "__prepare__()" method which '
'returns an\n'
'empty "collections.OrderedDict". That mapping records the '
'methods and\n'
'class\n'
'statement. Once those definitions are executed, the ordered '
'dictionary\n'
- 'is fully populated and the metaclass\'s "__new__()" method '
+ 'is fully populated and the metaclass’s "__new__()" method '
'gets\n'
'invoked. That method builds the new type and it saves the '
'ordered\n'
'methods in\n'
'order to allow the addition of Abstract Base Classes (ABCs) '
'as\n'
- '"virtual base classes" to any class or type (including '
+ '“virtual base classes” to any class or type (including '
'built-in\n'
'types), including other ABCs.\n'
'\n'
'\n'
'object.__call__(self[, args...])\n'
'\n'
- ' Called when the instance is "called" as a function; if '
+ ' Called when the instance is “called” as a function; if '
'this method\n'
' is defined, "x(arg1, arg2, ...)" is a shorthand for\n'
' "x.__call__(arg1, arg2, ...)".\n'
'"clear()",\n'
'"setdefault()", "pop()", "popitem()", "copy()", and '
'"update()"\n'
- "behaving similar to those for Python's standard dictionary "
+ 'behaving similar to those for Python’s standard dictionary '
'objects.\n'
'The "collections" module provides a "MutableMapping" '
'abstract base\n'
'recommended that both mappings and sequences implement the\n'
'"__contains__()" method to allow efficient use of the "in" '
'operator;\n'
- 'for mappings, "in" should search the mapping\'s keys; for '
+ 'for mappings, "in" should search the mapping’s keys; for '
'sequences, it\n'
'should search through the values. It is further recommended '
'that both\n'
'Should return\n'
' the length of the object, an integer ">=" 0. Also, an '
'object that\n'
- ' doesn\'t define a "__bool__()" method and whose '
+ ' doesn’t define a "__bool__()" method and whose '
'"__len__()" method\n'
' returns zero is considered to be false in a Boolean '
'context.\n'
'values or\n'
' the key-item pairs.\n'
'\n'
- ' For objects that don\'t define "__contains__()", the '
+ ' For objects that don’t define "__contains__()", the '
'membership test\n'
' first tries iteration via "__iter__()", then the old '
'sequence\n'
'"__rpow__()" (the\n'
' coercion rules would become too complicated).\n'
'\n'
- " Note: If the right operand's type is a subclass of the "
+ ' Note: If the right operand’s type is a subclass of the '
'left\n'
- " operand's type and that subclass provides the reflected "
+ ' operand’s type and that subclass provides the reflected '
'method\n'
' for the operation, this method will be called before '
'the left\n'
- " operand's non-reflected method. This behavior allows "
+ ' operand’s non-reflected method. This behavior allows '
'subclasses\n'
- " to override their ancestors' operations.\n"
+ ' to override their ancestors’ operations.\n'
'\n'
'object.__iadd__(self, other)\n'
'object.__isub__(self, other)\n'
'certain\n'
' situations, augmented assignment can result in unexpected '
'errors\n'
- " (see Why does a_tuple[i] += ['item'] raise an exception "
+ ' (see Why does a_tuple[i] += [‘item’] raise an exception '
'when the\n'
' addition works?), but this behavior is in fact part of '
'the data\n'
'\n'
' Enter the runtime context related to this object. The '
'"with"\n'
- " statement will bind this method's return value to the "
+ ' statement will bind this method’s return value to the '
'target(s)\n'
' specified in the "as" clause of the statement, if any.\n'
'\n'
'\n'
' Note that "__exit__()" methods should not reraise the '
'passed-in\n'
- " exception; this is the caller's responsibility.\n"
+ ' exception; this is the caller’s responsibility.\n'
'\n'
'See also:\n'
'\n'
- ' **PEP 343** - The "with" statement\n'
+ ' **PEP 343** - The “with” statement\n'
' The specification, background, and examples for the '
'Python "with"\n'
' statement.\n'
'\n'
'For custom classes, implicit invocations of special methods '
'are only\n'
- "guaranteed to work correctly if defined on an object's type, "
+ 'guaranteed to work correctly if defined on an object’s type, '
'not in\n'
- "the object's instance dictionary. That behaviour is the "
+ 'the object’s instance dictionary. That behaviour is the '
'reason why\n'
'the following code raises an exception:\n'
'\n'
'\n'
'Incorrectly attempting to invoke an unbound method of a '
'class in this\n'
- "way is sometimes referred to as 'metaclass confusion', and "
+ 'way is sometimes referred to as ‘metaclass confusion’, and '
'is avoided\n'
'by bypassing the instance when looking up special methods:\n'
'\n'
'interest of\n'
'correctness, implicit special method lookup generally also '
'bypasses\n'
- 'the "__getattribute__()" method even of the object\'s '
+ 'the "__getattribute__()" method even of the object’s '
'metaclass:\n'
'\n'
' >>> class Meta(type):\n'
'Alphabetic\n'
' characters are those characters defined in the Unicode '
'character\n'
- ' database as "Letter", i.e., those with general category '
+ ' database as “Letter”, i.e., those with general category '
'property\n'
- ' being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note '
+ ' being one of “Lm”, “Lt”, “Lu”, “Ll”, or “Lo”. Note '
'that this is\n'
- ' different from the "Alphabetic" property defined in the '
+ ' different from the “Alphabetic” property defined in the '
'Unicode\n'
' Standard.\n'
'\n'
'in base 10,\n'
' e.g. U+0660, ARABIC-INDIC DIGIT ZERO. Formally a '
'decimal character\n'
- ' is a character in the Unicode General Category "Nd".\n'
+ ' is a character in the Unicode General Category “Nd”.\n'
'\n'
'str.isdigit()\n'
'\n'
'characters are\n'
' those characters defined in the Unicode character '
'database as\n'
- ' "Other" or "Separator", excepting the ASCII space '
+ ' “Other” or “Separator”, excepting the ASCII space '
'(0x20) which is\n'
' considered printable. (Note that printable characters '
'in this\n'
'Whitespace\n'
' characters are those characters defined in the Unicode '
'character\n'
- ' database as "Other" or "Separator" and those with '
+ ' database as “Other” or “Separator” and those with '
'bidirectional\n'
- ' property being one of "WS", "B", or "S".\n'
+ ' property being one of “WS”, “B”, or “S”.\n'
'\n'
'str.istitle()\n'
'\n'
'"str.upper().isupper()" might be\n'
' "False" if "s" contains uncased characters or if the '
'Unicode\n'
- ' category of the resulting character(s) is not "Lu" '
+ ' category of the resulting character(s) is not “Lu” '
'(Letter,\n'
- ' uppercase), but e.g. "Lt" (Letter, titlecase).\n'
+ ' uppercase), but e.g. “Lt” (Letter, titlecase).\n'
'\n'
' The uppercasing algorithm used is described in section '
'3.13 of the\n'
'literals,\n'
'"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated '
'specially.\n'
- "Given that Python 2.x's raw unicode literals behave differently "
+ 'Given that Python 2.x’s raw unicode literals behave differently '
'than\n'
- 'Python 3.x\'s the "\'ur\'" syntax is not supported.\n'
+ 'Python 3.x’s the "\'ur\'" syntax is not supported.\n'
'\n'
'New in version 3.3: The "\'rb\'" prefix of raw bytes literals has '
'been\n'
'In triple-quoted literals, unescaped newlines and quotes are '
'allowed\n'
'(and are retained), except that three unescaped quotes in a row\n'
- 'terminate the literal. (A "quote" is the character used to open '
+ 'terminate the literal. (A “quote” is the character used to open '
'the\n'
'literal, i.e. either "\'" or """.)\n'
'\n'
'item whose\n'
'index is that value (counting from zero). Since the support '
'for\n'
- "negative indices and slicing occurs in the object's "
+ 'negative indices and slicing occurs in the object’s '
'"__getitem__()"\n'
'method, subclasses overriding this method will need to '
'explicitly add\n'
'that support.\n'
'\n'
- "A string's items are characters. A character is not a "
+ 'A string’s items are characters. A character is not a '
'separate data\n'
'type but a string of exactly one character.\n',
'truth': 'Truth Value Testing\n'
'less except clause, if present, must be last; it matches any\n'
'exception. For an except clause with an expression, that expression\n'
'is evaluated, and the clause matches the exception if the resulting\n'
- 'object is "compatible" with the exception. An object is compatible\n'
+ 'object is “compatible” with the exception. An object is compatible\n'
'with an exception if it is the class or a base class of the '
'exception\n'
'object or a tuple containing an item compatible with the exception.\n'
'When a matching except clause is found, the exception is assigned to\n'
'the target specified after the "as" keyword in that except clause, '
'if\n'
- "present, and the except clause's suite is executed. All except\n"
+ 'present, and the except clause’s suite is executed. All except\n'
'clauses must have an executable block. When the end of this block '
'is\n'
'reached, execution continues normally after the entire try '
'cycle with the stack frame, keeping all locals in that frame alive\n'
'until the next garbage collection occurs.\n'
'\n'
- "Before an except clause's suite is executed, details about the\n"
+ 'Before an except clause’s suite is executed, details about the\n'
'exception are stored in the "sys" module and can be accessed via\n'
'"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of '
'the\n'
'the end of the "try" clause. [2] Exceptions in the "else" clause are\n'
'not handled by the preceding "except" clauses.\n'
'\n'
- 'If "finally" is present, it specifies a \'cleanup\' handler. The '
+ 'If "finally" is present, it specifies a ‘cleanup’ handler. The '
'"try"\n'
'clause is executed, including any "except" and "else" clauses. If '
'an\n'
'execution of the "finally" clause.\n'
'\n'
'When a "return", "break" or "continue" statement is executed in the\n'
- '"try" suite of a "try"..."finally" statement, the "finally" clause '
- 'is\n'
- 'also executed \'on the way out.\' A "continue" statement is illegal '
- 'in\n'
+ '"try" suite of a "try"…"finally" statement, the "finally" clause is\n'
+ 'also executed ‘on the way out.’ A "continue" statement is illegal in\n'
'the "finally" clause. (The reason is a problem with the current\n'
- 'implementation --- this restriction may be lifted in the future).\n'
+ 'implementation — this restriction may be lifted in the future).\n'
'\n'
'The return value of a function is determined by the last "return"\n'
'statement executed. Since the "finally" clause always executes, a\n'
'will often be provided via the standard library instead.\n'
'\n'
'Some of the type descriptions below contain a paragraph listing\n'
- "'special attributes.' These are attributes that provide access to "
+ '‘special attributes.’ These are attributes that provide access to '
'the\n'
'implementation and are not intended for general use. Their '
'definition\n'
'It\n'
' is used to signify the absence of a value in many situations, '
'e.g.,\n'
- " it is returned from functions that don't explicitly return\n"
+ ' it is returned from functions that don’t explicitly return\n'
' anything. Its truth value is false.\n'
'\n'
'NotImplemented\n'
'shift\n'
' and mask operations, a binary representation is assumed, '
'and\n'
- " negative numbers are represented in a variant of 2's\n"
+ ' negative numbers are represented in a variant of 2’s\n'
' complement which gives the illusion of an infinite string '
'of\n'
' sign bits extending to the left.\n'
'items\n'
' of a sequence. When the length of a sequence is *n*, the index '
'set\n'
- ' contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* '
+ ' contains the numbers 0, 1, …, *n*-1. Item *i* of sequence *a* '
'is\n'
' selected by "a[i]".\n'
'\n'
'implies\n'
' that the index set is renumbered so that it starts at 0.\n'
'\n'
- ' Some sequences also support "extended slicing" with a third '
- '"step"\n'
+ ' Some sequences also support “extended slicing” with a third '
+ '“step”\n'
' parameter: "a[i:j:k]" selects all items of *a* with index *x* '
'where\n'
' "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n'
'code\n'
' points. All the code points in the range "U+0000 - '
'U+10FFFF"\n'
- " can be represented in a string. Python doesn't have a "
+ ' can be represented in a string. Python doesn’t have a '
'"char"\n'
' type; instead, every code point in the string is '
'represented\n'
' The items of a tuple are arbitrary Python objects. Tuples '
'of\n'
' two or more items are formed by comma-separated lists of\n'
- " expressions. A tuple of one item (a 'singleton') can be\n"
+ ' expressions. A tuple of one item (a ‘singleton’) can be\n'
' formed by affixing a comma to an expression (an expression '
'by\n'
' itself does not create a tuple, since parentheses must be\n'
'object\n'
' identity, the reason being that the efficient implementation '
'of\n'
- " dictionaries requires a key's hash value to remain constant.\n"
+ ' dictionaries requires a key’s hash value to remain constant.\n'
' Numeric types used for keys obey the normal rules for '
'numeric\n'
' comparison: if two numbers compare equal (e.g., "1" and '
' definition (see section Function definitions). It should be\n'
' called with an argument list containing the same number of '
'items\n'
- " as the function's formal parameter list.\n"
+ ' as the function’s formal parameter list.\n'
'\n'
' Special attributes:\n'
'\n'
'| |\n'
' '
'+===========================+=================================+=============+\n'
- ' | "__doc__" | The function\'s '
- 'documentation | Writable |\n'
+ ' | "__doc__" | The function’s documentation '
+ '| Writable |\n'
' | | string, or "None" if '
'| |\n'
' | | unavailable; not inherited by '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
- ' | "__name__" | The function\'s '
- 'name | Writable |\n'
+ ' | "__name__" | The function’s name '
+ '| Writable |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
- ' | "__qualname__" | The function\'s *qualified '
- 'name* | Writable |\n'
+ ' | "__qualname__" | The function’s *qualified name* '
+ '| Writable |\n'
' | | New in version 3.3. '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' | "__globals__" | A reference to the dictionary '
'| Read-only |\n'
- " | | that holds the function's "
+ ' | | that holds the function’s '
'| |\n'
- ' | | global variables --- the global '
+ ' | | global variables — the global '
'| |\n'
' | | namespace of the module in '
'| |\n'
'| Read-only |\n'
' | | contain bindings for the '
'| |\n'
- " | | function's free variables. "
+ ' | | function’s free variables. '
'| |\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
' '
'+---------------------------+---------------------------------+-------------+\n'
'\n'
- ' Most of the attributes labelled "Writable" check the type of '
+ ' Most of the attributes labelled “Writable” check the type of '
'the\n'
' assigned value.\n'
'\n'
' attributes on built-in functions may be supported in the\n'
' future.*\n'
'\n'
- " Additional information about a function's definition can be\n"
+ ' Additional information about a function’s definition can be\n'
' retrieved from its code object; see the description of '
'internal\n'
' types below.\n'
' Special read-only attributes: "__self__" is the class '
'instance\n'
' object, "__func__" is the function object; "__doc__" is the\n'
- ' method\'s documentation (same as "__func__.__doc__"); '
+ ' method’s documentation (same as "__func__.__doc__"); '
'"__name__"\n'
' is the method name (same as "__func__.__name__"); '
'"__module__"\n'
'instances,\n'
' its "__self__" attribute is the instance, and the method '
'object\n'
- ' is said to be bound. The new method\'s "__func__" attribute '
+ ' is said to be bound. The new method’s "__func__" attribute '
'is\n'
' the original function object.\n'
'\n'
'\n'
' When an instance method object is derived from a class '
'method\n'
- ' object, the "class instance" stored in "__self__" will '
+ ' object, the “class instance” stored in "__self__" will '
'actually\n'
' be the class itself, so that calling either "x.f(1)" or '
'"C.f(1)"\n'
'object\n'
' which can be used to execute the body of the function: '
'calling\n'
- ' the iterator\'s "iterator.__next__()" method will cause the\n'
+ ' the iterator’s "iterator.__next__()" method will cause the\n'
' function to execute until it provides a value using the '
'"yield"\n'
' statement. When the function executes a "return" statement '
'for"\n'
' statement to execute the body of the function.\n'
'\n'
- ' Calling the asynchronous iterator\'s "aiterator.__anext__()"\n'
+ ' Calling the asynchronous iterator’s "aiterator.__anext__()"\n'
' method will return an *awaitable* which when awaited will\n'
' execute until it provides a value using the "yield" '
'expression.\n'
'of\n'
' the arguments are determined by the C function. Special '
'read-\n'
- ' only attributes: "__doc__" is the function\'s documentation\n'
+ ' only attributes: "__doc__" is the function’s documentation\n'
' string, or "None" if unavailable; "__name__" is the '
- "function's\n"
+ 'function’s\n'
' name; "__self__" is set to "None" (but see the next item);\n'
' "__module__" is the name of the module the function was '
'defined\n'
' translated to lookups in this dictionary, e.g., "m.x" is '
'equivalent\n'
' to "m.__dict__["x"]". A module object does not contain the code\n'
- " object used to initialize the module (since it isn't needed "
+ ' object used to initialize the module (since it isn’t needed '
'once\n'
' the initialization is done).\n'
'\n'
- " Attribute assignment updates the module's namespace dictionary,\n"
+ ' Attribute assignment updates the module’s namespace dictionary,\n'
' e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n'
'\n'
- ' Predefined (writable) attributes: "__name__" is the module\'s '
+ ' Predefined (writable) attributes: "__name__" is the module’s '
'name;\n'
- ' "__doc__" is the module\'s documentation string, or "None" if\n'
+ ' "__doc__" is the module’s documentation string, or "None" if\n'
' unavailable; "__annotations__" (optional) is a dictionary\n'
' containing *variable annotations* collected during module body\n'
' execution; "__file__" is the pathname of the file from which '
'is\n'
' the pathname of the shared library file.\n'
'\n'
- ' Special read-only attribute: "__dict__" is the module\'s '
+ ' Special read-only attribute: "__dict__" is the module’s '
'namespace\n'
' as a dictionary object.\n'
'\n'
' classes. This search of the base classes uses the C3 method\n'
' resolution order which behaves correctly even in the presence '
'of\n'
- " 'diamond' inheritance structures where there are multiple\n"
+ ' ‘diamond’ inheritance structures where there are multiple\n'
' inheritance paths leading back to a common ancestor. Additional\n'
' details on the C3 MRO used by Python can be found in the\n'
' documentation accompanying the 2.3 release at\n'
' When a class attribute reference (for class "C", say) would '
'yield a\n'
' class method object, it is transformed into an instance method\n'
- ' object whose "__self__" attributes is "C". When it would yield '
+ ' object whose "__self__" attribute is "C". When it would yield '
'a\n'
' static method object, it is transformed into the object wrapped '
'by\n'
'differ\n'
' from those actually contained in its "__dict__".\n'
'\n'
- " Class attribute assignments update the class's dictionary, "
+ ' Class attribute assignments update the class’s dictionary, '
'never\n'
' the dictionary of a base class.\n'
'\n'
'is\n'
' the module name in which the class was defined; "__dict__" is '
'the\n'
- ' dictionary containing the class\'s namespace; "__bases__" is a '
+ ' dictionary containing the class’s namespace; "__bases__" is a '
'tuple\n'
' containing the base classes, in the order of their occurrence '
'in\n'
- ' the base class list; "__doc__" is the class\'s documentation '
+ ' the base class list; "__doc__" is the class’s documentation '
'string,\n'
' or "None" if undefined; "__annotations__" (optional) is a\n'
' dictionary containing *variable annotations* collected during '
' A class instance has a namespace implemented as a dictionary '
'which\n'
' is the first place in which attribute references are searched.\n'
- " When an attribute is not found there, and the instance's class "
+ ' When an attribute is not found there, and the instance’s class '
'has\n'
' an attribute by that name, the search continues with the class\n'
' attributes. If a class attribute is found that is a '
'object\n'
' whose "__self__" attribute is the instance. Static method and\n'
' class method objects are also transformed; see above under\n'
- ' "Classes". See section Implementing Descriptors for another way '
+ ' “Classes”. See section Implementing Descriptors for another way '
'in\n'
' which attributes of a class retrieved via its instances may '
'differ\n'
- ' from the objects actually stored in the class\'s "__dict__". If '
+ ' from the objects actually stored in the class’s "__dict__". If '
'no\n'
- " class attribute is found, and the object's class has a\n"
+ ' class attribute is found, and the object’s class has a\n'
' "__getattr__()" method, that is called to satisfy the lookup.\n'
'\n'
- " Attribute assignments and deletions update the instance's\n"
- " dictionary, never a class's dictionary. If the class has a\n"
+ ' Attribute assignments and deletions update the instance’s\n'
+ ' dictionary, never a class’s dictionary. If the class has a\n'
' "__setattr__()" or "__delattr__()" method, this is called '
'instead\n'
' of updating the instance dictionary directly.\n'
' Special method names.\n'
'\n'
' Special attributes: "__dict__" is the attribute dictionary;\n'
- ' "__class__" is the instance\'s class.\n'
+ ' "__class__" is the instance’s class.\n'
'\n'
'I/O objects (also known as file objects)\n'
' A *file object* represents an open file. Various shortcuts are\n'
' provided by extension modules).\n'
'\n'
' The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n'
- " initialized to file objects corresponding to the interpreter's\n"
+ ' initialized to file objects corresponding to the interpreter’s\n'
' standard input, output and error streams; they are all open in '
'text\n'
' mode and therefore follow the interface defined by the\n'
' or *bytecode*. The difference between a code object and a\n'
' function object is that the function object contains an '
'explicit\n'
- " reference to the function's globals (the module in which it "
+ ' reference to the function’s globals (the module in which it '
'was\n'
' defined), while a code object contains no context; also the\n'
' default argument values are stored in the function object, '
'is\n'
' used by the debugger); "f_lineno" is the current line number '
'of\n'
- ' the frame --- writing to this from within a trace function '
+ ' the frame — writing to this from within a trace function '
'jumps\n'
' to the given line (only for the bottom-most frame). A '
'debugger\n'
' is retrieved from classes and class instances. The behaviour '
'of\n'
' class method objects upon such retrieval is described above,\n'
- ' under "User-defined methods". Class method objects are '
+ ' under “User-defined methods”. Class method objects are '
'created\n'
' by the built-in "classmethod()" constructor.\n',
'typesfunctions': 'Functions\n'
'different object types.\n'
'\n'
'See Function definitions for more information.\n',
- 'typesmapping': 'Mapping Types --- "dict"\n'
- '************************\n'
+ 'typesmapping': 'Mapping Types — "dict"\n'
+ '**********************\n'
'\n'
'A *mapping* object maps *hashable* values to arbitrary '
'objects.\n'
'in "list", "set", and "tuple" classes, and the "collections" '
'module.)\n'
'\n'
- "A dictionary's keys are *almost* arbitrary values. Values "
+ 'A dictionary’s keys are *almost* arbitrary values. Values '
'that are\n'
'not *hashable*, that is, values containing lists, '
'dictionaries or\n'
'\n'
' items()\n'
'\n'
- ' Return a new view of the dictionary\'s items ("(key, '
+ ' Return a new view of the dictionary’s items ("(key, '
'value)"\n'
' pairs). See the documentation of view objects.\n'
'\n'
' keys()\n'
'\n'
- " Return a new view of the dictionary's keys. See the\n"
+ ' Return a new view of the dictionary’s keys. See the\n'
' documentation of view objects.\n'
'\n'
' pop(key[, default])\n'
'\n'
' values()\n'
'\n'
- " Return a new view of the dictionary's values. See "
+ ' Return a new view of the dictionary’s values. See '
'the\n'
' documentation of view objects.\n'
'\n'
' Dictionaries compare equal if and only if they have the '
'same "(key,\n'
- ' value)" pairs. Order comparisons (\'<\', \'<=\', \'>=\', '
- "'>') raise\n"
+ ' value)" pairs. Order comparisons (‘<’, ‘<=’, ‘>=’, ‘>’) '
+ 'raise\n'
' "TypeError".\n'
'\n'
'See also: "types.MappingProxyType" can be used to create a '
'The objects returned by "dict.keys()", "dict.values()" and\n'
'"dict.items()" are *view objects*. They provide a dynamic '
'view on the\n'
- "dictionary's entries, which means that when the dictionary "
+ 'dictionary’s entries, which means that when the dictionary '
'changes,\n'
'the view reflects these changes.\n'
'\n'
'which is\n'
' non-random, varies across Python implementations, and '
'depends on\n'
- " the dictionary's history of insertions and deletions. If "
+ ' the dictionary’s history of insertions and deletions. If '
'keys,\n'
' values and items views are iterated over with no '
'intervening\n'
'\n'
'x in dictview\n'
'\n'
- ' Return "True" if *x* is in the underlying dictionary\'s '
+ ' Return "True" if *x* is in the underlying dictionary’s '
'keys, values\n'
' or items (in the latter case, *x* should be a "(key, '
'value)"\n'
'The only special operation on a module is attribute access: '
'"m.name",\n'
'where *m* is a module and *name* accesses a name defined in '
- "*m*'s\n"
+ '*m*’s\n'
'symbol table. Module attributes can be assigned to. (Note '
'that the\n'
'"import" statement is not, strictly speaking, an operation '
'\n'
'A special attribute of every module is "__dict__". This is '
'the\n'
- "dictionary containing the module's symbol table. Modifying "
+ 'dictionary containing the module’s symbol table. Modifying '
'this\n'
- "dictionary will actually change the module's symbol table, "
+ 'dictionary will actually change the module’s symbol table, '
'but direct\n'
'assignment to the "__dict__" attribute is not possible (you '
'can write\n'
'"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but '
- "you can't\n"
+ 'you can’t\n'
'write "m.__dict__ = {}"). Modifying "__dict__" directly is '
'not\n'
'recommended.\n'
'written as\n'
'"<module \'os\' from '
'\'/usr/local/lib/pythonX.Y/os.pyc\'>".\n',
- 'typesseq': 'Sequence Types --- "list", "tuple", "range"\n'
- '*******************************************\n'
+ 'typesseq': 'Sequence Types — "list", "tuple", "range"\n'
+ '*****************************************\n'
'\n'
'There are three basic sequence types: lists, tuples, and range\n'
'objects. Additional sequence types tailored for processing of '
'*j* are\n'
' reduced to "len(s) - 1" if they are greater. If *i* or *j* '
'are\n'
- ' omitted or "None", they become "end" values (which end '
+ ' omitted or "None", they become “end” values (which end '
'depends on\n'
' the sign of *k*). Note, *k* cannot be zero. If *k* is '
'"None", it\n'
'documentation\n'
'\n'
'7. Some sequence types (such as "range") only support item\n'
- " sequences that follow specific patterns, and hence don't "
+ ' sequences that follow specific patterns, and hence don’t '
'support\n'
' sequence concatenation or repetition.\n'
'\n'
' sequence.\n'
'\n'
'5. "clear()" and "copy()" are included for consistency with the\n'
- " interfaces of mutable containers that don't support slicing\n"
+ ' interfaces of mutable containers that don’t support slicing\n'
' operations (such as "dict" and "set")\n'
'\n'
' New in version 3.3: "clear()" and "copy()" methods.\n'
'\n'
' The constructor builds a list whose items are the same and in '
'the\n'
- " same order as *iterable*'s items. *iterable* may be either "
+ ' same order as *iterable*’s items. *iterable* may be either '
'a\n'
' sequence, a container that supports iteration, or an '
'iterator\n'
'is\n'
' stable if it guarantees not to change the relative order '
'of\n'
- ' elements that compare equal --- this is helpful for '
- 'sorting in\n'
+ ' elements that compare equal — this is helpful for sorting '
+ 'in\n'
' multiple passes (for example, sort by department, then by '
'salary\n'
' grade).\n'
'\n'
' The constructor builds a tuple whose items are the same and '
'in the\n'
- " same order as *iterable*'s items. *iterable* may be either "
+ ' same order as *iterable*’s items. *iterable* may be either '
'a\n'
' sequence, a container that supports iteration, or an '
'iterator\n'
'Range objects implement the "collections.abc.Sequence" ABC, and\n'
'provide features such as containment tests, element index '
'lookup,\n'
- 'slicing and support for negative indices (see Sequence Types --- '
+ 'slicing and support for negative indices (see Sequence Types — '
'list,\n'
'tuple, range):\n'
'\n'
'constant\n'
'time instead of iterating through all items.\n'
'\n'
- "Changed in version 3.3: Define '==' and '!=' to compare range "
+ 'Changed in version 3.3: Define ‘==’ and ‘!=’ to compare range '
'objects\n'
'based on the sequence of values they define (instead of '
'comparing\n'
'\n'
'5. "clear()" and "copy()" are included for consistency '
'with the\n'
- " interfaces of mutable containers that don't support "
+ ' interfaces of mutable containers that don’t support '
'slicing\n'
' operations (such as "dict" and "set")\n'
'\n'
'\n'
'A "break" statement executed in the first suite terminates the '
'loop\n'
- 'without executing the "else" clause\'s suite. A "continue" '
+ 'without executing the "else" clause’s suite. A "continue" '
'statement\n'
'executed in the first suite skips the rest of the suite and goes '
'back\n'
'\n'
'The "with" statement is used to wrap the execution of a block with\n'
'methods defined by a context manager (see section With Statement\n'
- 'Context Managers). This allows common "try"..."except"..."finally"\n'
- 'usage patterns to be encapsulated for convenient reuse.\n'
+ 'Context Managers). This allows common "try"…"except"…"finally" '
+ 'usage\n'
+ 'patterns to be encapsulated for convenient reuse.\n'
'\n'
' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n'
' with_item ::= expression ["as" target]\n'
'\n'
- 'The execution of the "with" statement with one "item" proceeds as\n'
+ 'The execution of the "with" statement with one “item” proceeds as\n'
'follows:\n'
'\n'
'1. The context expression (the expression given in the "with_item")\n'
' is evaluated to obtain a context manager.\n'
'\n'
- '2. The context manager\'s "__exit__()" is loaded for later use.\n'
+ '2. The context manager’s "__exit__()" is loaded for later use.\n'
'\n'
- '3. The context manager\'s "__enter__()" method is invoked.\n'
+ '3. The context manager’s "__enter__()" method is invoked.\n'
'\n'
'4. If a target was included in the "with" statement, the return\n'
' value from "__enter__()" is assigned to it.\n'
'\n'
'5. The suite is executed.\n'
'\n'
- '6. The context manager\'s "__exit__()" method is invoked. If an\n'
+ '6. The context manager’s "__exit__()" method is invoked. If an\n'
' exception caused the suite to be exited, its type, value, and\n'
' traceback are passed as arguments to "__exit__()". Otherwise, '
'three\n'
'\n'
'See also:\n'
'\n'
- ' **PEP 343** - The "with" statement\n'
+ ' **PEP 343** - The “with” statement\n'
' The specification, background, and examples for the Python '
'"with"\n'
' statement.\n',
"enough bits to choose from a population range this large.\n"
"To remove the range limitation, add a getrandbits() method.")
return int(random() * n)
+ if n == 0:
+ raise ValueError("Boundary cannot be zero")
rem = maxsize % n
limit = (maxsize - rem) / maxsize # int(limit * maxsize) % n == 0
r = random()
filename and more than one file may be created. The optional flag
parameter has the same interpretation as the flag parameter of
dbm.open(). The optional protocol parameter specifies the
- version of the pickle protocol (0, 1, or 2).
+ version of the pickle protocol.
See the module's __doc__ string for an overview of the interface.
"""
continue # don't mess with a PEP 302-supplied __file__
try:
m.__file__ = os.path.abspath(m.__file__)
- except (AttributeError, OSError):
+ except (AttributeError, OSError, TypeError):
pass
try:
m.__cached__ = os.path.abspath(m.__cached__)
- except (AttributeError, OSError):
+ except (AttributeError, OSError, TypeError):
pass
timeout = 300
active_children = None
max_children = 40
+ # If true, server_close() waits until all child processes complete.
+ _block_on_close = False
- def collect_children(self):
+ def collect_children(self, *, blocking=False):
"""Internal routine to wait for children that have exited."""
if self.active_children is None:
return
# Now reap all defunct children.
for pid in self.active_children.copy():
try:
- pid, _ = os.waitpid(pid, os.WNOHANG)
+ flags = 0 if blocking else os.WNOHANG
+ pid, _ = os.waitpid(pid, flags)
# if the child hasn't exited yet, pid will be 0 and ignored by
# discard() below
self.active_children.discard(pid)
finally:
os._exit(status)
+ def server_close(self):
+ super().server_close()
+ self.collect_children(blocking=self._block_on_close)
+
class ThreadingMixIn:
"""Mix-in class to handle each request in a new thread."""
# Decides how threads will act upon termination of the
# main process
daemon_threads = False
+ # If true, server_close() waits until all non-daemonic threads terminate.
+ _block_on_close = False
+ # For non-daemonic threads, list of threading.Threading objects
+ # used by server_close() to wait for all threads completion.
+ _threads = None
def process_request_thread(self, request, client_address):
"""Same as in BaseServer but as a thread.
t = threading.Thread(target = self.process_request_thread,
args = (request, client_address))
t.daemon = self.daemon_threads
+ if not t.daemon and self._block_on_close:
+ if self._threads is None:
+ self._threads = []
+ self._threads.append(t)
t.start()
+ def server_close(self):
+ super().server_close()
+ if self._block_on_close:
+ threads = self._threads
+ self._threads = None
+ if threads:
+ for thread in threads:
+ thread.join()
+
if hasattr(os, "fork"):
class ForkingUDPServer(ForkingMixIn, UDPServer): pass
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
- into file names. Each string is six characters long. Multiple
+ into file names. Each string is eight characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
# Failing that, try OS-specific locations.
if _os.name == 'nt':
- dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
+ dirlist.extend([ _os.path.expanduser(r'~\AppData\Local\Temp'),
+ _os.path.expandvars(r'%SYSTEMROOT%\Temp'),
+ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
import struct
import operator
import weakref
-import test.support
+from test import support
import test.support.script_helper
# Skip tests if _multiprocessing wasn't built.
-_multiprocessing = test.support.import_module('_multiprocessing')
+_multiprocessing = support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
-test.support.import_module('multiprocessing.synchronize')
+support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
- testfn = test.support.TESTFN
- self.addCleanup(test.support.unlink, testfn)
+ testfn = support.TESTFN
+ self.addCleanup(support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
- testfn = test.support.TESTFN
- self.addCleanup(test.support.unlink, testfn)
+ testfn = support.TESTFN
+ self.addCleanup(support.unlink, testfn)
for reason in (
[1, 2, 3],
close_queue(queue)
def test_no_import_lock_contention(self):
- with test.support.temp_cwd():
+ with support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
del q
""")
- with test.support.DirsOnSysPath(os.getcwd()):
+ with support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
- with test.support.captured_stderr():
+ with support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
- with test.support.captured_stderr() as f1:
+ with support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
authkey = os.urandom(32)
manager = QueueManager(
- address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
+ address=(support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
- address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
+ address=(support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
- self.addCleanup(test.support.unlink, test.support.TESTFN)
- with open(test.support.TESTFN, "wb") as f:
+ self.addCleanup(support.unlink, support.TESTFN)
+ with open(support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
- with open(test.support.TESTFN, "rb") as f:
+ with open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
- self.addCleanup(test.support.unlink, test.support.TESTFN)
- with open(test.support.TESTFN, "wb") as f:
+ self.addCleanup(support.unlink, support.TESTFN)
+ with open(support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
finally:
os.close(newfd)
p.join()
- with open(test.support.TESTFN, "rb") as f:
+ with open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
l.close()
l = socket.socket()
- l.bind((test.support.HOST, 0))
+ l.bind((support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
- with test.support.start_threads(threads):
+ with support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
- l.bind((test.support.HOST, 0))
+ l.bind((support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
- rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
+ rc, out, err = support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
- rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
+ rc, out, err = support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
class TestIgnoreEINTR(unittest.TestCase):
+ # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
+ CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
+
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
conn.send('ready')
x = conn.recv()
conn.send(x)
- conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
+ conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
- self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
+ self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
- rc, out, err = test.support.script_helper.assert_python_ok(name)
+ rc, out, err = support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
- test.support.gc_collect()
+ support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
- test.support.gc_collect()
+ support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
if need_sleep:
time.sleep(0.5)
multiprocessing.process._cleanup()
- test.support.gc_collect()
+ support.gc_collect()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
def write_output(filename, tests):
if not filename:
return
- print("Write %s tests into %s" % (len(tests), filename))
+ print("Writing %s tests into %s" % (len(tests), filename))
write_tests(filename, tests)
return filename
print("ran %s tests/%s" % (ntest, len(tests)))
print("exit", exitcode)
if exitcode:
- print("Tests failed: use this new subtest")
+ print("Tests failed: continuing with this subtest")
tests = subtests
output = write_output(args.output, tests)
else:
- print("Tests succeeded: skip this subtest, try a new subbset")
+ print("Tests succeeded: skipping this subtest, trying a new subset")
print()
iteration += 1
except KeyboardInterrupt:
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
+ @support.run_with_tz('EDT4')
def test_astimezone(self):
- return # The rest is no longer applicable
- # Pretty boring! The TZ test is more interesting here. astimezone()
- # simply can't be applied to a naive object.
dt = self.theclass.now()
- f = FixedOffset(44, "")
- self.assertRaises(ValueError, dt.astimezone) # naive
+ f = FixedOffset(44, "0044")
+ dt_utc = dt.replace(tzinfo=timezone(timedelta(hours=-4), 'EDT'))
+ self.assertEqual(dt.astimezone(), dt_utc) # naive
self.assertRaises(TypeError, dt.astimezone, f, f) # too many args
self.assertRaises(TypeError, dt.astimezone, dt) # arg wrong type
- self.assertRaises(ValueError, dt.astimezone, f) # naive
- self.assertRaises(ValueError, dt.astimezone, tz=f) # naive
+ dt_f = dt.replace(tzinfo=f) + timedelta(hours=4, minutes=44)
+ self.assertEqual(dt.astimezone(f), dt_f) # naive
+ self.assertEqual(dt.astimezone(tz=f), dt_f) # naive
class Bogus(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return timedelta(0)
bog = Bogus()
self.assertRaises(ValueError, dt.astimezone, bog) # naive
- self.assertRaises(ValueError,
- dt.replace(tzinfo=bog).astimezone, f)
+ self.assertEqual(dt.replace(tzinfo=bog).astimezone(f), dt_f)
class AlsoBogus(tzinfo):
def utcoffset(self, dt): return timedelta(0)
alsobog = AlsoBogus()
self.assertRaises(ValueError, dt.astimezone, alsobog) # also naive
+ class Broken(tzinfo):
+ def utcoffset(self, dt): return 1
+ def dst(self, dt): return 1
+ broken = Broken()
+ dt_broken = dt.replace(tzinfo=broken)
+ with self.assertRaises(TypeError):
+ dt_broken.astimezone()
+
def test_subclass_datetime(self):
class C(self.theclass):
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
- consume >2GB of disk space temporarily.
+ consume >2 GiB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
import sys
import sysconfig
import tempfile
-import textwrap
import time
import unittest
from test.libregrtest.cmdline import _parse_args
INTERRUPTED, CHILD_ERROR,
PROGRESS_MIN_TIME, format_test_result)
from test.libregrtest.setup import setup_tests
+from test.libregrtest.utils import removepy, count, format_duration, printlist
from test import support
try:
import gc
TEMPDIR = os.path.abspath(TEMPDIR)
-def format_duration(seconds):
- if seconds < 1.0:
- return '%.0f ms' % (seconds * 1e3)
- if seconds < 60.0:
- return '%.0f sec' % seconds
-
- minutes, seconds = divmod(seconds, 60.0)
- return '%.0f min %.0f sec' % (minutes, seconds)
-
-
class Regrtest:
"""Execute a test suite.
self.skipped = []
self.resource_denieds = []
self.environment_changed = []
+ self.rerun = []
+ self.first_result = None
self.interrupted = False
# used by --slow
# "[ 51/405/1] test_tcl passed"
line = f"{test_index:{self.test_count_width}}{self.test_count}"
- if self.bad and not self.ns.pgo:
- line = f"{line}/{len(self.bad)}"
+ fails = len(self.bad) + len(self.environment_changed)
+ if fails and not self.ns.pgo:
+ line = f"{line}/{fails}"
line = f"[{line}] {test}"
# add the system load prefix: "load avg: 1.80 "
self.ns.verbose = True
self.ns.failfast = False
self.ns.verbose3 = False
- self.ns.match_tests = None
+ self.first_result = self.get_tests_result()
+
+ print()
print("Re-running failed tests in verbose mode")
- for test in self.bad[:]:
+ self.rerun = self.bad[:]
+ for test in self.rerun:
print("Re-running test %r in verbose mode" % test, flush=True)
try:
self.ns.verbose = True
print(count(len(self.bad), 'test'), "failed again:")
printlist(self.bad)
+ self.display_result()
+
def display_result(self):
+ # If running the test suite for PGO then no one cares about results.
+ if self.ns.pgo:
+ return
+
+ print()
+ print("== Tests result: %s ==" % self.get_tests_result())
+
if self.interrupted:
- # print a newline after ^C
print()
+ # print a newline after ^C
print("Test suite interrupted by signal SIGINT.")
executed = set(self.good) | set(self.bad) | set(self.skipped)
omitted = set(self.selected) - executed
print(count(len(omitted), "test"), "omitted:")
printlist(omitted)
- # If running the test suite for PGO then no one cares about
- # results.
- if self.ns.pgo:
- return
-
if self.good and not self.ns.quiet:
+ print()
if (not self.bad
and not self.skipped
and not self.interrupted
print(count(len(self.skipped), "test"), "skipped:")
printlist(self.skipped)
+ if self.rerun:
+ print()
+ print("%s:" % count(len(self.rerun), "re-run test"))
+ printlist(self.rerun)
+
def run_tests_sequential(self):
if self.ns.trace:
import trace
% (locale.getpreferredencoding(False),
sys.getfilesystemencoding()))
+ def get_tests_result(self):
+ result = []
+ if self.bad:
+ result.append("FAILURE")
+ elif self.ns.fail_env_changed and self.environment_changed:
+ result.append("ENV CHANGED")
+
+ if self.interrupted:
+ result.append("INTERRUPTED")
+
+ if not result:
+ result.append("SUCCESS")
+
+ result = ', '.join(result)
+ if self.first_result:
+ result = '%s then %s' % (self.first_result, result)
+ return result
+
def run_tests(self):
# For a partial run, we do not need to clutter the output.
if (self.ns.header
print()
duration = time.monotonic() - self.start_time
print("Total duration: %s" % format_duration(duration))
-
- if self.bad:
- result = "FAILURE"
- elif self.interrupted:
- result = "INTERRUPTED"
- elif self.ns.fail_env_changed and self.environment_changed:
- result = "ENV CHANGED"
- else:
- result = "SUCCESS"
- print("Tests result: %s" % result)
+ print("Tests result: %s" % self.get_tests_result())
if self.ns.runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(0)
-def removepy(names):
- if not names:
- return
- for idx, name in enumerate(names):
- basename, ext = os.path.splitext(name)
- if ext == '.py':
- names[idx] = basename
-
-
-def count(n, word):
- if n == 1:
- return "%d %s" % (n, word)
- else:
- return "%d %ss" % (n, word)
-
-
-def printlist(x, width=70, indent=4, file=None):
- """Print the elements of iterable x to stdout.
-
- Optional arg width (default 70) is the maximum line length.
- Optional arg indent (default 4) is the number of blanks with which to
- begin each line.
- """
-
- blanks = ' ' * indent
- # Print the sorted list: 'x' may be a '--random' list or a set()
- print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
- initial_indent=blanks, subsequent_indent=blanks),
- file=file)
-
-
def main(tests=None, **kwargs):
"""Run the Python suite."""
Regrtest().main(tests=tests, **kwargs)
from test import support
-try:
- MAXFD = os.sysconf("SC_OPEN_MAX")
-except Exception:
- MAXFD = 256
-
-
-def fd_count():
- """Count the number of open file descriptors"""
- if sys.platform.startswith(('linux', 'freebsd')):
- try:
- names = os.listdir("/proc/self/fd")
- return len(names)
- except FileNotFoundError:
- pass
-
- count = 0
- for fd in range(MAXFD):
- try:
- # Prefer dup() over fstat(). fstat() can require input/output
- # whereas dup() doesn't.
- fd2 = os.dup(fd)
- except OSError as e:
- if e.errno != errno.EBADF:
- raise
- else:
- os.close(fd2)
- count += 1
- return count
-
-
def dash_R(the_module, test, indirect_test, huntrleaks):
"""Run a test multiple times, looking for reference leaks.
func1 = sys.getallocatedblocks
func2 = sys.gettotalrefcount
gc.collect()
- return func1(), func2(), fd_count()
+ return func1(), func2(), support.fd_count()
def clear_caches():
faulthandler.dump_traceback_later(ns.timeout, exit=True)
try:
support.set_match_tests(ns.match_tests)
+ # reset the environment_altered flag to detect if a test altered
+ # the environment
+ support.environment_altered = False
if ns.failfast:
support.failfast = True
if output_on_failure:
runtest, INTERRUPTED, CHILD_ERROR, PROGRESS_MIN_TIME,
format_test_result)
from test.libregrtest.setup import setup_tests
+from test.libregrtest.utils import format_duration
# Display the running tests if nothing happened last N seconds
continue
dt = time.monotonic() - worker.start_time
if dt >= PROGRESS_MIN_TIME:
- running.append('%s (%.0f sec)' % (current_test, dt))
+ text = '%s (%s)' % (current_test, format_duration(dt))
+ running.append(text)
return running
finished = 0
except queue.Empty:
running = get_running(workers)
if running and not regrtest.ns.pgo:
- print('running: %s' % ', '.join(running))
+ print('running: %s' % ', '.join(running), flush=True)
continue
test, stdout, stderr, result = item
line = "Waiting for %s (%s tests)" % (', '.join(running), len(running))
if dt >= WAIT_PROGRESS:
line = "%s since %.0f sec" % (line, dt)
- print(line)
+ print(line, flush=True)
for worker in workers:
worker.join(WAIT_PROGRESS)
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
- support.gc_collect() # Some resources use weak references
+
+ # Some resources use weak references
+ support.gc_collect()
+
+ # Read support.environment_altered, set by support helper functions
+ self.changed |= support.environment_altered
+
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
--- /dev/null
+import os.path
+import textwrap
+
+
+def format_duration(seconds):
+ if seconds < 1.0:
+ return '%.0f ms' % (seconds * 1e3)
+ if seconds < 60.0:
+ return '%.0f sec' % seconds
+
+ minutes, seconds = divmod(seconds, 60.0)
+ hours, minutes = divmod(minutes, 60.0)
+ if hours:
+ return '%.0f hour %.0f min' % (hours, minutes)
+ else:
+ return '%.0f min %.0f sec' % (minutes, seconds)
+
+
+def removepy(names):
+ if not names:
+ return
+ for idx, name in enumerate(names):
+ basename, ext = os.path.splitext(name)
+ if ext == '.py':
+ names[idx] = basename
+
+
+def count(n, word):
+ if n == 1:
+ return "%d %s" % (n, word)
+ else:
+ return "%d %ss" % (n, word)
+
+
+def printlist(x, width=70, indent=4, file=None):
+ """Print the elements of iterable x to stdout.
+
+ Optional arg width (default 70) is the maximum line length.
+ Optional arg indent (default 4) is the number of blanks with which to
+ begin each line.
+ """
+
+ blanks = ' ' * indent
+ # Print the sorted list: 'x' may be a '--random' list or a set()
+ print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
+ initial_indent=blanks, subsequent_indent=blanks),
+ file=file)
self.started = []
self.finished = []
self._can_exit = not wait_before_exit
+ self.wait_thread = support.wait_threads_exit()
+ self.wait_thread.__enter__()
+
def task():
tid = threading.get_ident()
self.started.append(tid)
self.finished.append(tid)
while not self._can_exit:
_wait()
+
try:
for i in range(n):
start_new_thread(task, ())
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
+ # Wait for threads exit
+ self.wait_thread.__exit__(None, None, None)
def do_finish(self):
self._can_exit = True
# Lock needs to be released before re-acquiring.
lock = self.locktype()
phase = []
+
def f():
lock.acquire()
phase.append(None)
lock.acquire()
phase.append(None)
- start_new_thread(f, ())
- while len(phase) == 0:
- _wait()
- _wait()
- self.assertEqual(len(phase), 1)
- lock.release()
- while len(phase) == 1:
+
+ with support.wait_threads_exit():
+ start_new_thread(f, ())
+ while len(phase) == 0:
+ _wait()
_wait()
- self.assertEqual(len(phase), 2)
+ self.assertEqual(len(phase), 1)
+ lock.release()
+ while len(phase) == 1:
+ _wait()
+ self.assertEqual(len(phase), 2)
def test_different_thread(self):
# Lock can be released from a different thread.
self.assertRaises(RuntimeError, lock.release)
finally:
b.do_finish()
+ b.wait_for_finished()
def test__is_owned(self):
lock = self.locktype()
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
- f = io.BytesIO()
- pickler = self.pickler_class(f)
+ for proto in protocols:
+ f = io.BytesIO()
+ pickler = self.pickler_class(f, proto)
- pickler.dump(data)
- first_pickled = f.getvalue()
+ pickler.dump(data)
+ first_pickled = f.getvalue()
- # Reset BytesIO object.
- f.seek(0)
- f.truncate()
+ # Reset BytesIO object.
+ f.seek(0)
+ f.truncate()
- pickler.dump(data)
- second_pickled = f.getvalue()
+ pickler.dump(data)
+ second_pickled = f.getvalue()
- # Reset the Pickler and BytesIO objects.
- pickler.clear_memo()
- f.seek(0)
- f.truncate()
+ # Reset the Pickler and BytesIO objects.
+ pickler.clear_memo()
+ f.seek(0)
+ f.truncate()
- pickler.dump(data)
- third_pickled = f.getvalue()
+ pickler.dump(data)
+ third_pickled = f.getvalue()
- self.assertNotEqual(first_pickled, second_pickled)
- self.assertEqual(first_pickled, third_pickled)
+ self.assertNotEqual(first_pickled, second_pickled)
+ self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
copy_attributes(info_add, readline, 'readline.%s', attributes,
formatter=format_attr)
+ if not hasattr(readline, "_READLINE_LIBRARY_VERSION"):
+ # _READLINE_LIBRARY_VERSION has been added to CPython 3.7
+ doc = getattr(readline, '__doc__', '')
+ if 'libedit readline' in doc:
+ info_add('readline.library', 'libedit readline')
+ elif 'GNU readline' in doc:
+ info_add('readline.library', 'GNU readline')
+
def collect_gdb(info_add):
import subprocess
call_func(info_add, 'test_support.python_is_optimized', support, 'python_is_optimized')
+def collect_cc(info_add):
+ import subprocess
+ import sysconfig
+
+ CC = sysconfig.get_config_var('CC')
+ if not CC:
+ return
+
+ try:
+ import shlex
+ args = shlex.split(CC)
+ except ImportError:
+ args = CC.split()
+ args.append('--version')
+ proc = subprocess.Popen(args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ stdout = proc.communicate()[0]
+ if proc.returncode:
+ # CC --version failed: ignore error
+ return
+
+ text = stdout.splitlines()[0]
+ text = normalize_text(text)
+ info_add('CC.version', text)
+
+
def collect_info(info):
error = False
info_add = info.add
collect_decimal,
collect_testcapi,
collect_resource,
+ collect_cc,
# Collecting from tests should be last as they have side effects.
collect_test_socket,
# Nothing should happen: SIGUSR2 is ignored
child.wait()
- signal.alarm(1)
- self.wait_signal(None, 'SIGALRM', KeyboardInterrupt)
- self.assertEqual(self.got_signals, {'SIGHUP': 1, 'SIGUSR1': 1,
- 'SIGALRM': 0})
+ try:
+ signal.alarm(1)
+ self.wait_signal(None, 'SIGALRM', KeyboardInterrupt)
+ self.assertEqual(self.got_signals, {'SIGHUP': 1, 'SIGUSR1': 1,
+ 'SIGALRM': 0})
+ finally:
+ signal.alarm(0)
if __name__ == "__main__":
"check_warnings", "check_no_resource_warning", "EnvironmentVarGuard",
"run_with_locale", "swap_item",
"swap_attr", "Matcher", "set_memlimit", "SuppressCrashReport", "sortdict",
- "run_with_tz", "PGO", "missing_compiler_executable",
+ "run_with_tz", "PGO", "missing_compiler_executable", "fd_count",
]
class Error(Exception):
_force_run(fullname, os.unlink, fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(lambda p: _force_run(p, os.rmdir, p), path)
+
+ def _longpath(path):
+ try:
+ import ctypes
+ except ImportError:
+ # No ctypes means we can't expands paths.
+ pass
+ else:
+ buffer = ctypes.create_unicode_buffer(len(path) * 2)
+ length = ctypes.windll.kernel32.GetLongPathNameW(path, buffer,
+ len(buffer))
+ if length:
+ return buffer[:length]
+ return path
else:
_unlink = os.unlink
_rmdir = os.rmdir
_rmtree_inner(path)
os.rmdir(path)
+ def _longpath(path):
+ return path
+
def unlink(filename):
try:
_unlink(filename)
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
+# Flag used by saved_test_environment of test.libregrtest.save_env,
+# to check if a test modified the environment. The flag should be set to False
+# before running a new test.
+#
+# For example, threading_cleanup() sets the flag is the function fails
+# to cleanup threads.
+environment_altered = False
+
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
threading_cleanup(*key)
return decorator
+
+@contextlib.contextmanager
+def wait_threads_exit(timeout=60.0):
+ """
+ bpo-31234: Context manager to wait until all threads created in the with
+ statement exit.
+
+ Use _thread.count() to check if threads exited. Indirectly, wait until
+ threads exit the internal t_bootstrap() C function of the _thread module.
+
+ threading_setup() and threading_cleanup() are designed to emit a warning
+ if a test leaves running threads in the background. This context manager
+ is designed to cleanup threads started by the _thread.start_new_thread()
+ which doesn't allow to wait for thread exit, whereas thread.Thread has a
+ join() method.
+ """
+ old_count = _thread._count()
+ try:
+ yield
+ finally:
+ start_time = time.monotonic()
+ deadline = start_time + timeout
+ while True:
+ count = _thread._count()
+ if count <= old_count:
+ break
+ if time.monotonic() > deadline:
+ dt = time.monotonic() - start_time
+ msg = (f"wait_threads() failed to cleanup {count - old_count} "
+ f"threads after {dt:.1f} seconds "
+ f"(count: {count}, old count: {old_count})")
+ raise AssertionError(msg)
+ time.sleep(0.010)
+ gc_collect()
+
+
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
if not hasattr(os, "setxattr"):
can = False
else:
- tmp_fp, tmp_name = tempfile.mkstemp()
+ tmp_dir = tempfile.mkdtemp()
+ tmp_fp, tmp_name = tempfile.mkstemp(dir=tmp_dir)
try:
with open(TESTFN, "wb") as fp:
try:
# TESTFN & tempfile may use different file systems with
# different capabilities
os.setxattr(tmp_fp, b"user.test", b"")
+ os.setxattr(tmp_name, b"trusted.foo", b"42")
os.setxattr(fp.fileno(), b"user.test", b"")
# Kernels < 2.6.39 don't respect setxattr flags.
kernel_version = platform.release()
finally:
unlink(TESTFN)
unlink(tmp_name)
+ rmdir(tmp_dir)
_can_xattr = can
return can
The 'extra' argument can be a set of names that wouldn't otherwise be
automatically detected as "public", like objects without a proper
- '__module__' attriubute. If provided, it will be added to the
+ '__module__' attribute. If provided, it will be added to the
automatically detected ones.
The 'blacklist' argument can be a set of names that must not be treated
faulthandler.enable(file=fd, all_threads=True)
+def fd_count():
+ """Count the number of open file descriptors.
+ """
+ if sys.platform.startswith(('linux', 'freebsd')):
+ try:
+ names = os.listdir("/proc/self/fd")
+ # Substract one because listdir() opens internally a file
+ # descriptor to list the content of the /proc/self/fd/ directory.
+ return len(names) - 1
+ except FileNotFoundError:
+ pass
+
+ MAXFD = 256
+ if hasattr(os, 'sysconf'):
+ try:
+ MAXFD = os.sysconf("SC_OPEN_MAX")
+ except OSError:
+ pass
+
+ old_modes = None
+ if sys.platform == 'win32':
+ # bpo-25306, bpo-31009: Call CrtSetReportMode() to not kill the process
+ # on invalid file descriptor if Python is compiled in debug mode
+ try:
+ import msvcrt
+ msvcrt.CrtSetReportMode
+ except (AttributeError, ImportError):
+ # no msvcrt or a release build
+ pass
+ else:
+ old_modes = {}
+ for report_type in (msvcrt.CRT_WARN,
+ msvcrt.CRT_ERROR,
+ msvcrt.CRT_ASSERT):
+ old_modes[report_type] = msvcrt.CrtSetReportMode(report_type, 0)
+
+ try:
+ count = 0
+ for fd in range(MAXFD):
+ try:
+ # Prefer dup() over fstat(). fstat() can require input/output
+ # whereas dup() doesn't.
+ fd2 = os.dup(fd)
+ except OSError as e:
+ if e.errno != errno.EBADF:
+ raise
+ else:
+ os.close(fd2)
+ count += 1
+ finally:
+ if old_modes is not None:
+ for report_type in (msvcrt.CRT_WARN,
+ msvcrt.CRT_ERROR,
+ msvcrt.CRT_ASSERT):
+ msvcrt.CrtSetReportMode(report_type, old_modes[report_type])
+
+ return count
+
+
class SaveSignals:
"""
Save an restore signal handlers.
self.do_test_exception(nargs=None, metavar=tuple())
def test_nargs_None_metavar_length1(self):
- self.do_test_no_exception(nargs=None, metavar=("1"))
+ self.do_test_no_exception(nargs=None, metavar=("1",))
def test_nargs_None_metavar_length2(self):
self.do_test_exception(nargs=None, metavar=("1", "2"))
self.do_test_exception(nargs="?", metavar=tuple())
def test_nargs_optional_metavar_length1(self):
- self.do_test_no_exception(nargs="?", metavar=("1"))
+ self.do_test_no_exception(nargs="?", metavar=("1",))
def test_nargs_optional_metavar_length2(self):
self.do_test_exception(nargs="?", metavar=("1", "2"))
self.do_test_exception(nargs="*", metavar=tuple())
def test_nargs_zeroormore_metavar_length1(self):
- self.do_test_no_exception(nargs="*", metavar=("1"))
+ self.do_test_exception(nargs="*", metavar=("1",))
def test_nargs_zeroormore_metavar_length2(self):
self.do_test_no_exception(nargs="*", metavar=("1", "2"))
self.do_test_exception(nargs="+", metavar=tuple())
def test_nargs_oneormore_metavar_length1(self):
- self.do_test_no_exception(nargs="+", metavar=("1"))
+ self.do_test_exception(nargs="+", metavar=("1",))
def test_nargs_oneormore_metavar_length2(self):
self.do_test_no_exception(nargs="+", metavar=("1", "2"))
self.do_test_no_exception(nargs="...", metavar=tuple())
def test_nargs_remainder_metavar_length1(self):
- self.do_test_no_exception(nargs="...", metavar=("1"))
+ self.do_test_no_exception(nargs="...", metavar=("1",))
def test_nargs_remainder_metavar_length2(self):
self.do_test_no_exception(nargs="...", metavar=("1", "2"))
self.do_test_exception(nargs="A...", metavar=tuple())
def test_nargs_parser_metavar_length1(self):
- self.do_test_no_exception(nargs="A...", metavar=("1"))
+ self.do_test_no_exception(nargs="A...", metavar=("1",))
def test_nargs_parser_metavar_length2(self):
self.do_test_exception(nargs="A...", metavar=("1", "2"))
self.do_test_exception(nargs=1, metavar=tuple())
def test_nargs_1_metavar_length1(self):
- self.do_test_no_exception(nargs=1, metavar=("1"))
+ self.do_test_no_exception(nargs=1, metavar=("1",))
def test_nargs_1_metavar_length2(self):
self.do_test_exception(nargs=1, metavar=("1", "2"))
self.do_test_exception(nargs=2, metavar=tuple())
def test_nargs_2_metavar_length1(self):
- self.do_test_no_exception(nargs=2, metavar=("1"))
+ self.do_test_exception(nargs=2, metavar=("1",))
def test_nargs_2_metavar_length2(self):
self.do_test_no_exception(nargs=2, metavar=("1", "2"))
self.do_test_exception(nargs=3, metavar=tuple())
def test_nargs_3_metavar_length1(self):
- self.do_test_no_exception(nargs=3, metavar=("1"))
+ self.do_test_exception(nargs=3, metavar=("1",))
def test_nargs_3_metavar_length2(self):
self.do_test_exception(nargs=3, metavar=("1", "2"))
]
self.assertEqual(sorted(items), sorted(argparse.__all__))
+
+class TestWrappingMetavar(TestCase):
+ # Regression test: a metavar containing ']' characters used to break
+ # usage-line wrapping (see the comment in setUp below).
+
+ def setUp(self):
+ self.parser = ErrorRaisingArgumentParser(
+ 'this_is_spammy_prog_with_a_long_name_sorry_about_the_name'
+ )
+ # this metavar was triggering library assertion errors due to usage
+ # message formatting incorrectly splitting on the ] chars within
+ metavar = '<http[s]://example:1234>'
+ self.parser.add_argument('--proxy', metavar=metavar)
+
+ def test_help_with_metavar(self):
+ # The wrapped usage line must keep the bracketed metavar intact.
+ help_text = self.parser.format_help()
+ self.assertEqual(help_text, textwrap.dedent('''\
+ usage: this_is_spammy_prog_with_a_long_name_sorry_about_the_name
+ [-h] [--proxy <http[s]://example:1234>]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --proxy <http[s]://example:1234>
+ '''))
+
+
def test_main():
support.run_unittest(__name__)
# Remove global references to avoid looking like we have refleaks.
def async_iterate(g):
res = []
while True:
+ an = g.__anext__()
+ try:
+ while True:
+ try:
+ an.__next__()
+ except StopIteration as ex:
+ if ex.args:
+ res.append(ex.args[0])
+ break
+ else:
+ res.append('EMPTY StopIteration')
+ break
+ except StopAsyncIteration:
+ raise
+ except Exception as ex:
+ res.append(str(type(ex)))
+ break
+ except StopAsyncIteration:
+ res.append('STOP')
+ break
+ return res
+
+ def async_iterate(g):
+ res = []
+ while True:
try:
g.__anext__().__next__()
except StopAsyncIteration:
"non-None value .* async generator"):
gen().__anext__().send(100)
+ def test_async_gen_exception_11(self):
+ # GeneratorExit thrown into a nested generator must behave the same
+ # for the sync and async variants: it is caught inside the inner
+ # generator's frame and iteration of the wrapper continues.
+ def sync_gen():
+ yield 10
+ yield 20
+
+ def sync_gen_wrapper():
+ yield 1
+ sg = sync_gen()
+ sg.send(None)
+ try:
+ sg.throw(GeneratorExit())
+ except GeneratorExit:
+ yield 2
+ yield 3
+
+ async def async_gen():
+ yield 10
+ yield 20
+
+ async def async_gen_wrapper():
+ yield 1
+ asg = async_gen()
+ await asg.asend(None)
+ try:
+ await asg.athrow(GeneratorExit())
+ except GeneratorExit:
+ yield 2
+ yield 3
+
+ self.compare_generators(sync_gen_wrapper(), async_gen_wrapper())
+
def test_async_gen_api_01(self):
async def gen():
yield 123
outer_loop.close()
+
if __name__ == '__main__':
unittest.main()
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
+ def test_run_in_executor_cancel(self):
+ # After cancelling an executor future and closing the loop, the
+ # executor's completion must not schedule callbacks via call_soon /
+ # call_soon_threadsafe; 'called' must stay False.
+ # NOTE(review): relies on real time.sleep timing — potentially flaky
+ # on very slow machines.
+ called = False
+
+ def patched_call_soon(*args):
+ nonlocal called
+ called = True
+
+ def run():
+ time.sleep(0.05)
+
+ f2 = self.loop.run_in_executor(None, run)
+ f2.cancel()
+ self.loop.close()
+ self.loop.call_soon = patched_call_soon
+ self.loop.call_soon_threadsafe = patched_call_soon
+ time.sleep(0.4)
+ self.assertFalse(called)
+
def test_reader_callback(self):
r, w = test_utils.socketpair()
r.setblocking(False)
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'Coro()')
+ coro = Coro()
+ coro.__qualname__ = 'AAA'
+ coro.cr_code = None
+ self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
+
+ coro = Coro()
+ coro.__qualname__ = 'AAA'
+ coro.cr_code = None
+ coro.cr_frame = None
+ self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
+
+ coro = Coro()
+ coro.__qualname__ = None
+ coro.cr_code = None
+ coro.cr_frame = None
+ self.assertEqual(coroutines._format_coroutine(coro), f'{repr(coro)}()')
+
+ coro = Coro()
+ coro.cr_code = None
+ coro.cr_frame = None
+ self.assertEqual(coroutines._format_coroutine(coro), f'{repr(coro)}()')
+
class TimerTests(unittest.TestCase):
def test_pause_resume_reading(self):
tr = self.socket_transport()
futures = []
- for msg in [b'data1', b'data2', b'data3', b'data4', b'']:
+ for msg in [b'data1', b'data2', b'data3', b'data4', b'data5', b'']:
f = asyncio.Future(loop=self.loop)
f.set_result(msg)
futures.append(f)
self.protocol.data_received.assert_called_with(b'data3')
self.loop._run_once()
self.protocol.data_received.assert_called_with(b'data4')
+
+ tr.pause_reading()
+ tr.resume_reading()
+ self.loop.call_exception_handler = mock.Mock()
+ self.loop._run_once()
+ self.loop.call_exception_handler.assert_not_called()
+ self.protocol.data_received.assert_called_with(b'data5')
tr.close()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
+ def test__add_reader(self):
+ # _add_reader() registers a reader while the transport is open and
+ # must be a no-op once the transport has been force-closed.
+ tr = self.create_transport()
+ tr._buffer.extend(b'1')
+ tr._add_reader(7, mock.sentinel)
+ self.assertTrue(self.loop.readers)
+
+ tr._force_close(None)
+
+ self.assertTrue(tr.is_closing())
+ self.assertFalse(self.loop.readers)
+
+ # can not add readers after closing
+ tr._add_reader(7, mock.sentinel)
+ self.assertFalse(self.loop.readers)
+
class SelectorSocketTransportTests(test_utils.TestCase):
self.sock.shutdown.assert_called_with(socket.SHUT_WR)
tr.close()
+ def test_write_eof_after_close(self):
+ # write_eof() on an already-closed transport must not raise.
+ tr = self.socket_transport()
+ tr.close()
+ self.loop.run_until_complete(asyncio.sleep(0))
+ tr.write_eof()
+
@mock.patch('asyncio.base_events.logger')
def test_transport_close_remove_writer(self, m_log):
remove_writer = self.loop._remove_writer = mock.Mock()
return []
waiter.cancel()
- self.connection_made(ssl_proto, do_handshake=do_handshake)
with test_utils.disable_logger():
+ self.connection_made(ssl_proto, do_handshake=do_handshake)
self.loop.run_until_complete(handshake_fut)
def test_eof_received_waiter(self):
@asyncio.coroutine
def write_stdin(proc, data):
+ yield from asyncio.sleep(0.5, loop=self.loop)
proc.stdin.write(data)
yield from proc.stdin.drain()
def test_cancel_wait_for(self):
self._test_cancel_wait_for(60.0)
- def test_cancel_gather(self):
+ def test_cancel_gather_1(self):
"""Ensure that a gathering future refuses to be cancelled once all
children are done"""
loop = asyncio.new_event_loop()
self.assertFalse(gather_task.cancelled())
self.assertEqual(gather_task.result(), [42])
+ def test_cancel_gather_2(self):
+ # Cancelling a task that repeatedly awaits
+ # gather(..., return_exceptions=True) must propagate CancelledError
+ # out of the await instead of swallowing it.
+ loop = asyncio.new_event_loop()
+ self.addCleanup(loop.close)
+
+ async def test():
+ time = 0
+ while True:
+ time += 0.05
+ await asyncio.gather(asyncio.sleep(0.05, loop=loop),
+ return_exceptions=True,
+ loop=loop)
+ if time > 1:
+ return
+
+ async def main():
+ qwe = self.new_task(loop, test())
+ await asyncio.sleep(0.2, loop=loop)
+ qwe.cancel()
+ try:
+ await qwe
+ except asyncio.CancelledError:
+ pass
+ else:
+ self.fail('gather did not propagate the cancellation request')
+
+ loop.run_until_complete(main())
+
def test_exception_traceback(self):
# See http://bugs.python.org/issue28843
self.assertFalse(m_log.error.called)
with self.assertRaises(ValueError):
- self.new_task(self.loop, coro())
+ gen = coro()
+ try:
+ self.new_task(self.loop, gen)
+ finally:
+ gen.close()
self.assertTrue(m_log.error.called)
message = m_log.error.call_args[0][0]
--- /dev/null
+""" Test the bdb module.
+
+ A test defines a list of tuples that may be seen as paired tuples, each
+ pair being defined by 'expect_tuple, set_tuple' as follows:
+
+ ([event, [lineno[, co_name[, eargs]]]]), (set_type, [sargs])
+
+ * 'expect_tuple' describes the expected current state of the Bdb instance.
+ It may be the empty tuple and no check is done in that case.
+ * 'set_tuple' defines the set_*() method to be invoked when the Bdb
+ instance reaches this state.
+
+ Example of an 'expect_tuple, set_tuple' pair:
+
+ ('line', 2, 'tfunc_main'), ('step', )
+
+ Definitions of the members of the 'expect_tuple':
+ event:
+ Name of the trace event. The set methods that do not give back
+ control to the tracer [1] do not trigger a tracer event and in
+ that case the next 'event' may be 'None' by convention, its value
+ is not checked.
+ [1] Methods that trigger a trace event are set_step(), set_next(),
+ set_return(), set_until() and set_continue().
+ lineno:
+ Line number. Line numbers are relative to the start of the
+ function when tracing a function in the test_bdb module (i.e. this
+ module).
+ co_name:
+ Name of the function being currently traced.
+ eargs:
+ A tuple:
+ * On an 'exception' event the tuple holds a class object, the
+ current exception must be an instance of this class.
+ * On a 'line' event, the tuple holds a dictionary and a list. The
+ dictionary maps each breakpoint number that has been hit on this
+ line to its hits count. The list holds the list of breakpoint
+ number temporaries that are being deleted.
+
+ Definitions of the members of the 'set_tuple':
+ set_type:
+ The type of the set method to be invoked. This may
+ be the type of one of the Bdb set methods: 'step', 'next',
+ 'until', 'return', 'continue', 'break', 'quit', or the type of one
+ of the set methods added by test_bdb.Bdb: 'ignore', 'enable',
+ 'disable', 'clear', 'up', 'down'.
+ sargs:
+ The arguments of the set method if any, packed in a tuple.
+"""
+
+import bdb as _bdb
+import sys
+import os
+import unittest
+import textwrap
+import importlib
+import linecache
+from contextlib import contextmanager
+from itertools import islice, repeat
+import test.support
+
+# Exception hierarchy used by this test machinery; a BdbNotExpectedError is
+# converted into a unittest failure by TracerRun.__exit__().
+class BdbException(Exception): pass
+class BdbError(BdbException): """Error raised by the Bdb instance."""
+class BdbSyntaxError(BdbException): """Syntax error in the test case."""
+class BdbNotExpectedError(BdbException): """Unexpected result."""
+
+# When 'dry_run' is set to true, expect tuples are ignored and the actual
+# state of the tracer is printed after running each set_*() method of the test
+# case. The full list of breakpoints and their attributes is also printed
+# after each 'line' event where a breakpoint has been hit.
+dry_run = 0
+
+def reset_Breakpoint():
+ """Reset the bdb.Breakpoint class attributes to their initial state."""
+ _bdb.Breakpoint.next = 1
+ _bdb.Breakpoint.bplist = {}
+ _bdb.Breakpoint.bpbynumber = [None]
+
+def info_breakpoints():
+ """Return a table describing all existing breakpoints ('' when none)."""
+ bp_list = [bp for bp in _bdb.Breakpoint.bpbynumber if bp]
+ if not bp_list:
+ return ''
+
+ # Emit the column header once, before the first breakpoint row.
+ header_added = False
+ for bp in bp_list:
+ if not header_added:
+ info = 'BpNum Temp Enb Hits Ignore Where\n'
+ header_added = True
+
+ disp = 'yes ' if bp.temporary else 'no '
+ enab = 'yes' if bp.enabled else 'no '
+ info += ('%-5d %s %s %-4d %-6d at %s:%d' %
+ (bp.number, disp, enab, bp.hits, bp.ignore,
+ os.path.basename(bp.file), bp.line))
+ if bp.cond:
+ info += '\n\tstop only if %s' % (bp.cond,)
+ info += '\n'
+ return info
+
+class Bdb(_bdb.Bdb):
+ """Extend Bdb to enhance test coverage."""
+
+ def trace_dispatch(self, frame, event, arg):
+ # Reset currentbp so each dispatched event starts with no
+ # breakpoint recorded as hit.
+ self.currentbp = None
+ return super().trace_dispatch(frame, event, arg)
+
+ def set_break(self, filename, lineno, temporary=False, cond=None,
+ funcname=None):
+ """Set a breakpoint; raise BdbError when bdb reports an error.
+
+ When 'funcname' is a string, the function object is looked up (in
+ this module or the imported 'filename' module) and the breakpoint
+ location is taken from its code object.
+ """
+ if isinstance(funcname, str):
+ if filename == __file__:
+ globals_ = globals()
+ else:
+ module = importlib.import_module(filename[:-3])
+ globals_ = module.__dict__
+ func = eval(funcname, globals_)
+ code = func.__code__
+ filename = code.co_filename
+ lineno = code.co_firstlineno
+ funcname = code.co_name
+
+ res = super().set_break(filename, lineno, temporary=temporary,
+ cond=cond, funcname=funcname)
+ if isinstance(res, str):
+ raise BdbError(res)
+ return res
+
+ def get_stack(self, f, t):
+ # Also record stack/index/frame so set_up()/set_down() can walk
+ # the frame stack afterwards.
+ self.stack, self.index = super().get_stack(f, t)
+ self.frame = self.stack[self.index][0]
+ return self.stack, self.index
+
+ def set_ignore(self, bpnum):
+ """Increment the ignore count of Breakpoint number 'bpnum'."""
+ bp = self.get_bpbynumber(bpnum)
+ bp.ignore += 1
+
+ def set_enable(self, bpnum):
+ """Enable Breakpoint number 'bpnum'."""
+ bp = self.get_bpbynumber(bpnum)
+ bp.enabled = True
+
+ def set_disable(self, bpnum):
+ """Disable Breakpoint number 'bpnum'."""
+ bp = self.get_bpbynumber(bpnum)
+ bp.enabled = False
+
+ def set_clear(self, fname, lineno):
+ """Clear the breakpoint at (fname, lineno); raise BdbError on failure."""
+ err = self.clear_break(fname, lineno)
+ if err:
+ raise BdbError(err)
+
+ def set_up(self):
+ """Move up in the frame stack."""
+ if not self.index:
+ raise BdbError('Oldest frame')
+ self.index -= 1
+ self.frame = self.stack[self.index][0]
+
+ def set_down(self):
+ """Move down in the frame stack."""
+ if self.index + 1 == len(self.stack):
+ raise BdbError('Newest frame')
+ self.index += 1
+ self.frame = self.stack[self.index][0]
+
+class Tracer(Bdb):
+ """A tracer for testing the bdb module."""
+
+ def __init__(self, expect_set, skip=None, dry_run=False, test_case=None):
+ super().__init__(skip=skip)
+ self.expect_set = expect_set
+ self.dry_run = dry_run
+ self.header = ('Dry-run results for %s:' % test_case if
+ test_case is not None else None)
+ self.init_test()
+
+ def init_test(self):
+ """Initialize per-test state from 'expect_set'.
+
+ 'expect_set' is split into the even-indexed expect tuples and the
+ odd-indexed set tuples (see the module docstring).
+ """
+ self.cur_except = None
+ self.expect_set_no = 0
+ self.breakpoint_hits = None
+ self.expected_list = list(islice(self.expect_set, 0, None, 2))
+ self.set_list = list(islice(self.expect_set, 1, None, 2))
+
+ def trace_dispatch(self, frame, event, arg):
+ # On an 'exception' event, call_exc_trace() in Python/ceval.c discards
+ # a BdbException raised by the Tracer instance, so we raise it on the
+ # next trace_dispatch() call that occurs unless the set_quit() or
+ # set_continue() method has been invoked on the 'exception' event.
+ if self.cur_except is not None:
+ raise self.cur_except
+
+ if event == 'exception':
+ try:
+ res = super().trace_dispatch(frame, event, arg)
+ return res
+ except BdbException as e:
+ # Remember the exception; re-raised on the next dispatch.
+ self.cur_except = e
+ return self.trace_dispatch
+ else:
+ return super().trace_dispatch(frame, event, arg)
+
+ def user_call(self, frame, argument_list):
+ # Adopt the same behavior as pdb and, as a side effect, skip also the
+ # first 'call' event when the Tracer is started with Tracer.runcall()
+ # which may be possibly considered as a bug.
+ if not self.stop_here(frame):
+ return
+ self.process_event('call', frame, argument_list)
+ self.next_set_method()
+
+ def user_line(self, frame):
+ # Validate the 'line' event, then (dry run) print the breakpoints
+ # that were hit on this line.
+ self.process_event('line', frame)
+
+ if self.dry_run and self.breakpoint_hits:
+ info = info_breakpoints().strip('\n')
+ # Indent each line.
+ for line in info.split('\n'):
+ print(' ' + line)
+ self.delete_temporaries()
+ self.breakpoint_hits = None
+
+ self.next_set_method()
+
+ def user_return(self, frame, return_value):
+ self.process_event('return', frame, return_value)
+ self.next_set_method()
+
+ def user_exception(self, frame, exc_info):
+ # Keep exc_info so process_event()/get_state() can inspect it.
+ self.exc_info = exc_info
+ self.process_event('exception', frame)
+ self.next_set_method()
+
+ def do_clear(self, arg):
+ # The temporary breakpoints are deleted in user_line().
+ bp_list = [self.currentbp]
+ self.breakpoint_hits = (bp_list, bp_list)
+
+ def delete_temporaries(self):
+ """Delete the temporary breakpoints recorded in breakpoint_hits."""
+ if self.breakpoint_hits:
+ for n in self.breakpoint_hits[1]:
+ self.clear_bpbynumber(n)
+
+ def pop_next(self):
+ """Advance to the next expect/set pair.
+
+ Raise BdbNotExpectedError when the expect_set list is exhausted.
+ """
+ self.expect_set_no += 1
+ try:
+ self.expect = self.expected_list.pop(0)
+ except IndexError:
+ raise BdbNotExpectedError(
+ 'expect_set list exhausted, cannot pop item %d' %
+ self.expect_set_no)
+ self.set_tuple = self.set_list.pop(0)
+
+ def process_event(self, event, frame, *args):
+ """Record the trace event and validate it against the expect tuple."""
+ # Call get_stack() to enable walking the stack with set_up() and
+ # set_down().
+ tb = None
+ if event == 'exception':
+ tb = self.exc_info[2]
+ self.get_stack(frame, tb)
+
+ # A breakpoint has been hit and it is not a temporary.
+ if self.currentbp is not None and not self.breakpoint_hits:
+ bp_list = [self.currentbp]
+ self.breakpoint_hits = (bp_list, [])
+
+ # Pop next event.
+ self.event= event
+ self.pop_next()
+ if self.dry_run:
+ self.print_state(self.header)
+ return
+
+ # Validate the expected results.
+ if self.expect:
+ self.check_equal(self.expect[0], event, 'Wrong event type')
+ self.check_lno_name()
+
+ if event in ('call', 'return'):
+ self.check_expect_max_size(3)
+ elif len(self.expect) > 3:
+ if event == 'line':
+ # expect[3] is ({bpnum: hits}, [deleted temporaries]).
+ bps, temporaries = self.expect[3]
+ bpnums = sorted(bps.keys())
+ if not self.breakpoint_hits:
+ self.raise_not_expected(
+ 'No breakpoints hit at expect_set item %d' %
+ self.expect_set_no)
+ self.check_equal(bpnums, self.breakpoint_hits[0],
+ 'Breakpoint numbers do not match')
+ self.check_equal([bps[n] for n in bpnums],
+ [self.get_bpbynumber(n).hits for
+ n in self.breakpoint_hits[0]],
+ 'Wrong breakpoint hit count')
+ self.check_equal(sorted(temporaries), self.breakpoint_hits[1],
+ 'Wrong temporary breakpoints')
+
+ elif event == 'exception':
+ # expect[3] is the expected exception class.
+ if not isinstance(self.exc_info[1], self.expect[3]):
+ self.raise_not_expected(
+ "Wrong exception at expect_set item %d, got '%s'" %
+ (self.expect_set_no, self.exc_info))
+
+ def check_equal(self, expected, result, msg):
+ """Raise BdbNotExpectedError unless expected == result."""
+ if expected == result:
+ return
+ self.raise_not_expected("%s at expect_set item %d, got '%s'" %
+ (msg, self.expect_set_no, result))
+
+ def check_lno_name(self):
+ """Check the line number and function co_name."""
+ s = len(self.expect)
+ if s > 1:
+ lineno = self.lno_abs2rel()
+ self.check_equal(self.expect[1], lineno, 'Wrong line number')
+ if s > 2:
+ self.check_equal(self.expect[2], self.frame.f_code.co_name,
+ 'Wrong function name')
+
+ def check_expect_max_size(self, size):
+ """Raise BdbSyntaxError when the expect tuple has too many members."""
+ if len(self.expect) > size:
+ raise BdbSyntaxError('Invalid size of the %s expect tuple: %s' %
+ (self.event, self.expect))
+
+ def lno_abs2rel(self):
+ # Line numbers in this module are made relative to the function start
+ # (see the module docstring); other files keep absolute numbers.
+ fname = self.canonic(self.frame.f_code.co_filename)
+ lineno = self.frame.f_lineno
+ return ((lineno - self.frame.f_code.co_firstlineno + 1)
+ if fname == self.canonic(__file__) else lineno)
+
+ def lno_rel2abs(self, fname, lineno):
+ # Inverse of lno_abs2rel() for set_tuple arguments.
+ return (self.frame.f_code.co_firstlineno + lineno - 1
+ if (lineno and self.canonic(fname) == self.canonic(__file__))
+ else lineno)
+
+ def get_state(self):
+ """Format the current state as an "expect_tuple, set_tuple" line."""
+ lineno = self.lno_abs2rel()
+ co_name = self.frame.f_code.co_name
+ state = "('%s', %d, '%s'" % (self.event, lineno, co_name)
+ if self.breakpoint_hits:
+ bps = '{'
+ for n in self.breakpoint_hits[0]:
+ if bps != '{':
+ bps += ', '
+ bps += '%s: %s' % (n, self.get_bpbynumber(n).hits)
+ bps += '}'
+ bps = '(' + bps + ', ' + str(self.breakpoint_hits[1]) + ')'
+ state += ', ' + bps
+ elif self.event == 'exception':
+ state += ', ' + self.exc_info[0].__name__
+ state += '), '
+ return state.ljust(32) + str(self.set_tuple) + ','
+
+ def print_state(self, header=None):
+ """Print the current state (dry-run output)."""
+ if header is not None and self.expect_set_no == 1:
+ print()
+ print(header)
+ print('%d: %s' % (self.expect_set_no, self.get_state()))
+
+ def raise_not_expected(self, msg):
+ """Raise BdbNotExpectedError with expected vs actual state."""
+ msg += '\n'
+ msg += ' Expected: %s\n' % str(self.expect)
+ msg += ' Got: ' + self.get_state()
+ raise BdbNotExpectedError(msg)
+
+ def next_set_method(self):
+ """Invoke the set_*() method named by the current set_tuple."""
+ set_type = self.set_tuple[0]
+ args = self.set_tuple[1] if len(self.set_tuple) == 2 else None
+ set_method = getattr(self, 'set_' + set_type)
+
+ # The following set methods give back control to the tracer.
+ if set_type in ('step', 'continue', 'quit'):
+ set_method()
+ return
+ elif set_type in ('next', 'return'):
+ set_method(self.frame)
+ return
+ elif set_type == 'until':
+ lineno = None
+ if args:
+ lineno = self.lno_rel2abs(self.frame.f_code.co_filename,
+ args[0])
+ set_method(self.frame, lineno)
+ return
+
+ # The following set methods do not give back control to the tracer and
+ # next_set_method() is called recursively.
+ if (args and set_type in ('break', 'clear', 'ignore', 'enable',
+ 'disable')) or set_type in ('up', 'down'):
+ if set_type in ('break', 'clear'):
+ fname, lineno, *remain = args
+ lineno = self.lno_rel2abs(fname, lineno)
+ args = [fname, lineno]
+ args.extend(remain)
+ set_method(*args)
+ elif set_type in ('ignore', 'enable', 'disable'):
+ set_method(*args)
+ elif set_type in ('up', 'down'):
+ set_method()
+
+ # Process the next expect_set item.
+ # It is not expected that a test may reach the recursion limit.
+ self.event= None
+ self.pop_next()
+ if self.dry_run:
+ self.print_state()
+ else:
+ if self.expect:
+ self.check_lno_name()
+ self.check_expect_max_size(3)
+ self.next_set_method()
+ else:
+ raise BdbSyntaxError('"%s" is an invalid set_tuple' %
+ self.set_tuple)
+
+class TracerRun():
+ """Provide a context for running a Tracer instance with a test case."""
+
+ def __init__(self, test_case, skip=None):
+ self.test_case = test_case
+ self.dry_run = test_case.dry_run
+ self.tracer = Tracer(test_case.expect_set, skip=skip,
+ dry_run=self.dry_run, test_case=test_case.id())
+ self._original_tracer = None
+
+ def __enter__(self):
+ # test_pdb does not reset Breakpoint class attributes on exit :-(
+ reset_Breakpoint()
+ self._original_tracer = sys.gettrace()
+ return self.tracer
+
+ def __exit__(self, type_=None, value=None, traceback=None):
+ # Restore the previous trace function and turn Tracer diagnostics
+ # into unittest failures (or prints, in dry-run mode).
+ reset_Breakpoint()
+ sys.settrace(self._original_tracer)
+
+ not_empty = ''
+ if self.tracer.set_list:
+ not_empty += 'All paired tuples have not been processed, '
+ not_empty += ('the last one was number %d' %
+ self.tracer.expect_set_no)
+
+ # Make a BdbNotExpectedError a unittest failure.
+ # NOTE(review): the argument order issubclass(BdbNotExpectedError,
+ # type_) matches only when type_ is BdbNotExpectedError or one of its
+ # bases; confirm this inversion is intended.
+ if type_ is not None and issubclass(BdbNotExpectedError, type_):
+ if isinstance(value, BaseException) and value.args:
+ err_msg = value.args[0]
+ if not_empty:
+ err_msg += '\n' + not_empty
+ if self.dry_run:
+ print(err_msg)
+ return True
+ else:
+ self.test_case.fail(err_msg)
+ else:
+ assert False, 'BdbNotExpectedError with empty args'
+
+ if not_empty:
+ if self.dry_run:
+ print(not_empty)
+ else:
+ self.test_case.fail(not_empty)
+
+def run_test(modules, set_list, skip=None):
+ """Run a test and print the dry-run results.
+
+ 'modules': A dictionary mapping module names to their source code as a
+ string. The dictionary MUST include one module named
+ 'test_module' with a main() function.
+ 'set_list': A list of set_type tuples to be run on the module.
+
+ For example, running the following script outputs the following results:
+
+ ***************************** SCRIPT ********************************
+
+ from test.test_bdb import run_test, break_in_func
+
+ code = '''
+ def func():
+ lno = 3
+
+ def main():
+ func()
+ lno = 7
+ '''
+
+ set_list = [
+ break_in_func('func', 'test_module.py'),
+ ('continue', ),
+ ('step', ),
+ ('step', ),
+ ('step', ),
+ ('quit', ),
+ ]
+
+ modules = { 'test_module': code }
+ run_test(modules, set_list)
+
+ **************************** results ********************************
+
+ 1: ('line', 2, 'tfunc_import'), ('next',),
+ 2: ('line', 3, 'tfunc_import'), ('step',),
+ 3: ('call', 5, 'main'), ('break', ('test_module.py', None, False, None, 'func')),
+ 4: ('None', 5, 'main'), ('continue',),
+ 5: ('line', 3, 'func', ({1: 1}, [])), ('step',),
+ BpNum Temp Enb Hits Ignore Where
+ 1 no yes 1 0 at test_module.py:2
+ 6: ('return', 3, 'func'), ('step',),
+ 7: ('line', 7, 'main'), ('step',),
+ 8: ('return', 7, 'main'), ('quit',),
+
+ *************************************************************************
+
+ """
+ def gen(a, b):
+ # Interleave items of 'a' and 'b' pairwise until either iterator
+ # is exhausted.
+ try:
+ while 1:
+ x = next(a)
+ y = next(b)
+ yield x
+ yield y
+ except StopIteration:
+ return
+
+ # Step over the import statement in tfunc_import using 'next' and step
+ # into main() in test_module.
+ sl = [('next', ), ('step', )]
+ sl.extend(set_list)
+
+ # Build a dry-run test case: empty expect tuples paired with set tuples.
+ test = BaseTestCase()
+ test.dry_run = True
+ test.id = lambda : None
+ test.expect_set = list(gen(repeat(()), iter(sl)))
+ with create_modules(modules):
+ sys.path.append(os.getcwd())
+ with TracerRun(test, skip=skip) as tracer:
+ tracer.runcall(tfunc_import)
+
+@contextmanager
+def create_modules(modules):
+ """Write the 'modules' sources in a temporary cwd; clean up on exit."""
+ with test.support.temp_cwd():
+ try:
+ for m in modules:
+ fname = m + '.py'
+ with open(fname, 'w') as f:
+ f.write(textwrap.dedent(modules[m]))
+ linecache.checkcache(fname)
+ importlib.invalidate_caches()
+ yield
+ finally:
+ # Drop the modules from sys.modules so later tests reimport them.
+ for m in modules:
+ test.support.forget(m)
+
+def break_in_func(funcname, fname=__file__, temporary=False, cond=None):
+ # Return a 'break' set_tuple; the line number (None here) is resolved
+ # from 'funcname' by Bdb.set_break().
+ return 'break', (fname, None, temporary, cond, funcname)
+
+TEST_MODULE = 'test_module'
+TEST_MODULE_FNAME = TEST_MODULE + '.py'
+# Functions traced by the tests; per the module docstring, expected line
+# numbers are relative to the start of each function.
+def tfunc_import():
+ import test_module
+ test_module.main()
+
+def tfunc_main():
+ lno = 2
+ tfunc_first()
+ tfunc_second()
+ lno = 5
+ lno = 6
+ lno = 7
+
+def tfunc_first():
+ lno = 2
+ lno = 3
+ lno = 4
+
+def tfunc_second():
+ lno = 2
+
+class BaseTestCase(unittest.TestCase):
+ """Base class for all tests."""
+
+ # Snapshot of the module-level 'dry_run' flag as a class attribute.
+ dry_run = dry_run
+
+ def fail(self, msg=None):
+ # Override fail() to use 'raise from None' to avoid repetition of the
+ # error message and traceback.
+ raise self.failureException(msg) from None
+
+class StateTestCase(BaseTestCase):
+ """Test the step, next, return, until and quit 'set_' methods."""
+
+ def test_step(self):
+ # Stepping stops at every line/call event, one at a time.
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('step', ),
+ ('line', 2, 'tfunc_first'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_step_next_on_last_statement(self):
+ # Both 'step' and 'next' advance to the following line when issued
+ # on the last statement reached via a breakpoint.
+ for set_type in ('step', 'next'):
+ with self.subTest(set_type=set_type):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('break', (__file__, 3)),
+ ('None', 1, 'tfunc_first'), ('continue', ),
+ ('line', 3, 'tfunc_first', ({1:1}, [])), (set_type, ),
+ ('line', 4, 'tfunc_first'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_next(self):
+ # 'next' steps over the tfunc_first() call without entering it.
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('next', ),
+ ('line', 4, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_second'), ('step', ),
+ ('line', 2, 'tfunc_second'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_next_over_import(self):
+ code = """
+ def main():
+ lno = 3
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('next', ),
+ ('line', 3, 'tfunc_import'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_next_on_plain_statement(self):
+ # Check that set_next() is equivalent to set_step() on a plain
+ # statement.
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('next', ),
+ ('line', 2, 'tfunc_first'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_next_in_caller_frame(self):
+ # Check that set_next() in the caller frame causes the tracer
+ # to stop next in the caller frame.
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('up', ),
+ ('None', 3, 'tfunc_main'), ('next', ),
+ ('line', 4, 'tfunc_main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_return(self):
+ # 'return' runs until the current function is about to return.
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('step', ),
+ ('line', 2, 'tfunc_first'), ('return', ),
+ ('return', 4, 'tfunc_first'), ('step', ),
+ ('line', 4, 'tfunc_main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_return_in_caller_frame(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('up', ),
+ ('None', 3, 'tfunc_main'), ('return', ),
+ ('return', 7, 'tfunc_main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_until(self):
+ # 'until' runs to a line greater than the given one in the same frame.
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('step', ),
+ ('line', 2, 'tfunc_first'), ('until', (4, )),
+ ('line', 4, 'tfunc_first'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_until_with_too_large_count(self):
+ # An 'until' target past the function end stops at the 'return' event.
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), break_in_func('tfunc_first'),
+ ('None', 2, 'tfunc_main'), ('continue', ),
+ ('line', 2, 'tfunc_first', ({1:1}, [])), ('until', (9999, )),
+ ('return', 4, 'tfunc_first'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_until_in_caller_frame(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('up', ),
+ ('None', 3, 'tfunc_main'), ('until', (6, )),
+ ('line', 6, 'tfunc_main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_skip(self):
+ # Check that tracing is skipped over the import statement in
+ # 'tfunc_import()'.
+ code = """
+ def main():
+ lno = 3
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('step', ),
+ ('line', 3, 'tfunc_import'), ('quit', ),
+ ]
+ skip = ('importlib*', TEST_MODULE)
+ with TracerRun(self, skip=skip) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_down(self):
+ # Check that set_down() raises BdbError at the newest frame.
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('down', ),
+ ]
+ with TracerRun(self) as tracer:
+ self.assertRaises(BdbError, tracer.runcall, tfunc_main)
+
+ def test_up(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('up', ),
+ ('None', 3, 'tfunc_main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+class BreakpointTestCase(BaseTestCase):
+ """Test the breakpoint set method."""
+
+ def test_bp_on_non_existent_module(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('break', ('/non/existent/module.py', 1))
+ ]
+ with TracerRun(self) as tracer:
+ self.assertRaises(BdbError, tracer.runcall, tfunc_import)
+
+ def test_bp_after_last_statement(self):
+ code = """
+ def main():
+ lno = 3
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('break', (TEST_MODULE_FNAME, 4))
+ ]
+ with TracerRun(self) as tracer:
+ self.assertRaises(BdbError, tracer.runcall, tfunc_import)
+
+ def test_temporary_bp(self):
+ code = """
+ def func():
+ lno = 3
+
+ def main():
+ for i in range(2):
+ func()
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME, True),
+ ('None', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME, True),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:1}, [1])), ('continue', ),
+ ('line', 3, 'func', ({2:1}, [2])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_disabled_temporary_bp(self):
+ code = """
+ def func():
+ lno = 3
+
+ def main():
+ for i in range(3):
+ func()
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME, True),
+ ('None', 2, 'tfunc_import'), ('disable', (2, )),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:1}, [])), ('enable', (2, )),
+ ('None', 3, 'func'), ('disable', (1, )),
+ ('None', 3, 'func'), ('continue', ),
+ ('line', 3, 'func', ({2:1}, [2])), ('enable', (1, )),
+ ('None', 3, 'func'), ('continue', ),
+ ('line', 3, 'func', ({1:2}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_bp_condition(self):
+ code = """
+ def func(a):
+ lno = 3
+
+ def main():
+ for i in range(3):
+ func(i)
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME, False, 'a == 2'),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:3}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_bp_exception_on_condition_evaluation(self):
+ code = """
+ def func(a):
+ lno = 3
+
+ def main():
+ func(0)
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME, False, '1 / 0'),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:1}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_bp_ignore_count(self):
+ code = """
+ def func():
+ lno = 3
+
+ def main():
+ for i in range(2):
+ func()
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'), ('ignore', (1, )),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:2}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_ignore_count_on_disabled_bp(self):
+ code = """
+ def func():
+ lno = 3
+
+ def main():
+ for i in range(3):
+ func()
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'), ('ignore', (1, )),
+ ('None', 2, 'tfunc_import'), ('disable', (1, )),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({2:1}, [])), ('enable', (1, )),
+ ('None', 3, 'func'), ('continue', ),
+ ('line', 3, 'func', ({2:2}, [])), ('continue', ),
+ ('line', 3, 'func', ({1:2}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_clear_two_bp_on_same_line(self):
+ code = """
+ def func():
+ lno = 3
+ lno = 4
+
+ def main():
+ for i in range(3):
+ func()
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('break', (TEST_MODULE_FNAME, 3)),
+ ('None', 2, 'tfunc_import'), ('break', (TEST_MODULE_FNAME, 3)),
+ ('None', 2, 'tfunc_import'), ('break', (TEST_MODULE_FNAME, 4)),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:1}, [])), ('continue', ),
+ ('line', 4, 'func', ({3:1}, [])), ('clear', (TEST_MODULE_FNAME, 3)),
+ ('None', 4, 'func'), ('continue', ),
+ ('line', 4, 'func', ({3:2}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_clear_at_no_bp(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('clear', (__file__, 1))
+ ]
+ with TracerRun(self) as tracer:
+ self.assertRaises(BdbError, tracer.runcall, tfunc_import)
+
+class RunTestCase(BaseTestCase):
+ """Test run, runeval and set_trace."""
+
+ def test_run_step(self):
+ # Check that the bdb 'run' method stops at the first line event.
+ code = """
+ lno = 2
+ """
+ self.expect_set = [
+ ('line', 2, '<module>'), ('step', ),
+ ('return', 2, '<module>'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.run(compile(textwrap.dedent(code), '<string>', 'exec'))
+
+ def test_runeval_step(self):
+ # Test bdb 'runeval'.
+ code = """
+ def main():
+ lno = 3
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 1, '<module>'), ('step', ),
+ ('call', 2, 'main'), ('step', ),
+ ('line', 3, 'main'), ('step', ),
+ ('return', 3, 'main'), ('step', ),
+ ('return', 1, '<module>'), ('quit', ),
+ ]
+ import test_module
+ with TracerRun(self) as tracer:
+ tracer.runeval('test_module.main()', globals(), locals())
+
+class IssuesTestCase(BaseTestCase):
+ """Test fixed bdb issues."""
+
+ def test_step_at_return_with_no_trace_in_caller(self):
+ # Issue #13183.
+ # Check that the tracer does step into the caller frame when the
+ # trace function is not set in that frame.
+ code_1 = """
+ from test_module_2 import func
+ def main():
+ func()
+ lno = 5
+ """
+ code_2 = """
+ def func():
+ lno = 3
+ """
+ modules = {
+ TEST_MODULE: code_1,
+ 'test_module_2': code_2,
+ }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', 'test_module_2.py'),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:1}, [])), ('step', ),
+ ('return', 3, 'func'), ('step', ),
+ ('line', 5, 'main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_next_until_return_in_generator(self):
+ # Issue #16596.
+ # Check that set_next(), set_until() and set_return() do not treat the
+ # `yield` and `yield from` statements as if they were returns and stop
+ # instead in the current frame.
+ code = """
+ def test_gen():
+ yield 0
+ lno = 4
+ return 123
+
+ def main():
+ it = test_gen()
+ next(it)
+ next(it)
+ lno = 11
+ """
+ modules = { TEST_MODULE: code }
+ for set_type in ('next', 'until', 'return'):
+ with self.subTest(set_type=set_type):
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('test_gen', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'test_gen', ({1:1}, [])), (set_type, ),
+ ]
+
+ if set_type == 'return':
+ self.expect_set.extend(
+ [('exception', 10, 'main', StopIteration), ('step',),
+ ('return', 10, 'main'), ('quit', ),
+ ]
+ )
+ else:
+ self.expect_set.extend(
+ [('line', 4, 'test_gen'), ('quit', ),]
+ )
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_next_command_in_generator_for_loop(self):
+ # Issue #16596.
+ code = """
+ def test_gen():
+ yield 0
+ lno = 4
+ yield 1
+ return 123
+
+ def main():
+ for i in test_gen():
+ lno = 10
+ lno = 11
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('test_gen', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'test_gen', ({1:1}, [])), ('next', ),
+ ('line', 4, 'test_gen'), ('next', ),
+ ('line', 5, 'test_gen'), ('next', ),
+ ('line', 6, 'test_gen'), ('next', ),
+ ('exception', 9, 'main', StopIteration), ('step', ),
+ ('line', 11, 'main'), ('quit', ),
+
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_next_command_in_generator_with_subiterator(self):
+ # Issue #16596.
+ code = """
+ def test_subgen():
+ yield 0
+ return 123
+
+ def test_gen():
+ x = yield from test_subgen()
+ return 456
+
+ def main():
+ for i in test_gen():
+ lno = 12
+ lno = 13
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('test_gen', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 7, 'test_gen', ({1:1}, [])), ('next', ),
+ ('line', 8, 'test_gen'), ('next', ),
+ ('exception', 11, 'main', StopIteration), ('step', ),
+ ('line', 13, 'main'), ('quit', ),
+
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_return_command_in_generator_with_subiterator(self):
+ # Issue #16596.
+ code = """
+ def test_subgen():
+ yield 0
+ return 123
+
+ def test_gen():
+ x = yield from test_subgen()
+ return 456
+
+ def main():
+ for i in test_gen():
+ lno = 12
+ lno = 13
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('test_subgen', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'test_subgen', ({1:1}, [])), ('return', ),
+ ('exception', 7, 'test_gen', StopIteration), ('return', ),
+ ('exception', 11, 'main', StopIteration), ('step', ),
+ ('line', 13, 'main'), ('quit', ),
+
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+def test_main():
+ test.support.run_unittest(
+ StateTestCase,
+ RunTestCase,
+ BreakpointTestCase,
+ IssuesTestCase,
+ )
+
+if __name__ == "__main__":
+ test_main()
out = out.decode(sys.getfilesystemencoding())
self.assertIn("ValueError", out)
self.assertIn("Hello World", out)
+ self.assertIn("<strong><module></strong>", out)
# By default we emit HTML markup.
self.assertIn('<p>', out)
self.assertIn('</p>', out)
pass
with self.assertRaisesRegex(
- TypeError, "object int can't be used in 'await' expression"):
+ TypeError,
+ "'async with' received an object from __aenter__ "
+ "that does not implement __await__: int"):
# it's important that __aexit__ wasn't called
run_async(foo())
def __aexit__(self, *e):
return 444
+ # Exit with exception
async def foo():
async with CM():
1/0
run_async(foo())
except TypeError as exc:
self.assertRegex(
- exc.args[0], "object int can't be used in 'await' expression")
+ exc.args[0],
+ "'async with' received an object from __aexit__ "
+ "that does not implement __await__: int")
self.assertTrue(exc.__context__ is not None)
self.assertTrue(isinstance(exc.__context__, ZeroDivisionError))
else:
def __aexit__(self, *e):
return 456
+ # Normal exit
async def foo():
nonlocal CNT
async with CM():
CNT += 1
+ with self.assertRaisesRegex(
+ TypeError,
+ "'async with' received an object from __aexit__ "
+ "that does not implement __await__: int"):
+ run_async(foo())
+ self.assertEqual(CNT, 1)
-
+ # Exit with 'break'
+ async def foo():
+ nonlocal CNT
+ for i in range(2):
+ async with CM():
+ CNT += 1
+ break
with self.assertRaisesRegex(
- TypeError, "object int can't be used in 'await' expression"):
+ TypeError,
+ "'async with' received an object from __aexit__ "
+ "that does not implement __await__: int"):
+ run_async(foo())
+ self.assertEqual(CNT, 2)
+ # Exit with 'continue'
+ async def foo():
+ nonlocal CNT
+ for i in range(2):
+ async with CM():
+ CNT += 1
+ continue
+ with self.assertRaisesRegex(
+ TypeError,
+ "'async with' received an object from __aexit__ "
+ "that does not implement __await__: int"):
run_async(foo())
+ self.assertEqual(CNT, 3)
- self.assertEqual(CNT, 1)
+ # Exit with 'return'
+ async def foo():
+ nonlocal CNT
+ async with CM():
+ CNT += 1
+ return
+ with self.assertRaisesRegex(
+ TypeError,
+ "'async with' received an object from __aexit__ "
+ "that does not implement __await__: int"):
+ run_async(foo())
+ self.assertEqual(CNT, 4)
def test_with_9(self):
run_async(run_gen()),
([], [121]))
+ def test_comp_4_2(self):
+ async def f(it):
+ for i in it:
+ yield i
+
+ async def run_list():
+ return [i + 10 async for i in f(range(5)) if 0 < i < 4]
+ self.assertEqual(
+ run_async(run_list()),
+ ([], [11, 12, 13]))
+
+ async def run_set():
+ return {i + 10 async for i in f(range(5)) if 0 < i < 4}
+ self.assertEqual(
+ run_async(run_set()),
+ ([], {11, 12, 13}))
+
+ async def run_dict():
+ return {i + 10: i + 100 async for i in f(range(5)) if 0 < i < 4}
+ self.assertEqual(
+ run_async(run_dict()),
+ ([], {11: 101, 12: 102, 13: 103}))
+
+ async def run_gen():
+ gen = (i + 10 async for i in f(range(5)) if 0 < i < 4)
+ return [g + 100 async for g in gen]
+ self.assertEqual(
+ run_async(run_gen()),
+ ([], [111, 112, 113]))
+
def test_comp_5(self):
async def f(it):
for i in it:
class AnyDBMTestCase:
- _dict = {'0': b'',
- 'a': b'Python:',
+ _dict = {'a': b'Python:',
'b': b'Programming',
'c': b'the',
'd': b'way',
def test_anydbm_creation_n_file_exists_with_invalid_contents(self):
# create an empty file
test.support.create_empty_file(_fname)
-
- f = dbm.open(_fname, 'n')
- self.addCleanup(f.close)
- self.assertEqual(len(f), 0)
+ with dbm.open(_fname, 'n') as f:
+ self.assertEqual(len(f), 0)
def test_anydbm_modification(self):
self.init_db()
f = dbm.open(_fname, 'c')
self._dict['g'] = f[b'g'] = b"indented"
self.read_helper(f)
+ # setdefault() works as in the dict interface
+ self.assertEqual(f.setdefault(b'xxx', b'foo'), b'foo')
+ self.assertEqual(f[b'xxx'], b'foo')
f.close()
def test_anydbm_read(self):
self.init_db()
f = dbm.open(_fname, 'r')
self.read_helper(f)
+ # get() works as in the dict interface
+ self.assertEqual(f.get(b'a'), self._dict['a'])
+ self.assertEqual(f.get(b'xxx', b'foo'), b'foo')
+ self.assertIsNone(f.get(b'xxx'))
+ with self.assertRaises(KeyError):
+ f[b'xxx']
f.close()
def test_anydbm_keys(self):
keys = self.keys_helper(f)
f.close()
+ def test_empty_value(self):
+ if getattr(dbm._defaultmod, 'library', None) == 'Berkeley DB':
+ self.skipTest("Berkeley DB doesn't distinguish the empty value "
+ "from the absent one")
+ f = dbm.open(_fname, 'c')
+ self.assertEqual(f.keys(), [])
+ f[b'empty'] = b''
+ self.assertEqual(f.keys(), [b'empty'])
+ self.assertIn(b'empty', f)
+ self.assertEqual(f[b'empty'], b'')
+ self.assertEqual(f.get(b'empty'), b'')
+ self.assertEqual(f.setdefault(b'empty'), b'')
+ f.close()
+
def test_anydbm_access(self):
self.init_db()
f = dbm.open(_fname, 'r')
f = dumbdbm.open(_fname, 'w')
self._dict[b'g'] = f[b'g'] = b"indented"
self.read_helper(f)
+ # setdefault() works as in the dict interface
+ self.assertEqual(f.setdefault(b'xxx', b'foo'), b'foo')
+ self.assertEqual(f[b'xxx'], b'foo')
f.close()
def test_dumbdbm_read(self):
with self.assertWarnsRegex(DeprecationWarning,
'The database is opened for reading only'):
del f[b'a']
+ # get() works as in the dict interface
+ self.assertEqual(f.get(b'b'), self._dict[b'b'])
+ self.assertEqual(f.get(b'xxx', b'foo'), b'foo')
+ self.assertIsNone(f.get(b'xxx'))
+ with self.assertRaises(KeyError):
+ f[b'xxx']
f.close()
def test_dumbdbm_keys(self):
self.assertIn(key, key_set)
key_set.remove(key)
key = self.g.nextkey(key)
- self.assertRaises(KeyError, lambda: self.g['xxx'])
# get() and setdefault() work as in the dict interface
+ self.assertEqual(self.g.get(b'a'), b'b')
+ self.assertIsNone(self.g.get(b'xxx'))
self.assertEqual(self.g.get(b'xxx', b'foo'), b'foo')
+ with self.assertRaises(KeyError):
+ self.g['xxx']
self.assertEqual(self.g.setdefault(b'xxx', b'foo'), b'foo')
self.assertEqual(self.g[b'xxx'], b'foo')
def test_keys(self):
self.d = dbm.ndbm.open(self.filename, 'c')
- self.assertTrue(self.d.keys() == [])
+ self.assertEqual(self.d.keys(), [])
self.d['a'] = 'b'
self.d[b'bytes'] = b'data'
self.d['12345678910'] = '019237410982340912840198242'
self.assertIn('a', self.d)
self.assertIn(b'a', self.d)
self.assertEqual(self.d[b'bytes'], b'data')
+ # get() and setdefault() work as in the dict interface
+ self.assertEqual(self.d.get(b'a'), b'b')
+ self.assertIsNone(self.d.get(b'xxx'))
+ self.assertEqual(self.d.get(b'xxx', b'foo'), b'foo')
+ with self.assertRaises(KeyError):
+ self.d['xxx']
+ self.assertEqual(self.d.setdefault(b'xxx', b'foo'), b'foo')
+ self.assertEqual(self.d[b'xxx'], b'foo')
+ self.d.close()
+
+ def test_empty_value(self):
+ if dbm.ndbm.library == 'Berkeley DB':
+ self.skipTest("Berkeley DB doesn't distinguish the empty value "
+ "from the absent one")
+ self.d = dbm.ndbm.open(self.filename, 'c')
+ self.assertEqual(self.d.keys(), [])
+ self.d['empty'] = ''
+ self.assertEqual(self.d.keys(), [b'empty'])
+ self.assertIn(b'empty', self.d)
+ self.assertEqual(self.d[b'empty'], b'')
+ self.assertEqual(self.d.get(b'empty'), b'')
+ self.assertEqual(self.d.setdefault(b'empty'), b'')
self.d.close()
def test_modes(self):
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
- getcontext = self.decimal.getcontext
+ localcontext = self.decimal.localcontext
- c = getcontext()
- c.prec = 28
+ with localcontext() as c:
+ c.prec = 28
- self.assertEqual(str(Decimal("9.99").__round__()), "10")
- self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
- self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
- self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
- self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
+ self.assertEqual(str(Decimal("9.99").__round__()), "10")
+ self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
+ self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
+ self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
+ self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
- self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
- self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
+ self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
+ self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
- self.assertEqual(str(c.create_decimal(x)), '-0E-1000026')
+ self.assertEqual(str(c.create_decimal(x)), '-0E-1000007')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertEqual("+ \t\tI am a bug", diff[2])
self.assertEqual("? +\n", diff[3])
+ def test_mdiff_catch_stop_iteration(self):
+ # Issue #33224
+ self.assertEqual(
+ list(difflib._mdiff(["2"], ["3"], 1)),
+ [((1, '\x00-2\x01'), (1, '\x00+3\x01'), True)],
+ )
+
+
patch914575_from1 = """
1. Beautiful is beTTer than ugly.
2. Explicit is better than implicit.
Argument count: 0
Kw-only arguments: 0
Number of locals: 2
-Stack size: 17
+Stack size: 16
Flags: OPTIMIZED, NEWLOCALS, NOFREE, COROUTINE
Constants:
0: None
import _pyio as pyio
from test.support import TESTFN
+from test import support
from collections import UserList
class AutoFileTests:
def tearDown(self):
if self.f:
self.f.close()
- os.remove(TESTFN)
+ support.unlink(TESTFN)
def testWeakRefs(self):
# verify weak references
class OtherFileTests:
+ def tearDown(self):
+ support.unlink(TESTFN)
+
def testModeStrings(self):
# check invalid mode strings
+ self.open(TESTFN, 'wb').close()
for mode in ("", "aU", "wU+", "U+", "+U", "rU+"):
try:
f = self.open(TESTFN, mode)
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
- os.unlink(TESTFN)
f = self.open(TESTFN, 'wb')
try:
self.fail("File size after ftruncate wrong %d" % size)
finally:
f.close()
- os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("b", b" "*100),))]
+ # Prepare the testfile
+ bag = self.open(TESTFN, "wb")
+ bag.write(filler * nchunks)
+ bag.writelines(testlines)
+ bag.close()
+ # Test for appropriate errors mixing read* and iteration
+ for methodname, args in methods:
+ f = self.open(TESTFN, 'rb')
+ if next(f) != filler:
+ self.fail, "Broken testfile"
+ meth = getattr(f, methodname)
+ meth(*args) # This simply shouldn't fail
+ f.close()
+
+ # Test to see if harmless (by accident) mixing of read* and
+ # iteration still works. This depends on the size of the internal
+ # iteration buffer (currently 8192,) but we can test it in a
+ # flexible manner. Each line in the bag o' ham is 4 bytes
+ # ("h", "a", "m", "\n"), so 4096 lines of that should get us
+ # exactly on the buffer boundary for any power-of-2 buffersize
+ # between 4 and 16384 (inclusive).
+ f = self.open(TESTFN, 'rb')
+ for i in range(nchunks):
+ next(f)
+ testline = testlines.pop(0)
try:
- # Prepare the testfile
- bag = self.open(TESTFN, "wb")
- bag.write(filler * nchunks)
- bag.writelines(testlines)
- bag.close()
- # Test for appropriate errors mixing read* and iteration
- for methodname, args in methods:
- f = self.open(TESTFN, 'rb')
- if next(f) != filler:
- self.fail, "Broken testfile"
- meth = getattr(f, methodname)
- meth(*args) # This simply shouldn't fail
- f.close()
+ line = f.readline()
+ except ValueError:
+ self.fail("readline() after next() with supposedly empty "
+ "iteration-buffer failed anyway")
+ if line != testline:
+ self.fail("readline() after next() with empty buffer "
+ "failed. Got %r, expected %r" % (line, testline))
+ testline = testlines.pop(0)
+ buf = array("b", b"\x00" * len(testline))
+ try:
+ f.readinto(buf)
+ except ValueError:
+ self.fail("readinto() after next() with supposedly empty "
+ "iteration-buffer failed anyway")
+ line = buf.tobytes()
+ if line != testline:
+ self.fail("readinto() after next() with empty buffer "
+ "failed. Got %r, expected %r" % (line, testline))
+
+ testline = testlines.pop(0)
+ try:
+ line = f.read(len(testline))
+ except ValueError:
+ self.fail("read() after next() with supposedly empty "
+ "iteration-buffer failed anyway")
+ if line != testline:
+ self.fail("read() after next() with empty buffer "
+ "failed. Got %r, expected %r" % (line, testline))
+ try:
+ lines = f.readlines()
+ except ValueError:
+ self.fail("readlines() after next() with supposedly empty "
+ "iteration-buffer failed anyway")
+ if lines != testlines:
+ self.fail("readlines() after next() with empty buffer "
+ "failed. Got %r, expected %r" % (line, testline))
+ f.close()
- # Test to see if harmless (by accident) mixing of read* and
- # iteration still works. This depends on the size of the internal
- # iteration buffer (currently 8192,) but we can test it in a
- # flexible manner. Each line in the bag o' ham is 4 bytes
- # ("h", "a", "m", "\n"), so 4096 lines of that should get us
- # exactly on the buffer boundary for any power-of-2 buffersize
- # between 4 and 16384 (inclusive).
- f = self.open(TESTFN, 'rb')
- for i in range(nchunks):
- next(f)
- testline = testlines.pop(0)
- try:
- line = f.readline()
- except ValueError:
- self.fail("readline() after next() with supposedly empty "
- "iteration-buffer failed anyway")
- if line != testline:
- self.fail("readline() after next() with empty buffer "
- "failed. Got %r, expected %r" % (line, testline))
- testline = testlines.pop(0)
- buf = array("b", b"\x00" * len(testline))
+ # Reading after iteration hit EOF shouldn't hurt either
+ f = self.open(TESTFN, 'rb')
+ try:
+ for line in f:
+ pass
try:
+ f.readline()
f.readinto(buf)
+ f.read()
+ f.readlines()
except ValueError:
- self.fail("readinto() after next() with supposedly empty "
- "iteration-buffer failed anyway")
- line = buf.tobytes()
- if line != testline:
- self.fail("readinto() after next() with empty buffer "
- "failed. Got %r, expected %r" % (line, testline))
-
- testline = testlines.pop(0)
- try:
- line = f.read(len(testline))
- except ValueError:
- self.fail("read() after next() with supposedly empty "
- "iteration-buffer failed anyway")
- if line != testline:
- self.fail("read() after next() with empty buffer "
- "failed. Got %r, expected %r" % (line, testline))
- try:
- lines = f.readlines()
- except ValueError:
- self.fail("readlines() after next() with supposedly empty "
- "iteration-buffer failed anyway")
- if lines != testlines:
- self.fail("readlines() after next() with empty buffer "
- "failed. Got %r, expected %r" % (line, testline))
- f.close()
-
- # Reading after iteration hit EOF shouldn't hurt either
- f = self.open(TESTFN, 'rb')
- try:
- for line in f:
- pass
- try:
- f.readline()
- f.readinto(buf)
- f.read()
- f.readlines()
- except ValueError:
- self.fail("read* failed after next() consumed file")
- finally:
- f.close()
+ self.fail("read* failed after next() consumed file")
finally:
- os.unlink(TESTFN)
+ f.close()
class COtherFileTests(OtherFileTests, unittest.TestCase):
open = io.open
open = staticmethod(pyio.open)
-def tearDownModule():
- # Historically, these tests have been sloppy about removing TESTFN.
- # So get rid of it no matter what.
- if os.path.exists(TESTFN):
- os.unlink(TESTFN)
-
if __name__ == '__main__':
unittest.main()
import fileinput
import collections
import builtins
+import tempfile
import unittest
try:
# all the work, and a few functions (input, etc.) that use a global _state
# variable.
-# Write lines (a list of lines) to temp file number i, and return the
-# temp file's name.
-def writeTmp(i, lines, mode='w'): # opening in text mode is the default
- name = TESTFN + str(i)
- f = open(name, mode)
- for line in lines:
- f.write(line)
- f.close()
- return name
-
-def remove_tempfiles(*names):
- for name in names:
- if name:
- safe_unlink(name)
+class BaseTests:
+ # Write a content (str or bytes) to temp file, and return the
+ # temp file's name.
+ def writeTmp(self, content, *, mode='w'): # opening in text mode is the default
+ fd, name = tempfile.mkstemp()
+ self.addCleanup(support.unlink, name)
+ with open(fd, mode) as f:
+ f.write(content)
+ return name
class LineReader:
def close(self):
pass
-class BufferSizesTests(unittest.TestCase):
+class BufferSizesTests(BaseTests, unittest.TestCase):
def test_buffer_sizes(self):
# First, run the tests with default and teeny buffer size.
for round, bs in (0, 0), (1, 30):
- t1 = t2 = t3 = t4 = None
- try:
- t1 = writeTmp(1, ["Line %s of file 1\n" % (i+1) for i in range(15)])
- t2 = writeTmp(2, ["Line %s of file 2\n" % (i+1) for i in range(10)])
- t3 = writeTmp(3, ["Line %s of file 3\n" % (i+1) for i in range(5)])
- t4 = writeTmp(4, ["Line %s of file 4\n" % (i+1) for i in range(1)])
- if bs:
- with self.assertWarns(DeprecationWarning):
- self.buffer_size_test(t1, t2, t3, t4, bs, round)
- else:
+ t1 = self.writeTmp(''.join("Line %s of file 1\n" % (i+1) for i in range(15)))
+ t2 = self.writeTmp(''.join("Line %s of file 2\n" % (i+1) for i in range(10)))
+ t3 = self.writeTmp(''.join("Line %s of file 3\n" % (i+1) for i in range(5)))
+ t4 = self.writeTmp(''.join("Line %s of file 4\n" % (i+1) for i in range(1)))
+ if bs:
+ with self.assertWarns(DeprecationWarning):
self.buffer_size_test(t1, t2, t3, t4, bs, round)
- finally:
- remove_tempfiles(t1, t2, t3, t4)
+ else:
+ self.buffer_size_test(t1, t2, t3, t4, bs, round)
def buffer_size_test(self, t1, t2, t3, t4, bs=0, round=0):
pat = re.compile(r'LINE (\d+) OF FILE (\d+)')
self.invoked = True
raise self.exception_type()
-class FileInputTests(unittest.TestCase):
+class FileInputTests(BaseTests, unittest.TestCase):
def test_zero_byte_files(self):
- t1 = t2 = t3 = t4 = None
- try:
- t1 = writeTmp(1, [""])
- t2 = writeTmp(2, [""])
- t3 = writeTmp(3, ["The only line there is.\n"])
- t4 = writeTmp(4, [""])
- fi = FileInput(files=(t1, t2, t3, t4))
-
- line = fi.readline()
- self.assertEqual(line, 'The only line there is.\n')
- self.assertEqual(fi.lineno(), 1)
- self.assertEqual(fi.filelineno(), 1)
- self.assertEqual(fi.filename(), t3)
-
- line = fi.readline()
- self.assertFalse(line)
- self.assertEqual(fi.lineno(), 1)
- self.assertEqual(fi.filelineno(), 0)
- self.assertEqual(fi.filename(), t4)
- fi.close()
- finally:
- remove_tempfiles(t1, t2, t3, t4)
+ t1 = self.writeTmp("")
+ t2 = self.writeTmp("")
+ t3 = self.writeTmp("The only line there is.\n")
+ t4 = self.writeTmp("")
+ fi = FileInput(files=(t1, t2, t3, t4))
+
+ line = fi.readline()
+ self.assertEqual(line, 'The only line there is.\n')
+ self.assertEqual(fi.lineno(), 1)
+ self.assertEqual(fi.filelineno(), 1)
+ self.assertEqual(fi.filename(), t3)
+
+ line = fi.readline()
+ self.assertFalse(line)
+ self.assertEqual(fi.lineno(), 1)
+ self.assertEqual(fi.filelineno(), 0)
+ self.assertEqual(fi.filename(), t4)
+ fi.close()
def test_files_that_dont_end_with_newline(self):
- t1 = t2 = None
- try:
- t1 = writeTmp(1, ["A\nB\nC"])
- t2 = writeTmp(2, ["D\nE\nF"])
- fi = FileInput(files=(t1, t2))
- lines = list(fi)
- self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
- self.assertEqual(fi.filelineno(), 3)
- self.assertEqual(fi.lineno(), 6)
- finally:
- remove_tempfiles(t1, t2)
+ t1 = self.writeTmp("A\nB\nC")
+ t2 = self.writeTmp("D\nE\nF")
+ fi = FileInput(files=(t1, t2))
+ lines = list(fi)
+ self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
+ self.assertEqual(fi.filelineno(), 3)
+ self.assertEqual(fi.lineno(), 6)
## def test_unicode_filenames(self):
## # XXX A unicode string is always returned by writeTmp.
## # So is this needed?
-## try:
-## t1 = writeTmp(1, ["A\nB"])
-## encoding = sys.getfilesystemencoding()
-## if encoding is None:
-## encoding = 'ascii'
-## fi = FileInput(files=str(t1, encoding))
-## lines = list(fi)
-## self.assertEqual(lines, ["A\n", "B"])
-## finally:
-## remove_tempfiles(t1)
+## t1 = self.writeTmp("A\nB")
+## encoding = sys.getfilesystemencoding()
+## if encoding is None:
+## encoding = 'ascii'
+## fi = FileInput(files=str(t1, encoding))
+## lines = list(fi)
+## self.assertEqual(lines, ["A\n", "B"])
def test_fileno(self):
- t1 = t2 = None
- try:
- t1 = writeTmp(1, ["A\nB"])
- t2 = writeTmp(2, ["C\nD"])
- fi = FileInput(files=(t1, t2))
- self.assertEqual(fi.fileno(), -1)
- line =next( fi)
- self.assertNotEqual(fi.fileno(), -1)
- fi.nextfile()
- self.assertEqual(fi.fileno(), -1)
- line = list(fi)
- self.assertEqual(fi.fileno(), -1)
- finally:
- remove_tempfiles(t1, t2)
+ t1 = self.writeTmp("A\nB")
+ t2 = self.writeTmp("C\nD")
+ fi = FileInput(files=(t1, t2))
+ self.assertEqual(fi.fileno(), -1)
+ line = next(fi)
+ self.assertNotEqual(fi.fileno(), -1)
+ fi.nextfile()
+ self.assertEqual(fi.fileno(), -1)
+ line = list(fi)
+ self.assertEqual(fi.fileno(), -1)
def test_opening_mode(self):
try:
self.fail("FileInput should reject invalid mode argument")
except ValueError:
pass
- t1 = None
- try:
- # try opening in universal newline mode
- t1 = writeTmp(1, [b"A\nB\r\nC\rD"], mode="wb")
- with check_warnings(('', DeprecationWarning)):
- fi = FileInput(files=t1, mode="U")
- with check_warnings(('', DeprecationWarning)):
- lines = list(fi)
- self.assertEqual(lines, ["A\n", "B\n", "C\n", "D"])
- finally:
- remove_tempfiles(t1)
+ # try opening in universal newline mode
+ t1 = self.writeTmp(b"A\nB\r\nC\rD", mode="wb")
+ with check_warnings(('', DeprecationWarning)):
+ fi = FileInput(files=t1, mode="U")
+ with check_warnings(('', DeprecationWarning)):
+ lines = list(fi)
+ self.assertEqual(lines, ["A\n", "B\n", "C\n", "D"])
def test_stdin_binary_mode(self):
with mock.patch('sys.stdin') as m_stdin:
self.invoked = True
return open(*args)
- t = writeTmp(1, ["\n"])
- self.addCleanup(remove_tempfiles, t)
+ t = self.writeTmp("\n")
custom_open_hook = CustomOpenHook()
with FileInput([t], openhook=custom_open_hook) as fi:
fi.readline()
self.assertEqual(fi.readline(), b'')
def test_context_manager(self):
- try:
- t1 = writeTmp(1, ["A\nB\nC"])
- t2 = writeTmp(2, ["D\nE\nF"])
- with FileInput(files=(t1, t2)) as fi:
- lines = list(fi)
- self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
- self.assertEqual(fi.filelineno(), 3)
- self.assertEqual(fi.lineno(), 6)
- self.assertEqual(fi._files, ())
- finally:
- remove_tempfiles(t1, t2)
+ t1 = self.writeTmp("A\nB\nC")
+ t2 = self.writeTmp("D\nE\nF")
+ with FileInput(files=(t1, t2)) as fi:
+ lines = list(fi)
+ self.assertEqual(lines, ["A\n", "B\n", "C", "D\n", "E\n", "F"])
+ self.assertEqual(fi.filelineno(), 3)
+ self.assertEqual(fi.lineno(), 6)
+ self.assertEqual(fi._files, ())
def test_close_on_exception(self):
+ t1 = self.writeTmp("")
try:
- t1 = writeTmp(1, [""])
with FileInput(files=t1) as fi:
raise OSError
except OSError:
self.assertEqual(fi._files, ())
- finally:
- remove_tempfiles(t1)
def test_empty_files_list_specified_to_constructor(self):
with FileInput(files=[]) as fi:
def test__getitem__(self):
"""Tests invoking FileInput.__getitem__() with the current
line number"""
- t = writeTmp(1, ["line1\n", "line2\n"])
- self.addCleanup(remove_tempfiles, t)
+ t = self.writeTmp("line1\nline2\n")
with FileInput(files=[t]) as fi:
retval1 = fi[0]
self.assertEqual(retval1, "line1\n")
def test__getitem__invalid_key(self):
"""Tests invoking FileInput.__getitem__() with an index unequal to
the line number"""
- t = writeTmp(1, ["line1\n", "line2\n"])
- self.addCleanup(remove_tempfiles, t)
+ t = self.writeTmp("line1\nline2\n")
with FileInput(files=[t]) as fi:
with self.assertRaises(RuntimeError) as cm:
fi[1]
def test__getitem__eof(self):
"""Tests invoking FileInput.__getitem__() with the line number but at
end-of-input"""
- t = writeTmp(1, [])
- self.addCleanup(remove_tempfiles, t)
+ t = self.writeTmp('')
with FileInput(files=[t]) as fi:
with self.assertRaises(IndexError) as cm:
fi[0]
os_unlink_orig = os.unlink
os_unlink_replacement = UnconditionallyRaise(OSError)
try:
- t = writeTmp(1, ["\n"])
- self.addCleanup(remove_tempfiles, t)
+ t = self.writeTmp("\n")
+ self.addCleanup(support.unlink, t + '.bak')
with FileInput(files=[t], inplace=True) as fi:
next(fi) # make sure the file is opened
os.unlink = os_unlink_replacement
os_fstat_orig = os.fstat
os_fstat_replacement = UnconditionallyRaise(OSError)
try:
- t = writeTmp(1, ["\n"])
- self.addCleanup(remove_tempfiles, t)
+ t = self.writeTmp("\n")
with FileInput(files=[t], inplace=True) as fi:
os.fstat = os_fstat_replacement
fi.readline()
os_chmod_orig = os.chmod
os_chmod_replacement = UnconditionallyRaise(OSError)
try:
- t = writeTmp(1, ["\n"])
- self.addCleanup(remove_tempfiles, t)
+ t = self.writeTmp("\n")
with FileInput(files=[t], inplace=True) as fi:
os.chmod = os_chmod_replacement
fi.readline()
self.__call__()
unconditionally_raise_ValueError = FilenoRaisesValueError()
- t = writeTmp(1, ["\n"])
- self.addCleanup(remove_tempfiles, t)
+ t = self.writeTmp("\n")
with FileInput(files=[t]) as fi:
file_backup = fi._file
try:
self.assertRaises(StopIteration, next, fi)
self.assertEqual(src.linesread, [])
+
class MockFileInput:
"""A class that mocks out fileinput.FileInput for use during unit tests"""
from test import support
-_testcapi = support.import_module('_testcapi')
+try:
+ import _testcapi
+except ImportError:
+ _testcapi = None
# This tests to make sure that if a SIGINT arrives just before we send into a
# yield from chain, the KeyboardInterrupt is raised in the innermost
# generator (see bpo-30039).
+@unittest.skipUnless(_testcapi is not None and
+ hasattr(_testcapi, "raise_SIGINT_then_send_None"),
+ "needs _testcapi.raise_SIGINT_then_send_None")
class SignalAndYieldFromTest(unittest.TestCase):
def generator1(self):
with self.assertRaises(cookies.CookieError):
C.load(rawdata)
+ def test_comment_quoting(self):
+ c = cookies.SimpleCookie()
+ c['foo'] = '\N{COPYRIGHT SIGN}'
+ self.assertEqual(str(c['foo']), 'Set-Cookie: foo="\\251"')
+ c['foo']['comment'] = 'comment \N{COPYRIGHT SIGN}'
+ self.assertEqual(
+ str(c['foo']),
+ 'Set-Cookie: foo="\\251"; Comment="comment \\251"'
+ )
+
class MorselTests(unittest.TestCase):
"""Tests for the Morsel object."""
import unittest
from test.support import import_module
-# Skip test if _thread or _tkinter wasn't built, if idlelib is missing,
-# or if tcl/tk is not the 8.5+ needed for ttk widgets.
-import_module('threading') # imported by PyShell, imports _thread
+# For 3.6, skip test_idle if threads are not supported.
+import_module('threading') # Imported by PyShell, imports _thread.
+
+# Skip test_idle if _tkinter wasn't built, if tkinter is missing,
+# if tcl/tk is not the 8.5+ needed for ttk widgets,
+# or if idlelib is missing (not installed).
tk = import_module('tkinter') # imports _tkinter
if tk.TkVersion < 8.5:
raise unittest.SkipTest("IDLE requires tk 8.5 or later.")
idlelib = import_module('idlelib')
-# Before test imports, tell IDLE to avoid changing the environment.
+# Before importing and executing more of idlelib,
+# tell IDLE to avoid changing the environment.
idlelib.testing = True
-# unittest.main and test.libregrtest.runtest.runtest_inner
-# call load_tests, when present, to discover tests to run.
+# Unittest.main and test.libregrtest.runtest.runtest_inner
+# call load_tests, when present here, to discover tests to run.
from idlelib.idle_test import load_tests
if __name__ == '__main__':
import unittest
import importlib.util
import importlib
-
+from test.support.script_helper import assert_python_failure
class LoaderTests(abc.LoaderTests):
self.assertEqual(module.__name__, name)
self.assertEqual(module.__doc__, "Module named in %s" % lang)
+ @unittest.skipIf(not hasattr(sys, 'gettotalrefcount'),
+ '--with-pydebug has to be enabled for this test')
+ def test_bad_traverse(self):
+ ''' Issue #32374: Test that traverse fails when accessing per-module
+ state before Py_mod_exec was executed.
+ (Multiphase initialization modules only)
+ '''
+ script = """if True:
+ try:
+ from test import support
+ import importlib.util as util
+ spec = util.find_spec('_testmultiphase')
+ spec.name = '_testmultiphase_with_bad_traverse'
+
+ with support.SuppressCrashReport():
+ m = spec.loader.create_module(spec)
+ except:
+ # Prevent Python-level exceptions from
+ # ending the process with non-zero status
+ # (We are testing for a crash in C-code)
+ pass"""
+ assert_python_failure("-c", script)
+
(Frozen_MultiPhaseExtensionModuleTests,
Source_MultiPhaseExtensionModuleTests
with self.assertRaisesRegex(TypeError, "'a', 'b' and 'c'"):
inspect.getcallargs(f6)
+ # bpo-33197
+ with self.assertRaisesRegex(ValueError,
+ 'variadic keyword parameters cannot'
+ ' have default values'):
+ inspect.Parameter("foo", kind=inspect.Parameter.VAR_KEYWORD,
+ default=42)
+ with self.assertRaisesRegex(ValueError,
+ "value 5 is not a valid Parameter.kind"):
+ inspect.Parameter("bar", kind=5, default=42)
+
+ with self.assertRaisesRegex(TypeError,
+ 'name must be a str, not a int'):
+ inspect.Parameter(123, kind=4)
+
class TestGetcallargsMethods(TestGetcallargsFunctions):
def setUp(self):
self.assertIs(p.annotation, p.empty)
self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)
- with self.assertRaisesRegex(ValueError, 'invalid value'):
+ with self.assertRaisesRegex(ValueError, "value '123' is "
+ "not a valid Parameter.kind"):
inspect.Parameter('foo', default=10, kind='123')
with self.assertRaisesRegex(ValueError, 'not a valid parameter name'):
self.assertEqual(p2.kind, p2.POSITIONAL_OR_KEYWORD)
self.assertNotEqual(p2, p)
- with self.assertRaisesRegex(ValueError, 'invalid value for'):
+ with self.assertRaisesRegex(ValueError,
+ "value <class 'inspect._empty'> "
+ "is not a valid Parameter.kind"):
p2 = p2.replace(kind=p2.empty)
p2 = p2.replace(kind=p2.KEYWORD_ONLY)
@cpython_only
def test_signature_parameter_implicit(self):
with self.assertRaisesRegex(ValueError,
- 'implicit arguments must be passed in as'):
+ 'implicit arguments must be passed as '
+ 'positional or keyword arguments, '
+ 'not positional-only'):
inspect.Parameter('.0', kind=inspect.Parameter.POSITIONAL_ONLY)
param = inspect.Parameter(
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends into gc.garbage instead
+ self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
def test_truncate(self):
# Truncate implicitly flushes the buffer.
+ self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
+ self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends into gc.garbage instead
+ self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
+ signal.alarm(0)
wio.close()
os.close(r)
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
+ signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
+ signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
class NetmaskTestMixin_v4(CommonTestMixin_v4):
"""Input validation on interfaces and networks is very similar"""
+ def test_no_mask(self):
+ self.assertEqual(str(self.factory('1.2.3.4')), '1.2.3.4/32')
+
def test_split_netmask(self):
addr = "1.2.3.4/32/24"
with self.assertAddressError("Only one '/' permitted in %r" % addr):
self.assertEqual(ipaddress.IPv4Address('1.2.3.1'), hosts[0])
self.assertEqual(ipaddress.IPv4Address('1.2.3.254'), hosts[-1])
+ ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/120')
+ hosts = list(ipv6_network.hosts())
+ self.assertEqual(255, len(hosts))
+ self.assertEqual(ipaddress.IPv6Address('2001:658:22a:cafe::1'), hosts[0])
+ self.assertEqual(ipaddress.IPv6Address('2001:658:22a:cafe::ff'), hosts[-1])
+
# special case where only 1 bit is left for address
- self.assertEqual([ipaddress.IPv4Address('2.0.0.0'),
- ipaddress.IPv4Address('2.0.0.1')],
- list(ipaddress.ip_network('2.0.0.0/31').hosts()))
+ addrs = [ipaddress.IPv4Address('2.0.0.0'),
+ ipaddress.IPv4Address('2.0.0.1')]
+ str_args = '2.0.0.0/31'
+ tpl_args = ('2.0.0.0', 31)
+ self.assertEqual(addrs, list(ipaddress.ip_network(str_args).hosts()))
+ self.assertEqual(addrs, list(ipaddress.ip_network(tpl_args).hosts()))
+ self.assertEqual(list(ipaddress.ip_network(str_args).hosts()),
+ list(ipaddress.ip_network(tpl_args).hosts()))
+
+ addrs = [ipaddress.IPv6Address('2001:658:22a:cafe::'),
+ ipaddress.IPv6Address('2001:658:22a:cafe::1')]
+ str_args = '2001:658:22a:cafe::/127'
+ tpl_args = ('2001:658:22a:cafe::', 127)
+ self.assertEqual(addrs, list(ipaddress.ip_network(str_args).hosts()))
+ self.assertEqual(addrs, list(ipaddress.ip_network(tpl_args).hosts()))
+ self.assertEqual(list(ipaddress.ip_network(str_args).hosts()),
+ list(ipaddress.ip_network(tpl_args).hosts()))
def testFancySubnetting(self):
self.assertEqual(sorted(self.ipv4_network.subnets(prefixlen_diff=3)),
(10, 20, 3),
(10, 3, 20),
(10, 20),
+ (10, 10),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
+ it = iter(range(10))
+ self.assertEqual(list(islice(it, 3, 3)), [])
+ self.assertEqual(list(it), list(range(3, 10)))
+
# Test invalid arguments
ra = range(10)
self.assertRaises(TypeError, islice, ra)
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
+class TestPurePythonRoughEquivalents(unittest.TestCase):
+
+ @staticmethod
+ def islice(iterable, *args):
+ s = slice(*args)
+ start, stop, step = s.start or 0, s.stop or sys.maxsize, s.step or 1
+ it = iter(range(start, stop, step))
+ try:
+ nexti = next(it)
+ except StopIteration:
+ # Consume *iterable* up to the *start* position.
+ for i, element in zip(range(start), iterable):
+ pass
+ return
+ try:
+ for i, element in enumerate(iterable):
+ if i == nexti:
+ yield element
+ nexti = next(it)
+ except StopIteration:
+ # Consume to *stop*.
+ for i, element in zip(range(i + 1, stop), iterable):
+ pass
+
+ def test_islice_recipe(self):
+ self.assertEqual(list(self.islice('ABCDEFG', 2)), list('AB'))
+ self.assertEqual(list(self.islice('ABCDEFG', 2, 4)), list('CD'))
+ self.assertEqual(list(self.islice('ABCDEFG', 2, None)), list('CDEFG'))
+ self.assertEqual(list(self.islice('ABCDEFG', 0, None, 2)), list('ACEG'))
+ # Test items consumed.
+ it = iter(range(10))
+ self.assertEqual(list(self.islice(it, 3)), list(range(3)))
+ self.assertEqual(list(it), list(range(3, 10)))
+ it = iter(range(10))
+ self.assertEqual(list(self.islice(it, 3, 3)), [])
+ self.assertEqual(list(it), list(range(3, 10)))
+ # Test that slice finishes in predictable state.
+ c = count()
+ self.assertEqual(list(self.islice(c, 1, 3, 50)), [1])
+ self.assertEqual(next(c), 3)
+
+
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
+>>> def prepend(value, iterator):
+... "Prepend a single value in front of an iterator"
+... # prepend(1, [2, 3, 4]) -> 1 2 3 4
+... return chain([value], iterator)
+
>>> def enumerate(iterable, start=0):
... return zip(count(start), iterable)
... "Return function(0), function(1), ..."
... return map(function, count(start))
+>>> import collections
+>>> def consume(iterator, n=None):
+... "Advance the iterator n-steps ahead. If n is None, consume entirely."
+... # Use functions that consume iterators at C speed.
+... if n is None:
+... # feed the entire iterator into a zero-length deque
+... collections.deque(iterator, maxlen=0)
+... else:
+... # advance to the empty slice starting at position n
+... next(islice(iterator, n, n), None)
+
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+>>> list(prepend(1, [2, 3, 4]))
+[1, 2, 3, 4]
+
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
+>>> it = iter(range(10))
+>>> consume(it, 3)
+>>> next(it)
+3
+>>> consume(it)
+>>> next(it, 'Done')
+'Done'
+
>>> nth('abcde', 3)
'd'
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples,
+ TestPurePythonRoughEquivalents,
SizeofTest)
support.run_unittest(*test_classes)
def test_keys_reuse(self):
s = '[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]'
self.check_keys_reuse(s, self.loads)
- self.check_keys_reuse(s, self.json.decoder.JSONDecoder().decode)
+ decoder = self.json.decoder.JSONDecoder()
+ self.check_keys_reuse(s, decoder.decode)
+ self.assertFalse(decoder.memo)
def test_extra_data(self):
s = '[1, 2, 3]5'
def test_valencia_modifier(self):
self.check('ca_ES.UTF-8@valencia', 'ca_ES.UTF-8@valencia')
- self.check('ca_ES@valencia', 'ca_ES.ISO8859-15@valencia')
+ self.check('ca_ES@valencia', 'ca_ES.UTF-8@valencia')
self.check('ca@valencia', 'ca_ES.ISO8859-1@valencia')
def test_devanagari_modifier(self):
"""
allow_reuse_address = True
+ _block_on_close = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
+ _block_on_close = True
+
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def tearDown(self):
"""Shutdown the TCP server."""
try:
- if self.server:
- self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
+ if self.server:
+ self.server.stop(2.0)
finally:
BaseTest.tearDown(self)
self.assertRaises(ValueError, m.write_byte, 42)
self.assertRaises(ValueError, m.write, b'abc')
+ def test_concat_repeat_exception(self):
+ m = mmap.mmap(-1, 16)
+ with self.assertRaises(TypeError):
+ m + m
+ with self.assertRaises(TypeError):
+ m * 2
+
class LargeMmapTests(unittest.TestCase):
# An absolutely minimal test of position information. Better
# tests would be a big project.
code = "def f(x):\n return x + 1"
- st1 = parser.suite(code)
- st2 = st1.totuple(line_info=1, col_info=1)
+ st = parser.suite(code)
def walk(tree):
node_type = tree[0]
next = tree[1]
- if isinstance(next, tuple):
+ if isinstance(next, (tuple, list)):
for elt in tree[1:]:
for x in walk(elt):
yield x
else:
yield tree
- terminals = list(walk(st2))
- self.assertEqual([
+ expected = [
(1, 'def', 1, 0),
(1, 'f', 1, 4),
(7, '(', 1, 5),
(4, '', 2, 16),
(6, '', 2, -1),
(4, '', 2, -1),
- (0, '', 2, -1)],
- terminals)
+ (0, '', 2, -1),
+ ]
+
+ self.assertEqual(list(walk(st.totuple(line_info=True, col_info=True))),
+ expected)
+ self.assertEqual(list(walk(st.totuple())),
+ [(t, n) for t, n, l, c in expected])
+ self.assertEqual(list(walk(st.totuple(line_info=True))),
+ [(t, n, l) for t, n, l, c in expected])
+ self.assertEqual(list(walk(st.totuple(col_info=True))),
+ [(t, n, c) for t, n, l, c in expected])
+ self.assertEqual(list(walk(st.tolist(line_info=True, col_info=True))),
+ [list(x) for x in expected])
+ self.assertEqual(list(walk(parser.st2tuple(st, line_info=True,
+ col_info=True))),
+ expected)
+ self.assertEqual(list(walk(parser.st2list(st, line_info=True,
+ col_info=True))),
+ [list(x) for x in expected])
def test_extended_unpacking(self):
self.check_suite("*a = y")
# resolves to 'dirB/..' first before resolving to parent of dirB.
self._check_resolve_relative(p, P(BASE, 'foo', 'in', 'spam'), False)
# Now create absolute symlinks
- d = tempfile.mkdtemp(suffix='-dirD')
+ d = support._longpath(tempfile.mkdtemp(suffix='-dirD'))
self.addCleanup(support.rmtree, d)
os.symlink(os.path.join(d), join('dirA', 'linkX'))
os.symlink(join('dirB'), os.path.join(d, 'linkY'))
otherhome = pwdent.pw_dir.rstrip('/')
if othername != username and otherhome:
break
+ else:
+ othername = username
+ otherhome = userhome
p1 = P('~/Documents')
p2 = P('~' + username + '/Documents')
def run(self):
self.active = True
self.__flag.set()
- while self.active and asyncore.socket_map:
- self.active_lock.acquire()
- asyncore.loop(timeout=0.1, count=1)
- self.active_lock.release()
- asyncore.close_all(ignore_all=True)
+ try:
+ while self.active and asyncore.socket_map:
+ with self.active_lock:
+ asyncore.loop(timeout=0.1, count=1)
+ finally:
+ asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
except OSError as inst:
# issue10812, ZFS doesn't appear to support posix_fallocate,
# so skip Solaris-based since they are likely to have ZFS.
- if inst.errno != errno.EINVAL or not sys.platform.startswith("sunos"):
+ # issue33655: Also ignore EINVAL on *BSD since ZFS is also
+ # often used there.
+ if inst.errno == errno.EINVAL and sys.platform.startswith(
+ ('sunos', 'freebsd', 'netbsd', 'openbsd', 'gnukfreebsd')):
+ raise unittest.SkipTest("test may fail on ZFS filesystems")
+ else:
raise
finally:
os.close(fd)
results = self.do_profiling()
expected = self.get_expected_output()
self.assertEqual(results[0], 1000)
+ fail = []
for i, method in enumerate(self.methodnames):
- if results[i+1] != expected[method]:
- print("Stats.%s output for %s doesn't fit expectation!" %
- (method, self.profilerclass.__name__))
- print('\n'.join(unified_diff(
- results[i+1].split('\n'),
- expected[method].split('\n'))))
+ a = expected[method]
+ b = results[i+1]
+ if a != b:
+ fail.append(f"\nStats.{method} output for "
+ f"{self.profilerclass.__name__} "
+ "does not fit expectation:")
+ fail.extend(unified_diff(a.split('\n'), b.split('\n'),
+ lineterm=""))
+ if fail:
+ self.fail("\n".join(fail))
def test_calling_conventions(self):
# Issue #5330: profile and cProfile wouldn't report C functions called
def setUp(self):
# isatty() and close() can hang on some platforms. Set an alarm
# before running the test to make sure we don't hang forever.
- self.old_alarm = signal.signal(signal.SIGALRM, self.handle_sig)
+ old_alarm = signal.signal(signal.SIGALRM, self.handle_sig)
+ self.addCleanup(signal.signal, signal.SIGALRM, old_alarm)
+ self.addCleanup(signal.alarm, 0)
signal.alarm(10)
- def tearDown(self):
- # remove alarm, restore old alarm handler
- signal.alarm(0)
- signal.signal(signal.SIGALRM, self.old_alarm)
-
def handle_sig(self, sig, frame):
self.fail("isatty hung")
# Population range too large (n >= maxsize)
self.gen._randbelow(maxsize+1, maxsize = maxsize)
self.gen._randbelow(5640, maxsize = maxsize)
-
+ # issue 33203: test that _randbelow raises ValueError on
+ # n == 0 also in its getrandbits-independent branch.
+ with self.assertRaises(ValueError):
+ self.gen._randbelow(0, maxsize=maxsize)
# This might be going too far to test a single line, but because of our
# noble aim of achieving 100% test coverage we need to write a case in
# which the following line in Random._randbelow() gets executed:
def check_executed_tests(self, output, tests, skipped=(), failed=(),
env_changed=(), omitted=(),
+ rerun=(),
randomize=False, interrupted=False,
fail_env_changed=False):
if isinstance(tests, str):
env_changed = [env_changed]
if isinstance(omitted, str):
omitted = [omitted]
+ if isinstance(rerun, str):
+ rerun = [rerun]
executed = self.parse_executed_tests(output)
if randomize:
regex = list_regex('%s test%s omitted', omitted)
self.check_line(output, regex)
+ if rerun:
+ regex = list_regex('%s re-run test%s', rerun)
+ self.check_line(output, regex)
+ self.check_line(output, "Re-running failed tests in verbose mode")
+ for name in rerun:
+ regex = "Re-running test %r in verbose mode" % name
+ self.check_line(output, regex)
+
good = (len(tests) - len(skipped) - len(failed)
- len(omitted) - len(env_changed))
if good:
if interrupted:
self.check_line(output, 'Test suite interrupted by signal SIGINT.')
+ result = []
if failed:
- result = 'FAILURE'
- elif interrupted:
- result = 'INTERRUPTED'
+ result.append('FAILURE')
elif fail_env_changed and env_changed:
- result = 'ENV CHANGED'
- else:
- result = 'SUCCESS'
+ result.append('ENV CHANGED')
+ if interrupted:
+ result.append('INTERRUPTED')
+ if not result:
+ result.append('SUCCESS')
+ result = ', '.join(result)
+ if rerun:
+ self.check_line(output, 'Tests result: %s' % result)
+ result = 'FAILURE then %s' % result
self.check_line(output, 'Tests result: %s' % result)
def parse_random_seed(self, output):
def test_pcbuild_rt(self):
# PCbuild\rt.bat
script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
+ if not os.path.isfile(script):
+ self.skipTest(f'File "{script}" does not exist')
rt_args = ["-q"] # Quick, don't run tests twice
if platform.architecture()[0] == '64bit':
rt_args.append('-x64') # 64-bit build
import os
import unittest
- # Issue #25306: Disable popups and logs to stderr on assertion
- # failures in MSCRT
- try:
- import msvcrt
- msvcrt.CrtSetReportMode
- except (ImportError, AttributeError):
- # no Windows, o release build
- pass
- else:
- for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
- msvcrt.CrtSetReportMode(m, 0)
-
class FDLeakTest(unittest.TestCase):
def test_leak(self):
fd = os.open(__file__, os.O_RDONLY)
- # bug: never cloes the file descriptor
+ # bug: never close the file descriptor
""")
self.check_leak(code, 'file descriptors')
self.check_executed_tests(output, [testname], env_changed=testname,
fail_env_changed=True)
+ def test_rerun_fail(self):
+ code = textwrap.dedent("""
+ import unittest
+
+ class Tests(unittest.TestCase):
+ def test_bug(self):
+ # test always fails
+ self.fail("bug")
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("-w", testname, exitcode=2)
+ self.check_executed_tests(output, [testname],
+ failed=testname, rerun=testname)
+
if __name__ == '__main__':
unittest.main()
bad = ['/cyberworld/map/index.html']
+class StringFormattingTest(BaseRobotTest, unittest.TestCase):
+ robots_txt = """\
+User-agent: *
+Crawl-delay: 1
+Request-rate: 3/15
+Disallow: /cyberworld/map/ # This is an infinite virtual URL space
+
+# Cybermapper knows where to go.
+User-agent: cybermapper
+Disallow: /some/path
+ """
+
+ expected_output = """\
+User-agent: cybermapper
+Disallow: /some/path
+
+User-agent: *
+Crawl-delay: 1
+Request-rate: 3/15
+Disallow: /cyberworld/map/
+
+"""
+
+ def test_string_formatting(self):
+ self.assertEqual(str(self.parser), self.expected_output)
+
+
class RobotHandler(BaseHTTPRequestHandler):
def do_GET(self):
orig_alrm_handler = signal.signal(signal.SIGALRM, handler)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
- self.addCleanup(signal.alarm, 0)
- signal.alarm(1)
+ try:
+ signal.alarm(1)
- s.register(rd, selectors.EVENT_READ)
- t = time()
- # select() is interrupted by a signal which raises an exception
- with self.assertRaises(InterruptSelect):
- s.select(30)
- # select() was interrupted before the timeout of 30 seconds
- self.assertLess(time() - t, 5.0)
+ s.register(rd, selectors.EVENT_READ)
+ t = time()
+ # select() is interrupted by a signal which raises an exception
+ with self.assertRaises(InterruptSelect):
+ s.select(30)
+ # select() was interrupted before the timeout of 30 seconds
+ self.assertLess(time() - t, 5.0)
+ finally:
+ signal.alarm(0)
@unittest.skipUnless(hasattr(signal, "alarm"),
"signal.alarm() required for this test")
orig_alrm_handler = signal.signal(signal.SIGALRM, lambda *args: None)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
- self.addCleanup(signal.alarm, 0)
- signal.alarm(1)
+ try:
+ signal.alarm(1)
- s.register(rd, selectors.EVENT_READ)
- t = time()
- # select() is interrupted by a signal, but the signal handler doesn't
- # raise an exception, so select() should by retries with a recomputed
- # timeout
- self.assertFalse(s.select(1.5))
- self.assertGreaterEqual(time() - t, 1.0)
+ s.register(rd, selectors.EVENT_READ)
+ t = time()
+ # select() is interrupted by a signal, but the signal handler doesn't
+ # raise an exception, so select() should be retried with a recomputed
+ # timeout
+ self.assertFalse(s.select(1.5))
+ self.assertGreaterEqual(time() - t, 1.0)
+ finally:
+ signal.alarm(0)
class ScalableSelectorMixIn:
self.skipTest("FD limit reached")
raise
- self.assertEqual(NUM_FDS // 2, len(s.select()))
+ try:
+ fds = s.select()
+ except OSError as e:
+ if e.errno == errno.EINVAL and sys.platform == 'darwin':
+ # unexplainable errors on macOS don't need to fail the test
+ self.skipTest("Invalid argument error calling poll()")
+ raise
+ self.assertEqual(NUM_FDS // 2, len(fds))
class DefaultSelectorTestCase(BaseSelectorTestCase):
self.server_ready.set()
def _setUp(self):
+ self.wait_threads = support.wait_threads_exit()
+ self.wait_threads.__enter__()
+
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
def _tearDown(self):
self.__tearDown()
self.done.wait()
+ self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
- self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises
# errno of EINTR when interrupted by a signal.
- self.setAlarm(self.alarm_time)
- with self.assertRaises(ZeroDivisionError) as cm:
- func(*args, **kwargs)
+ try:
+ self.setAlarm(self.alarm_time)
+ with self.assertRaises(ZeroDivisionError) as cm:
+ func(*args, **kwargs)
+ finally:
+ self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
# Check that func(*args, **kwargs), run in a loop, raises
# OSError with an errno of EINTR when interrupted by a
# signal.
- with self.assertRaises(ZeroDivisionError) as cm:
- while True:
- self.setAlarm(self.alarm_time)
- func(*args, **kwargs)
+ try:
+ with self.assertRaises(ZeroDivisionError) as cm:
+ while True:
+ self.setAlarm(self.alarm_time)
+ func(*args, **kwargs)
+ finally:
+ self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
- signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
+ signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
- # avaliable since long time ago
+ # available since long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
if HAVE_UNIX_SOCKETS and HAVE_FORKING:
class ForkingUnixStreamServer(socketserver.ForkingMixIn,
socketserver.UnixStreamServer):
- pass
+ _block_on_close = True
class ForkingUnixDatagramServer(socketserver.ForkingMixIn,
socketserver.UnixDatagramServer):
- pass
+ _block_on_close = True
@contextlib.contextmanager
if pid == 0:
# Don't raise an exception; it would be caught by the test harness.
os._exit(72)
- yield None
- pid2, status = os.waitpid(pid, 0)
- testcase.assertEqual(pid2, pid)
- testcase.assertEqual(72 << 8, status)
-
-
-def close_server(server):
- server.server_close()
-
- if hasattr(server, 'active_children'):
- # ForkingMixIn: Manually reap all child processes, since server_close()
- # calls waitpid() in non-blocking mode using the WNOHANG flag.
- for pid in server.active_children.copy():
- try:
- os.waitpid(pid, 0)
- except ChildProcessError:
- pass
- server.active_children.clear()
+ try:
+ yield None
+ except:
+ raise
+ finally:
+ pid2, status = os.waitpid(pid, 0)
+ testcase.assertEqual(pid2, pid)
+ testcase.assertEqual(72 << 8, status)
@unittest.skipUnless(threading, 'Threading required for this test.')
def make_server(self, addr, svrcls, hdlrbase):
class MyServer(svrcls):
+ _block_on_close = True
+
def handle_error(self, request, client_address):
self.close_request(request)
raise
if verbose: print("waiting for server")
server.shutdown()
t.join()
- close_server(server)
+ server.server_close()
self.assertEqual(-1, server.socket.fileno())
+ if HAVE_FORKING and isinstance(server, socketserver.ForkingMixIn):
+ # bpo-31151: Check that ForkingMixIn.server_close() waits until
+ # all children completed
+ self.assertFalse(server.active_children)
if verbose: print("done")
def stream_examine(self, proto, addr):
s.shutdown()
for t, s in threads:
t.join()
- close_server(s)
+ s.server_close()
def test_tcpserver_bind_leak(self):
# Issue #22435: the server socket wouldn't be closed if bind()/listen()
class BaseErrorTestServer(socketserver.TCPServer):
+ _block_on_close = True
+
def __init__(self, exception):
self.exception = exception
super().__init__((HOST, 0), BadHandler)
try:
self.handle_request()
finally:
- close_server(self)
+ self.server_close()
self.wait_done()
def handle_error(self, request, client_address):
if HAVE_FORKING:
class ForkingErrorTestServer(socketserver.ForkingMixIn, BaseErrorTestServer):
- pass
+ _block_on_close = True
class SocketWriterTest(unittest.TestCase):
self.server.request_fileno = self.request.fileno()
server = socketserver.TCPServer((HOST, 0), Handler)
- self.addCleanup(close_server, server)
+ self.addCleanup(server.server_close)
s = socket.socket(
server.address_family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
with s:
self.server.sent2 = self.wfile.write(big_chunk)
server = socketserver.TCPServer((HOST, 0), Handler)
- self.addCleanup(close_server, server)
+ self.addCleanup(server.server_close)
interrupted = threading.Event()
def signal_handler(signum, frame):
s.close()
server.handle_request()
self.assertEqual(server.shutdown_called, 1)
- close_server(server)
+ server.server_close()
if __name__ == "__main__":
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
+ @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"), "needs TLS 1.2")
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
import platform
import signal
import io
+import itertools
import os
import errno
import tempfile
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
+ def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
+ saved_fds = self._save_fds(range(3))
+ try:
+ for from_fd in from_fds:
+ with tempfile.TemporaryFile() as f:
+ os.dup2(f.fileno(), from_fd)
+
+ fd_to_close = (set(range(3)) - set(from_fds)).pop()
+ os.close(fd_to_close)
+
+ arg_names = ['stdin', 'stdout', 'stderr']
+ kwargs = {}
+ for from_fd, to_fd in zip(from_fds, to_fds):
+ kwargs[arg_names[to_fd]] = from_fd
+
+ code = textwrap.dedent(r'''
+ import os, sys
+ skipped_fd = int(sys.argv[1])
+ for fd in range(3):
+ if fd != skipped_fd:
+ os.write(fd, str(fd).encode('ascii'))
+ ''')
+
+ skipped_fd = (set(range(3)) - set(to_fds)).pop()
+
+ rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
+ **kwargs)
+ self.assertEqual(rc, 0)
+
+ for from_fd, to_fd in zip(from_fds, to_fds):
+ os.lseek(from_fd, 0, os.SEEK_SET)
+ read_bytes = os.read(from_fd, 1024)
+ read_fds = list(map(int, read_bytes.decode('ascii')))
+ msg = textwrap.dedent(f"""
+ When testing {from_fds} to {to_fds} redirection,
+ parent descriptor {from_fd} got redirected
+ to descriptor(s) {read_fds} instead of descriptor {to_fd}.
+ """)
+ self.assertEqual([to_fd], read_fds, msg)
+ finally:
+ self._restore_fds(saved_fds)
+
+ # Check that subprocess can remap std fds correctly even
+ # if one of them is closed (#32844).
+ def test_swap_std_fds_with_one_closed(self):
+ for from_fds in itertools.combinations(range(3), 2):
+ for to_fds in itertools.permutations(range(3), 2):
+ self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
+
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
class TestUntestedModules(unittest.TestCase):
def test_untested_modules_can_be_imported(self):
- untested = ('bdb', 'encodings', 'formatter',
- 'nturl2path', 'tabnanny')
+ untested = ('encodings', 'formatter', 'nturl2path', 'tabnanny')
with support.check_warnings(quiet=True):
for name in untested:
try:
self.assertTrue(support.match_test(test_access))
self.assertFalse(support.match_test(test_chdir))
+ def test_fd_count(self):
+ # We cannot test the absolute value of fd_count(): on old Linux
+ # kernel or glibc versions, os.urandom() keeps a FD open on
+ # /dev/urandom device and Python has 4 FD opens instead of 3.
+ start = support.fd_count()
+ fd = os.open(__file__, os.O_RDONLY)
+ try:
+ more = support.fd_count()
+ finally:
+ os.close(fd)
+ self.assertEqual(more - start, 1)
# XXX -follows a list of untested API
# make_legacy_pyc
import gc
from functools import wraps
+
class tracecontext:
- """Contex manager that traces its enter and exit."""
+ """Context manager that traces its enter and exit."""
def __init__(self, output, value):
self.output = output
self.value = value
def __exit__(self, *exc_info):
self.output.append(-self.value)
+class asynctracecontext:
+ """Asynchronous context manager that traces its aenter and aexit."""
+ def __init__(self, output, value):
+ self.output = output
+ self.value = value
+
+ async def __aenter__(self):
+ self.output.append(self.value)
+
+ async def __aexit__(self, *exc_info):
+ self.output.append(-self.value)
+
+async def asynciter(iterable):
+ """Convert an iterable to an asynchronous iterator."""
+ for x in iterable:
+ yield x
+
+def asyncio_run(main):
+ import asyncio
+ import asyncio.events
+ import asyncio.coroutines
+ assert asyncio.events._get_running_loop() is None
+ assert asyncio.coroutines.iscoroutine(main)
+ loop = asyncio.events.new_event_loop()
+ try:
+ asyncio.events.set_event_loop(loop)
+ return loop.run_until_complete(main)
+ finally:
+ try:
+ loop.run_until_complete(loop.shutdown_asyncgens())
+ finally:
+ asyncio.events.set_event_loop(None)
+ loop.close()
+
+
# A very basic example. If this fails, we're in deep trouble.
def basic():
return 1
sys.settrace(None)
self.compare_jump_output(expected, output)
+ def run_async_test(self, func, jumpFrom, jumpTo, expected, error=None,
+ event='line', decorated=False):
+ tracer = JumpTracer(func, jumpFrom, jumpTo, event, decorated)
+ sys.settrace(tracer.trace)
+ output = []
+ if error is None:
+ asyncio_run(func(output))
+ else:
+ with self.assertRaisesRegex(*error):
+ asyncio_run(func(output))
+ sys.settrace(None)
+ self.compare_jump_output(expected, output)
+
def jump_test(jumpFrom, jumpTo, expected, error=None, event='line'):
"""Decorator that creates a test that makes a jump
from one place to another in the following code.
return test
return decorator
+ def async_jump_test(jumpFrom, jumpTo, expected, error=None, event='line'):
+ """Decorator that creates a test that makes a jump
+ from one place to another in the following asynchronous code.
+ """
+ def decorator(func):
+ @wraps(func)
+ def test(self):
+ self.run_async_test(func, jumpFrom, jumpTo, expected,
+ error=error, event=event, decorated=True)
+ return test
+ return decorator
+
## The first set of 'jump' tests are for things that are allowed:
@jump_test(1, 3, [3])
output.append(6)
output.append(7)
+ @async_jump_test(4, 5, [3, 5])
+ async def test_jump_out_of_async_for_block_forwards(output):
+ for i in [1]:
+ async for i in asynciter([1, 2]):
+ output.append(3)
+ output.append(4)
+ output.append(5)
+
+ @async_jump_test(5, 2, [2, 4, 2, 4, 5, 6])
+ async def test_jump_out_of_async_for_block_backwards(output):
+ for i in [1]:
+ output.append(2)
+ async for i in asynciter([1]):
+ output.append(4)
+ output.append(5)
+ output.append(6)
+
@jump_test(1, 2, [3])
def test_jump_to_codeless_line(output):
output.append(1)
output.append(2)
output.append(3)
+ @async_jump_test(2, 3, [1, 3])
+ async def test_jump_forwards_out_of_async_with_block(output):
+ async with asynctracecontext(output, 1):
+ output.append(2)
+ output.append(3)
+
@jump_test(3, 1, [1, 2, 1, 2, 3, -2])
def test_jump_backwards_out_of_with_block(output):
output.append(1)
with tracecontext(output, 2):
output.append(3)
+ @async_jump_test(3, 1, [1, 2, 1, 2, 3, -2])
+ async def test_jump_backwards_out_of_async_with_block(output):
+ output.append(1)
+ async with asynctracecontext(output, 2):
+ output.append(3)
+
@jump_test(2, 5, [5])
def test_jump_forwards_out_of_try_finally_block(output):
try:
with tracecontext(output, 4):
output.append(5)
+ @async_jump_test(2, 4, [1, 4, 5, -4])
+ async def test_jump_across_async_with(output):
+ output.append(1)
+ async with asynctracecontext(output, 2):
+ output.append(3)
+ async with asynctracecontext(output, 4):
+ output.append(5)
+
@jump_test(4, 5, [1, 3, 5, 6])
def test_jump_out_of_with_block_within_for_block(output):
output.append(1)
output.append(5)
output.append(6)
+ @async_jump_test(4, 5, [1, 3, 5, 6])
+ async def test_jump_out_of_async_with_block_within_for_block(output):
+ output.append(1)
+ for i in [1]:
+ async with asynctracecontext(output, 3):
+ output.append(4)
+ output.append(5)
+ output.append(6)
+
@jump_test(4, 5, [1, 2, 3, 5, -2, 6])
def test_jump_out_of_with_block_within_with_block(output):
output.append(1)
output.append(5)
output.append(6)
+ @async_jump_test(4, 5, [1, 2, 3, 5, -2, 6])
+ async def test_jump_out_of_async_with_block_within_with_block(output):
+ output.append(1)
+ with tracecontext(output, 2):
+ async with asynctracecontext(output, 3):
+ output.append(4)
+ output.append(5)
+ output.append(6)
+
@jump_test(5, 6, [2, 4, 6, 7])
def test_jump_out_of_with_block_within_finally_block(output):
try:
output.append(6)
output.append(7)
+ @async_jump_test(5, 6, [2, 4, 6, 7])
+ async def test_jump_out_of_async_with_block_within_finally_block(output):
+ try:
+ output.append(2)
+ finally:
+ async with asynctracecontext(output, 4):
+ output.append(5)
+ output.append(6)
+ output.append(7)
+
@jump_test(8, 11, [1, 3, 5, 11, 12])
def test_jump_out_of_complex_nested_blocks(output):
output.append(1)
output.append(4)
output.append(5)
+ @async_jump_test(3, 5, [1, 2, 5])
+ async def test_jump_out_of_async_with_assignment(output):
+ output.append(1)
+ async with asynctracecontext(output, 2) \
+ as x:
+ output.append(4)
+ output.append(5)
+
@jump_test(3, 6, [1, 6, 8, 9])
def test_jump_over_return_in_try_finally_block(output):
output.append(1)
output.append(7)
output.append(8)
+ @async_jump_test(1, 7, [7, 8])
+ async def test_jump_over_async_for_block_before_else(output):
+ output.append(1)
+ if not output: # always false
+ async for i in asynciter([3]):
+ output.append(4)
+ else:
+ output.append(6)
+ output.append(7)
+ output.append(8)
+
# The second set of 'jump' tests are for things that are not allowed:
@jump_test(2, 3, [1], (ValueError, 'after'))
for i in 1, 2:
output.append(3)
+ @async_jump_test(1, 3, [], (ValueError, 'into'))
+ async def test_no_jump_forwards_into_async_for_block(output):
+ output.append(1)
+ async for i in asynciter([1, 2]):
+ output.append(3)
+
@jump_test(3, 2, [2, 2], (ValueError, 'into'))
def test_no_jump_backwards_into_for_block(output):
for i in 1, 2:
output.append(2)
output.append(3)
+ @async_jump_test(3, 2, [2, 2], (ValueError, 'into'))
+ async def test_no_jump_backwards_into_async_for_block(output):
+ async for i in asynciter([1, 2]):
+ output.append(2)
+ output.append(3)
+
@jump_test(2, 4, [], (ValueError, 'into'))
def test_no_jump_forwards_into_while_block(output):
i = 1
with tracecontext(output, 2):
output.append(3)
+ @async_jump_test(1, 3, [], (ValueError, 'into'))
+ async def test_no_jump_forwards_into_async_with_block(output):
+ output.append(1)
+ async with asynctracecontext(output, 2):
+ output.append(3)
+
@jump_test(3, 2, [1, 2, -1], (ValueError, 'into'))
def test_no_jump_backwards_into_with_block(output):
with tracecontext(output, 1):
output.append(2)
output.append(3)
+ @async_jump_test(3, 2, [1, 2, -1], (ValueError, 'into'))
+ async def test_no_jump_backwards_into_async_with_block(output):
+ async with asynctracecontext(output, 1):
+ output.append(2)
+ output.append(3)
+
@jump_test(1, 3, [], (ValueError, 'into'))
def test_no_jump_forwards_into_try_finally_block(output):
output.append(1)
with tracecontext(output, 4):
output.append(5)
+ @async_jump_test(3, 5, [1, 2, -2], (ValueError, 'into'))
+ async def test_no_jump_between_async_with_blocks(output):
+ output.append(1)
+ async with asynctracecontext(output, 2):
+ output.append(3)
+ async with asynctracecontext(output, 4):
+ output.append(5)
+
@jump_test(7, 4, [1, 6], (ValueError, 'into'))
def test_no_jump_into_for_block_before_else(output):
output.append(1)
output.append(7)
output.append(8)
+ @async_jump_test(7, 4, [1, 6], (ValueError, 'into'))
+ async def test_no_jump_into_async_for_block_before_else(output):
+ output.append(1)
+ if not output: # always false
+ async for i in asynciter([3]):
+ output.append(4)
+ else:
+ output.append(6)
+ output.append(7)
+ output.append(8)
+
def test_no_jump_to_non_integers(self):
self.run_test(no_jump_to_non_integers, 2, "Spam", [True])
self.done_mutex.release()
def test_starting_threads(self):
- # Basic test for thread creation.
- for i in range(NUMTASKS):
- self.newtask()
- verbose_print("waiting for tasks to complete...")
- self.done_mutex.acquire()
- verbose_print("all tasks done")
+ with support.wait_threads_exit():
+ # Basic test for thread creation.
+ for i in range(NUMTASKS):
+ self.newtask()
+ verbose_print("waiting for tasks to complete...")
+ self.done_mutex.acquire()
+ verbose_print("all tasks done")
def test_stack_size(self):
# Various stack size tests.
verbose_print("trying stack_size = (%d)" % tss)
self.next_ident = 0
self.created = 0
- for i in range(NUMTASKS):
- self.newtask()
+ with support.wait_threads_exit():
+ for i in range(NUMTASKS):
+ self.newtask()
- verbose_print("waiting for all tasks to complete")
- self.done_mutex.acquire()
- verbose_print("all tasks done")
+ verbose_print("waiting for all tasks to complete")
+ self.done_mutex.acquire()
+ verbose_print("all tasks done")
thread.stack_size(0)
mut = thread.allocate_lock()
mut.acquire()
started = []
+
def task():
started.append(None)
mut.acquire()
mut.release()
- thread.start_new_thread(task, ())
- while not started:
- time.sleep(POLL_SLEEP)
- self.assertEqual(thread._count(), orig + 1)
- # Allow the task to finish.
- mut.release()
- # The only reliable way to be sure that the thread ended from the
- # interpreter's point of view is to wait for the function object to be
- # destroyed.
- done = []
- wr = weakref.ref(task, lambda _: done.append(None))
- del task
- while not done:
- time.sleep(POLL_SLEEP)
- self.assertEqual(thread._count(), orig)
+
+ with support.wait_threads_exit():
+ thread.start_new_thread(task, ())
+ while not started:
+ time.sleep(POLL_SLEEP)
+ self.assertEqual(thread._count(), orig + 1)
+ # Allow the task to finish.
+ mut.release()
+ # The only reliable way to be sure that the thread ended from the
+ # interpreter's point of view is to wait for the function object to be
+ # destroyed.
+ done = []
+ wr = weakref.ref(task, lambda _: done.append(None))
+ del task
+ while not done:
+ time.sleep(POLL_SLEEP)
+ self.assertEqual(thread._count(), orig)
def test_save_exception_state_on_error(self):
# See issue #14474
except ValueError:
pass
real_write(self, *args)
- c = thread._count()
started = thread.allocate_lock()
with support.captured_output("stderr") as stderr:
real_write = stderr.write
stderr.write = mywrite
started.acquire()
- thread.start_new_thread(task, ())
- started.acquire()
- while thread._count() > c:
- time.sleep(POLL_SLEEP)
+ with support.wait_threads_exit():
+ thread.start_new_thread(task, ())
+ started.acquire()
self.assertIn("Traceback", stderr.getvalue())
class BarrierTest(BasicThreadTest):
def test_barrier(self):
- self.bar = Barrier(NUMTASKS)
- self.running = NUMTASKS
- for i in range(NUMTASKS):
- thread.start_new_thread(self.task2, (i,))
- verbose_print("waiting for tasks to end")
- self.done_mutex.acquire()
- verbose_print("tasks done")
+ with support.wait_threads_exit():
+ self.bar = Barrier(NUMTASKS)
+ self.running = NUMTASKS
+ for i in range(NUMTASKS):
+ thread.start_new_thread(self.task2, (i,))
+ verbose_print("waiting for tasks to end")
+ self.done_mutex.acquire()
+ verbose_print("tasks done")
def task2(self, ident):
for i in range(NUMTRIPS):
@unittest.skipUnless(hasattr(os, 'fork'), 'need os.fork')
@support.reap_threads
def test_forkinthread(self):
- running = True
status = "not set"
def thread1():
- nonlocal running, status
+ nonlocal status
# fork in a thread
pid = os.fork()
# parent
os.close(self.write_fd)
pid, status = os.waitpid(pid, 0)
- running = False
- thread.start_new_thread(thread1, ())
- self.assertEqual(os.read(self.read_fd, 2), b"OK",
- "Unable to fork() in thread")
- while running:
- time.sleep(POLL_SLEEP)
+ with support.wait_threads_exit():
+ thread.start_new_thread(thread1, ())
+ self.assertEqual(os.read(self.read_fd, 2), b"OK",
+ "Unable to fork() in thread")
self.assertEqual(status, 0)
def tearDown(self):
done.set()
done = threading.Event()
ident = []
- _thread.start_new_thread(f, ())
- done.wait()
- self.assertIsNotNone(ident[0])
+ with support.wait_threads_exit():
+ tid = _thread.start_new_thread(f, ())
+ done.wait()
+ self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
mutex = threading.Lock()
mutex.acquire()
- tid = _thread.start_new_thread(f, (mutex,))
- # Wait for the thread to finish.
- mutex.acquire()
+ with support.wait_threads_exit():
+ tid = _thread.start_new_thread(f, (mutex,))
+ # Wait for the thread to finish.
+ mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
import signal
import os
import sys
-from test.support import run_unittest, import_module
-thread = import_module('_thread')
+from test import support
+thread = support.import_module('_thread')
import time
if (sys.platform[:3] == 'win'):
class ThreadSignals(unittest.TestCase):
def test_signals(self):
- # Test signal handling semantics of threads.
- # We spawn a thread, have the thread send two signals, and
- # wait for it to finish. Check that we got both signals
- # and that they were run by the main thread.
- signalled_all.acquire()
- self.spawnSignallingThread()
- signalled_all.acquire()
+ with support.wait_threads_exit():
+ # Test signal handling semantics of threads.
+ # We spawn a thread, have the thread send two signals, and
+ # wait for it to finish. Check that we got both signals
+ # and that they were run by the main thread.
+ signalled_all.acquire()
+ self.spawnSignallingThread()
+ signalled_all.acquire()
+
# the signals that we asked the kernel to send
# will come back, but we don't know when.
# (it might even be after the thread exits
# wait for it return.
if signal_blackboard[signal.SIGUSR1]['tripped'] == 0 \
or signal_blackboard[signal.SIGUSR2]['tripped'] == 0:
- signal.alarm(1)
- signal.pause()
- signal.alarm(0)
+ try:
+ signal.alarm(1)
+ signal.pause()
+ finally:
+ signal.alarm(0)
self.assertEqual( signal_blackboard[signal.SIGUSR1]['tripped'], 1)
self.assertEqual( signal_blackboard[signal.SIGUSR1]['tripped_by'],
# after timeout return of lock.acquire() (which can fool assertRaises).
self.assertLess(dt, 3.0)
finally:
+ signal.alarm(0)
signal.signal(signal.SIGALRM, oldalrm)
@unittest.skipIf(USING_PTHREAD_COND,
# thread.
def other_thread():
rlock.acquire()
- thread.start_new_thread(other_thread, ())
- # Wait until we can't acquire it without blocking...
- while rlock.acquire(blocking=False):
- rlock.release()
- time.sleep(0.01)
- signal.alarm(1)
- t1 = time.time()
- self.assertRaises(KeyboardInterrupt, rlock.acquire, timeout=5)
- dt = time.time() - t1
- # See rationale above in test_lock_acquire_interruption
- self.assertLess(dt, 3.0)
+
+ with support.wait_threads_exit():
+ thread.start_new_thread(other_thread, ())
+ # Wait until we can't acquire it without blocking...
+ while rlock.acquire(blocking=False):
+ rlock.release()
+ time.sleep(0.01)
+ signal.alarm(1)
+ t1 = time.time()
+ self.assertRaises(KeyboardInterrupt, rlock.acquire, timeout=5)
+ dt = time.time() - t1
+ # See rationale above in test_lock_acquire_interruption
+ self.assertLess(dt, 3.0)
finally:
+ signal.alarm(0)
signal.signal(signal.SIGALRM, oldalrm)
def acquire_retries_on_intr(self, lock):
self.sig_recvd = False
def my_handler(signal, frame):
self.sig_recvd = True
+
old_handler = signal.signal(signal.SIGUSR1, my_handler)
try:
def other_thread():
# the lock acquisition. Then we'll let it run.
time.sleep(0.5)
lock.release()
- thread.start_new_thread(other_thread, ())
- # Wait until we can't acquire it without blocking...
- while lock.acquire(blocking=False):
- lock.release()
- time.sleep(0.01)
- result = lock.acquire() # Block while we receive a signal.
- self.assertTrue(self.sig_recvd)
- self.assertTrue(result)
+
+ with support.wait_threads_exit():
+ thread.start_new_thread(other_thread, ())
+ # Wait until we can't acquire it without blocking...
+ while lock.acquire(blocking=False):
+ lock.release()
+ time.sleep(0.01)
+ result = lock.acquire() # Block while we receive a signal.
+ self.assertTrue(self.sig_recvd)
+ self.assertTrue(result)
finally:
signal.signal(signal.SIGUSR1, old_handler)
os.kill(process_pid, signal.SIGUSR1)
done.release()
- # Send the signals from the non-main thread, since the main thread
- # is the only one that can process signals.
- thread.start_new_thread(send_signals, ())
- timed_acquire()
- # Wait for thread to finish
- done.acquire()
- # This allows for some timing and scheduling imprecision
- self.assertLess(self.end - self.start, 2.0)
- self.assertGreater(self.end - self.start, 0.3)
- # If the signal is received several times before PyErr_CheckSignals()
- # is called, the handler will get called less than 40 times. Just
- # check it's been called at least once.
- self.assertGreater(self.sigs_recvd, 0)
+ with support.wait_threads_exit():
+ # Send the signals from the non-main thread, since the main thread
+ # is the only one that can process signals.
+ thread.start_new_thread(send_signals, ())
+ timed_acquire()
+ # Wait for thread to finish
+ done.acquire()
+ # This allows for some timing and scheduling imprecision
+ self.assertLess(self.end - self.start, 2.0)
+ self.assertGreater(self.end - self.start, 0.3)
+ # If the signal is received several times before PyErr_CheckSignals()
+ # is called, the handler will get called less than 40 times. Just
+ # check it's been called at least once.
+ self.assertGreater(self.sigs_recvd, 0)
finally:
signal.signal(signal.SIGUSR1, old_handler)
oldsigs = registerSignals(handle_signals, handle_signals, handle_signals)
try:
- run_unittest(ThreadSignals)
+ support.run_unittest(ThreadSignals)
finally:
registerSignals(*oldsigs)
import os
import unittest
-import textwrap
+from textwrap import dedent
from test.support.script_helper import assert_python_ok
from test.test_tools import skip_if_missing, toolsdir
-from test.support import temp_cwd
+from test.support import temp_cwd, temp_dir
skip_if_missing()
# This will raise if the date format does not exactly match.
datetime.strptime(creationDate, '%Y-%m-%d %H:%M%z')
+ def test_funcdocstring(self):
+ for doc in ('"""doc"""', "r'''doc'''", "R'doc'", 'u"doc"'):
+ with self.subTest(doc):
+ msgids = self.extract_docstrings_from_str(dedent('''\
+ def foo(bar):
+ %s
+ ''' % doc))
+ self.assertIn('doc', msgids)
+
+ def test_funcdocstring_bytes(self):
+ msgids = self.extract_docstrings_from_str(dedent('''\
+ def foo(bar):
+ b"""doc"""
+ '''))
+ self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])
+
+ def test_funcdocstring_fstring(self):
+ msgids = self.extract_docstrings_from_str(dedent('''\
+ def foo(bar):
+ f"""doc"""
+ '''))
+ self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])
+
+ def test_classdocstring(self):
+ for doc in ('"""doc"""', "r'''doc'''", "R'doc'", 'u"doc"'):
+ with self.subTest(doc):
+ msgids = self.extract_docstrings_from_str(dedent('''\
+ class C:
+ %s
+ ''' % doc))
+ self.assertIn('doc', msgids)
+
+ def test_classdocstring_bytes(self):
+ msgids = self.extract_docstrings_from_str(dedent('''\
+ class C:
+ b"""doc"""
+ '''))
+ self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])
+
+ def test_classdocstring_fstring(self):
+ msgids = self.extract_docstrings_from_str(dedent('''\
+ class C:
+ f"""doc"""
+ '''))
+ self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])
+
+ def test_msgid(self):
+ msgids = self.extract_docstrings_from_str(
+ '''_("""doc""" r'str' u"ing")''')
+ self.assertIn('docstring', msgids)
+
+ def test_msgid_bytes(self):
+ msgids = self.extract_docstrings_from_str('_(b"""doc""")')
+ self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])
+
+ def test_msgid_fstring(self):
+ msgids = self.extract_docstrings_from_str('_(f"""doc""")')
+ self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])
+
def test_funcdocstring_annotated_args(self):
""" Test docstrings for functions with annotated args """
- msgids = self.extract_docstrings_from_str(textwrap.dedent('''\
+ msgids = self.extract_docstrings_from_str(dedent('''\
def foo(bar: str):
"""doc"""
'''))
def test_funcdocstring_annotated_return(self):
""" Test docstrings for functions with annotated return type """
- msgids = self.extract_docstrings_from_str(textwrap.dedent('''\
+ msgids = self.extract_docstrings_from_str(dedent('''\
def foo(bar) -> str:
"""doc"""
'''))
def test_funcdocstring_defvalue_args(self):
""" Test docstring for functions with default arg values """
- msgids = self.extract_docstrings_from_str(textwrap.dedent('''\
+ msgids = self.extract_docstrings_from_str(dedent('''\
def foo(bar=()):
"""doc"""
'''))
""" Test docstring extraction for multiple functions combining
annotated args, annotated return types and default arg values
"""
- msgids = self.extract_docstrings_from_str(textwrap.dedent('''\
+ msgids = self.extract_docstrings_from_str(dedent('''\
def foo1(bar: tuple=()) -> str:
"""doc1"""
""" Test docstring extraction for a class with colons occuring within
the parentheses.
"""
- msgids = self.extract_docstrings_from_str(textwrap.dedent('''\
+ msgids = self.extract_docstrings_from_str(dedent('''\
class D(L[1:2], F({1: 2}), metaclass=M(lambda x: x)):
"""doc"""
'''))
self.assertIn('doc', msgids)
+
+ def test_files_list(self):
+ """Make sure the directories are inspected for source files
+ bpo-31920
+ """
+ text1 = 'Text to translate1'
+ text2 = 'Text to translate2'
+ text3 = 'Text to ignore'
+ with temp_cwd(None), temp_dir(None) as sdir:
+ os.mkdir(os.path.join(sdir, 'pypkg'))
+ with open(os.path.join(sdir, 'pypkg', 'pymod.py'), 'w') as sfile:
+ sfile.write(f'_({text1!r})')
+ os.mkdir(os.path.join(sdir, 'pkg.py'))
+ with open(os.path.join(sdir, 'pkg.py', 'pymod2.py'), 'w') as sfile:
+ sfile.write(f'_({text2!r})')
+ os.mkdir(os.path.join(sdir, 'CVS'))
+ with open(os.path.join(sdir, 'CVS', 'pymod3.py'), 'w') as sfile:
+ sfile.write(f'_({text3!r})')
+ assert_python_ok(self.script, sdir)
+ with open('messages.pot') as fp:
+ data = fp.read()
+ self.assertIn(f'msgid "{text1}"', data)
+ self.assertIn(f'msgid "{text2}"', data)
+ self.assertNotIn(text3, data)
import sys
from test.support import TESTFN, rmtree, unlink, captured_stdout
from test.support.script_helper import assert_python_ok, assert_python_failure
+import textwrap
import unittest
import trace
# Matched before.
self.assertTrue(ignore.names(jn('bar', 'baz.py'), 'baz'))
+# Created for Issue 31908 -- CLI utility not writing cover files
+class TestCoverageCommandLineOutput(unittest.TestCase):
+
+ codefile = 'tmp.py'
+ coverfile = 'tmp.cover'
+
+ def setUp(self):
+ with open(self.codefile, 'w') as f:
+ f.write(textwrap.dedent('''\
+ x = 42
+ if []:
+ print('unreachable')
+ '''))
+
+ def tearDown(self):
+ unlink(self.codefile)
+ unlink(self.coverfile)
+
+ def test_cover_files_written_no_highlight(self):
+ argv = '-m trace --count'.split() + [self.codefile]
+ status, stdout, stderr = assert_python_ok(*argv)
+ self.assertTrue(os.path.exists(self.coverfile))
+ with open(self.coverfile) as f:
+ self.assertEqual(f.read(),
+ " 1: x = 42\n"
+ " 1: if []:\n"
+ " print('unreachable')\n"
+ )
+
+ def test_cover_files_written_with_highlight(self):
+ argv = '-m trace --count --missing'.split() + [self.codefile]
+ status, stdout, stderr = assert_python_ok(*argv)
+ self.assertTrue(os.path.exists(self.coverfile))
+ with open(self.coverfile) as f:
+ self.assertEqual(f.read(), textwrap.dedent('''\
+ 1: x = 42
+ 1: if []:
+ >>>>>> print('unreachable')
+ '''))
+
class TestCommandLine(unittest.TestCase):
def test_failures(self):
with self.assertRaises(Exception):
D[T]
+ def test_new_with_args(self):
+
+ class A(Generic[T]):
+ pass
+
+ class B:
+ def __new__(cls, arg):
+ # call object
+ obj = super().__new__(cls)
+ obj.arg = arg
+ return obj
+
+ # mro: C, A, Generic, B, object
+ class C(A, B):
+ pass
+
+ c = C('foo')
+ self.assertEqual(c.arg, 'foo')
+
+ def test_new_with_args2(self):
+
+ class A:
+ def __init__(self, arg):
+ self.from_a = arg
+ # call object
+ super().__init__()
+
+ # mro: C, Generic, A, object
+ class C(Generic[T], A):
+ def __init__(self, arg):
+ self.from_c = arg
+ # call Generic
+ super().__init__(arg)
+
+ c = C('foo')
+ self.assertEqual(c.from_a, 'foo')
+ self.assertEqual(c.from_c, 'foo')
+
+ def test_new_no_args(self):
+
+ class A(Generic[T]):
+ pass
+
+ with self.assertRaises(TypeError):
+ A('foo')
+
+ class B:
+ def __new__(cls):
+ # call object
+ obj = super().__new__(cls)
+ obj.from_b = 'b'
+ return obj
+
+ # mro: C, A, Generic, B, object
+ class C(A, B):
+ def __init__(self, arg):
+ self.arg = arg
+
+ def __new__(cls, arg):
+ # call A
+ obj = super().__new__(cls)
+ obj.from_c = 'c'
+ return obj
+
+ c = C('foo')
+ self.assertEqual(c.arg, 'foo')
+ self.assertEqual(c.from_b, 'b')
+ self.assertEqual(c.from_c, 'c')
class ClassVarTests(BaseTestCase):
self.assertEqual(gth(HasForeignBaseClass),
{'some_xrepr': XRepr, 'other_a': mod_generics_cache.A,
'some_b': mod_generics_cache.B})
+ self.assertEqual(gth(XRepr.__new__),
+ {'x': int, 'y': int})
self.assertEqual(gth(mod_generics_cache.B),
{'my_inner_a1': mod_generics_cache.B.A,
'my_inner_a2': mod_generics_cache.B.A,
opener.open(request)
self.assertEqual(request.get_header('User-agent'),'Test-Agent')
+ @unittest.skip('XXX: http://www.imdb.com is gone')
def test_sites_no_connection_close(self):
# Some sites do not send Connection: close header.
# Verify that those work properly. (#issue12576)
import unittest
from test import support
-import sys, os
+import sys
import uu
import io
class UUFileTest(unittest.TestCase):
- def _kill(self, f):
- # close and remove file
- if f is None:
- return
- try:
- f.close()
- except (SystemExit, KeyboardInterrupt):
- raise
- except:
- pass
- try:
- os.unlink(f.name)
- except (SystemExit, KeyboardInterrupt):
- raise
- except:
- pass
-
def setUp(self):
self.tmpin = support.TESTFN + "i"
self.tmpout = support.TESTFN + "o"
-
- def tearDown(self):
- del self.tmpin
- del self.tmpout
+ self.addCleanup(support.unlink, self.tmpin)
+ self.addCleanup(support.unlink, self.tmpout)
def test_encode(self):
- fin = fout = None
- try:
- support.unlink(self.tmpin)
- fin = open(self.tmpin, 'wb')
+ with open(self.tmpin, 'wb') as fin:
fin.write(plaintext)
- fin.close()
- fin = open(self.tmpin, 'rb')
- fout = open(self.tmpout, 'wb')
- uu.encode(fin, fout, self.tmpin, mode=0o644)
- fin.close()
- fout.close()
+ with open(self.tmpin, 'rb') as fin:
+ with open(self.tmpout, 'wb') as fout:
+ uu.encode(fin, fout, self.tmpin, mode=0o644)
- fout = open(self.tmpout, 'rb')
+ with open(self.tmpout, 'rb') as fout:
s = fout.read()
- fout.close()
- self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
+ self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
- # in_file and out_file as filenames
- uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0o644)
- fout = open(self.tmpout, 'rb')
+ # in_file and out_file as filenames
+ uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0o644)
+ with open(self.tmpout, 'rb') as fout:
s = fout.read()
- fout.close()
- self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
-
- finally:
- self._kill(fin)
- self._kill(fout)
+ self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
def test_decode(self):
- f = None
- try:
- support.unlink(self.tmpin)
- f = open(self.tmpin, 'wb')
+ with open(self.tmpin, 'wb') as f:
f.write(encodedtextwrapped(0o644, self.tmpout))
- f.close()
- f = open(self.tmpin, 'rb')
+ with open(self.tmpin, 'rb') as f:
uu.decode(f)
- f.close()
- f = open(self.tmpout, 'rb')
+ with open(self.tmpout, 'rb') as f:
s = f.read()
- f.close()
- self.assertEqual(s, plaintext)
- # XXX is there an xp way to verify the mode?
- finally:
- self._kill(f)
+ self.assertEqual(s, plaintext)
+ # XXX is there an xp way to verify the mode?
def test_decode_filename(self):
- f = None
- try:
- support.unlink(self.tmpin)
- f = open(self.tmpin, 'wb')
+ with open(self.tmpin, 'wb') as f:
f.write(encodedtextwrapped(0o644, self.tmpout))
- f.close()
- uu.decode(self.tmpin)
+ uu.decode(self.tmpin)
- f = open(self.tmpout, 'rb')
+ with open(self.tmpout, 'rb') as f:
s = f.read()
- f.close()
- self.assertEqual(s, plaintext)
- finally:
- self._kill(f)
+ self.assertEqual(s, plaintext)
def test_decodetwice(self):
# Verify that decode() will refuse to overwrite an existing file
- f = None
- try:
- f = io.BytesIO(encodedtextwrapped(0o644, self.tmpout))
-
- f = open(self.tmpin, 'rb')
+ with open(self.tmpin, 'wb') as f:
+ f.write(encodedtextwrapped(0o644, self.tmpout))
+ with open(self.tmpin, 'rb') as f:
uu.decode(f)
- f.close()
- f = open(self.tmpin, 'rb')
+ with open(self.tmpin, 'rb') as f:
self.assertRaises(uu.Error, uu.decode, f)
- f.close()
- finally:
- self._kill(f)
def test_main():
support.run_unittest(UUTest,
self.assertEqual(str(cm.exception),
'junk after document element: line 1, column 12')
+ self.addCleanup(support.unlink, TESTFN)
with open(TESTFN, "wb") as f:
f.write(b"<document />junk")
it = iterparse(TESTFN)
class IOTest(unittest.TestCase):
- def tearDown(self):
- support.unlink(TESTFN)
-
def test_encoding(self):
# Test encoding issues.
elem = ET.Element("tag")
"<tag key=\"åöö<>\" />" % enc).encode(enc))
def test_write_to_filename(self):
+ self.addCleanup(support.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
tree.write(TESTFN)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_text_file(self):
+ self.addCleanup(support.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
with open(TESTFN, 'w', encoding='utf-8') as f:
tree.write(f, encoding='unicode')
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_binary_file(self):
+ self.addCleanup(support.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
with open(TESTFN, 'wb') as f:
tree.write(f)
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_binary_file_with_bom(self):
+ self.addCleanup(support.unlink, TESTFN)
tree = ET.ElementTree(ET.XML('''<site />'''))
# test BOM writing to buffered file
with open(TESTFN, 'wb') as f:
class ServerProxyTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
- if threading:
- self.url = URL
- else:
- # Without threading, http_server() and http_multi_server() will not
- # be executed and URL is still equal to None. 'http://' is a just
- # enough to choose the scheme (HTTP)
- self.url = 'http://'
+ # Actual value of the URL doesn't matter if it is a string in
+ # the correct format.
+ self.url = 'http://fake.localhost'
def test_close(self):
p = xmlrpclib.ServerProxy(self.url)
def ident(self):
"""Thread identifier of this thread or None if it has not been started.
- This is a nonzero integer. See the thread.get_ident() function. Thread
+ This is a nonzero integer. See the get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.
self.tk.call('image', 'width', self.name))
class PhotoImage(Image):
- """Widget which can display colored images in GIF, PPM/PGM format."""
+ """Widget which can display images in PGM, PPM, GIF, PNG format."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create an image with NAME.
self.tk.call(args)
class BitmapImage(Image):
- """Widget which can display a bitmap."""
+ """Widget which can display images in XBM format."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create a bitmap with NAME.
self.tv.insert('', 'end', text=value), text=None),
value)
+ # test for values which are not None
+ itemid = self.tv.insert('', 'end', 0)
+ self.assertEqual(itemid, '0')
+ itemid = self.tv.insert('', 'end', 0.0)
+ self.assertEqual(itemid, '0.0')
+ # this fails because False resolves to 0, and an element with iid 0 is already present
+ self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end', False)
+ self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end', '')
+
def test_selection(self):
self.assertRaises(TypeError, self.tv.selection, 'spam')
already exist in the tree. Otherwise, a new unique identifier
is generated."""
opts = _format_optdict(kw)
- if iid:
+ if iid is not None:
res = self.tk.call(self._w, "insert", parent, index,
"-id", iid, *opts)
else:
PRAGMA_NOCOVER = "#pragma NO COVER"
-# Simple rx to find lines with no code.
-rx_blank = re.compile(r'^\s*(#.*)?$')
-
class _Ignore:
def __init__(self, modules=None, dirs=None):
self._mods = set() if not modules else set(modules)
lnotab = _find_executable_linenos(filename)
else:
lnotab = {}
- if lnotab:
- source = linecache.getlines(filename)
- coverpath = os.path.join(dir, modulename + ".cover")
- with open(filename, 'rb') as fp:
- encoding, _ = tokenize.detect_encoding(fp.readline)
- n_hits, n_lines = self.write_results_file(coverpath, source,
- lnotab, count, encoding)
- if summary and n_lines:
- percent = int(100 * n_hits / n_lines)
- sums[modulename] = n_lines, percent, modulename, filename
+ source = linecache.getlines(filename)
+ coverpath = os.path.join(dir, modulename + ".cover")
+ with open(filename, 'rb') as fp:
+ encoding, _ = tokenize.detect_encoding(fp.readline)
+ n_hits, n_lines = self.write_results_file(coverpath, source,
+ lnotab, count, encoding)
+ if summary and n_lines:
+ percent = int(100 * n_hits / n_lines)
+ sums[modulename] = n_lines, percent, modulename, filename
if summary and sums:
def write_results_file(self, path, lines, lnotab, lines_hit, encoding=None):
"""Return a coverage results file in path."""
+ # ``lnotab`` is a dict of executable lines, or a line number "table"
try:
outfile = open(path, "w", encoding=encoding)
outfile.write("%5d: " % lines_hit[lineno])
n_hits += 1
n_lines += 1
- elif rx_blank.match(line):
- outfile.write(" ")
- else:
- # lines preceded by no marks weren't hit
- # Highlight them if so indicated, unless the line contains
+ elif lineno in lnotab and not PRAGMA_NOCOVER in line:
+ # Highlight never-executed lines, unless the line contains
# #pragma: NO COVER
- if lineno in lnotab and not PRAGMA_NOCOVER in line:
- outfile.write(">>>>>> ")
- n_lines += 1
- else:
- outfile.write(" ")
+ outfile.write(">>>>>> ")
+ n_lines += 1
+ else:
+ outfile.write(" ")
outfile.write(line.expandtabs(8))
return n_hits, n_lines
# Assure type is erased on instantiation,
# but attempt to store it in __orig_class__
if cls.__origin__ is None:
- return base_cls.__new__(cls)
+ if (base_cls.__new__ is object.__new__ and
+ cls.__init__ is not object.__init__):
+ return base_cls.__new__(cls)
+ else:
+ return base_cls.__new__(cls, *args, **kwds)
else:
origin = cls._gorg
- obj = base_cls.__new__(origin)
+ if (base_cls.__new__ is object.__new__ and
+ cls.__init__ is not object.__init__):
+ obj = base_cls.__new__(origin)
+ else:
+ obj = base_cls.__new__(origin, *args, **kwds)
try:
obj.__orig_class__ = cls
except AttributeError:
"follow default field(s) {default_names}"
.format(field_name=field_name,
default_names=', '.join(defaults_dict.keys())))
+ nm_tpl.__new__.__annotations__ = collections.OrderedDict(types)
nm_tpl.__new__.__defaults__ = tuple(defaults)
nm_tpl._field_defaults = defaults_dict
# update from user namespace without overriding special namedtuple attributes
# Test tools for mocking and patching.
# Maintained by Michael Foord
# Backport for other versions of Python available from
-# http://pypi.python.org/pypi/mock
+# https://pypi.org/project/mock
__all__ = (
'Mock',
import sys
import types
import pickle
-import builtins
from test import support
+import test.test_importlib.util
import unittest
+import unittest.mock
import unittest.test
def test_discovery_from_dotted_namespace_packages(self):
loader = unittest.TestLoader()
- orig_import = __import__
package = types.ModuleType('package')
package.__path__ = ['/a', '/b']
package.__spec__ = types.SimpleNamespace(
sys.modules[packagename] = package
return package
- def cleanup():
- builtins.__import__ = orig_import
- self.addCleanup(cleanup)
- builtins.__import__ = _import
-
_find_tests_args = []
def _find_tests(start_dir, pattern, namespace=None):
_find_tests_args.append((start_dir, pattern))
loader._find_tests = _find_tests
loader.suiteClass = list
- suite = loader.discover('package')
+
+ with unittest.mock.patch('builtins.__import__', _import):
+ # Since loader.discover() can modify sys.path, restore it when done.
+ with support.DirsOnSysPath():
+ # Make sure to remove 'package' from sys.modules when done.
+ with test.test_importlib.util.uncache('package'):
+ suite = loader.discover('package')
+
self.assertEqual(suite, ['/a/tests', '/b/tests'])
def test_discovery_failed_discovery(self):
loader = unittest.TestLoader()
package = types.ModuleType('package')
- orig_import = __import__
def _import(packagename, *args, **kwargs):
sys.modules[packagename] = package
return package
- def cleanup():
- builtins.__import__ = orig_import
- self.addCleanup(cleanup)
- builtins.__import__ = _import
-
- with self.assertRaises(TypeError) as cm:
- loader.discover('package')
- self.assertEqual(str(cm.exception),
- 'don\'t know how to discover from {!r}'
- .format(package))
+ with unittest.mock.patch('builtins.__import__', _import):
+ # Since loader.discover() can modify sys.path, restore it when done.
+ with support.DirsOnSysPath():
+ # Make sure to remove 'package' from sys.modules when done.
+ with test.test_importlib.util.uncache('package'):
+ with self.assertRaises(TypeError) as cm:
+ loader.discover('package')
+ self.assertEqual(str(cm.exception),
+ 'don\'t know how to discover from {!r}'
+ .format(package))
if __name__ == '__main__':
return self.default_entry.req_rate
def __str__(self):
- return ''.join([str(entry) + "\n" for entry in self.entries])
+ entries = self.entries
+ if self.default_entry is not None:
+ entries = entries + [self.default_entry]
+ return '\n'.join(map(str, entries)) + '\n'
class RuleLine:
def __str__(self):
ret = []
for agent in self.useragents:
- ret.extend(["User-agent: ", agent, "\n"])
- for line in self.rulelines:
- ret.extend([str(line), "\n"])
- return ''.join(ret)
+ ret.append(f"User-agent: {agent}")
+ if self.delay is not None:
+ ret.append(f"Crawl-delay: {self.delay}")
+ if self.req_rate is not None:
+ rate = self.req_rate
+ ret.append(f"Request-rate: {rate.requests}/{rate.seconds}")
+ ret.extend(map(str, self.rulelines))
+ ret.append('') # for compatibility
+ return '\n'.join(ret)
def applies_to(self, useragent):
"""check if this entry applies to the specified agent"""
with proc:
for line in proc.stdout:
value = line.split(':')[-1].strip().lower()
- if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
+ if re.fullmatch('(?:[0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
return int(value.replace('-', ''), 16)
def _netbios_getnode():
result.extend([
dict(
- name="OpenSSL 1.0.2n",
- url="https://www.openssl.org/source/openssl-1.0.2n.tar.gz",
- checksum='13bdc1b1d1ff39b6fd42a255e74676a4',
+ name="OpenSSL 1.0.2o",
+ url="https://www.openssl.org/source/openssl-1.0.2o.tar.gz",
+ checksum='44279b8557c3247cbe324e2322ecd114',
buildrecipe=build_universal_openssl,
configure=None,
install=None,
shellQuote(os.path.join(WORKDIR, 'installer')),
shellQuote(imagepath + ".tmp.dmg" )))
+ # Try to mitigate race condition in certain versions of macOS, e.g. 10.9,
+ # when hdiutil fails with "Resource busy"
+
+ time.sleep(10)
if not os.path.exists(os.path.join(WORKDIR, "mnt")):
os.mkdir(os.path.join(WORKDIR, "mnt"))
-{\rtf1\ansi\ansicpg1252\cocoartf1561\cocoasubrtf200
+{\rtf1\ansi\ansicpg1252\cocoartf1561\cocoasubrtf400
{\fonttbl\f0\fswiss\fcharset0 Helvetica;\f1\fmodern\fcharset0 CourierNewPSMT;}
{\colortbl;\red255\green255\blue255;}
{\*\expandedcolortbl;;}
\
\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0
-\b \cf0 \ul \ulc0 Which installer variant should I use? [CHANGED in 3.6.5]
+\b \cf0 \ul \ulc0 Which installer variant should I use? [CHANGED in 3.6.6]
\b0 \ulnone \
\
-\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0
-
-\b \cf0 **NEW**
-\b0 With Python 3.6.5, the python.org website now provides two installer variants for download: one that installs a
+With Python 3.6.5, the python.org website now provides two installer variants for download: one that installs a
\i 64-bit-only
\i0 Python capable of running on
\i macOS 10.9 (Mavericks)
\i0 or later. (This ReadMe was installed with the
\i $MACOSX_DEPLOYMENT_TARGET
\i0 variant.) Previous Python 3.6.x releases only provided the 10.6 or later installer. If you are running on macOS 10.9 or later and if you have no need for compatibility with older systems, use the 10.9 variant. Use the 10.6 variant if you are running on macOS 10.6 through 10.8, if you need to maintain compatibility with previous 3.6.x releases, or if you want to produce standalone applications that can run on systems from 10.6. The Pythons installed by these installers are built with private copies of some third-party libraries not included with or newer than those in macOS itself. The list of these libraries varies by installer variant and is included at the end of the License.rtf file.\
+\
+\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\partightenfactor0
+
+\b \cf0 CHANGED in 3.6.6:
+\b0 the 10.9+ 64-bit-only installer variant is now the default download. The 10.6+ variant is available from the $FULL_VERSION release page.\
\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0
-\b \cf0 \ul \ulc0 \
+\b \cf0 \ul \
Certificate verification and OpenSSL\
\b0 \ulnone \
\f1 /Applications/Python 3.6
\f0 to install a curated bundle of default root certificates from the third-party
\f1 certifi
-\f0 package ({\field{\*\fldinst{HYPERLINK "https://pypi.python.org/pypi/certifi"}}{\fldrslt https://pypi.python.org/pypi/certifi}}). If you choose to use
+\f0 package ({\field{\*\fldinst{HYPERLINK "https://pypi.org/project/certifi/"}}{\fldrslt https://pypi.org/project/certifi/}}). If you choose to use
\f1 certifi
\f0 , you should consider subscribing to the{\field{\*\fldinst{HYPERLINK "https://certifi.io/en/latest/"}}{\fldrslt project's email update service}} to be notified when the certificate bundle is updated.\
\
\
The 10.9+ installer variant comes with its own private version of Tcl/Tk 8.6. It does not use system-supplied or third-party supplied versions of Tcl/Tk.\
\
-For the 10.6+ variant, you continue to need to install a newer third-party version of the
+For the 10.6+ variant in 3.6.6, you continue to need to install a newer third-party version of the
\i Tcl/Tk
\i0 8.5 (not 8.6) frameworks to use IDLE or other programs that use the Tkinter graphical user interface toolkit. Visit {\field{\*\fldinst{HYPERLINK "https://www.python.org/download/mac/tcltk/"}}{\fldrslt https://www.python.org/download/mac/tcltk/}} for current information about supported and recommended versions of
\i Tcl/Tk
\i0 for this version of Python and of macOS.\
+\
+\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0
-\b \ul \
+\b \cf0 NOTE:
+\b0 As of the next 3.6.x release, 3.6.7, the 10.6+ variant will also include Tcl/Tk 8.6.\
+\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0
+
+\b \cf0 \ul \
Other changes\
\b0 \ulnone \
-{\rtf1\ansi\ansicpg1252\cocoartf1561\cocoasubrtf200
+{\rtf1\ansi\ansicpg1252\cocoartf1561\cocoasubrtf400
\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;}
{\colortbl;\red255\green255\blue255;}
{\*\expandedcolortbl;;}
\b NEW in 3.6.5:
\b0 two installer variants (10.9+ 64-bit-only, 10.6+ 64-/32-bit), built-in Tcl/Tk 8.6 support in the 10.9+ variant (no additional third-party downloads!)\
-}
\ No newline at end of file
+\
+
+\b CHANGED in 3.6.6:
+\b0 the 10.9+ 64-bit-only installer variant is now the default download}
\ No newline at end of file
#
# sample script to install or update a set of default Root Certificates
# for the ssl module. Uses the certificates provided by the certifi package:
-# https://pypi.python.org/pypi/certifi
+# https://pypi.org/project/certifi/
import os
import os.path
"${FWK}/bin/python${PYVER}" -E -s -m ensurepip --upgrade
+# bpo-33290: An earlier "pip3 install --upgrade pip" may have installed
+# a "pip" in the fw bin directory. For a py3 install, remove it.
+
+rm -f "${FWK}/bin/pip"
+
"${FWK}/bin/python${PYVER}" -E -s -Wi \
"${FWK}/lib/python${PYVER}/compileall.py" -q -j0 \
-f -x badsyntax \
-test -d "$(DESTDIR)$(PYTHONAPPSDIR)/IDLE.app" && rm -rf "$(DESTDIR)$(PYTHONAPPSDIR)/IDLE.app"
/bin/cp -PR "$(srcdir)/IDLE/IDLE.app" "$(DESTDIR)$(PYTHONAPPSDIR)"
ln -sf "$(INSTALLED_PYTHONAPP)" "$(DESTDIR)$(PYTHONAPPSDIR)/IDLE.app/Contents/MacOS/Python"
-ifneq ($(LIPO_32BIT_FLAGS),)
- rm "$(DESTDIR)$(PYTHONAPPSDIR)/IDLE.app/Contents/MacOS/Python"
- lipo $(LIPO_32BIT_FLAGS) -output "$(DESTDIR)$(PYTHONAPPSDIR)/IDLE.app/Contents/MacOS/Python" "$(BUILDPYTHON)"
-endif
sed -e "s!%prefix%!$(prefix)!g" -e 's!%exe%!$(PYTHONFRAMEWORK)!g' < "$(srcdir)/IDLE/IDLE.app/Contents/MacOS/IDLE" > "$(DESTDIR)$(PYTHONAPPSDIR)/IDLE.app/Contents/MacOS/IDLE"
sed "s!%version%!`$(RUNSHARED) $(BUILDPYTHON) -c 'import platform; print(platform.python_version())'`!g" < "$(srcdir)/IDLE/IDLE.app/Contents/Info.plist" > "$(DESTDIR)$(PYTHONAPPSDIR)/IDLE.app/Contents/Info.plist"
if [ -f "$(DESTDIR)$(LIBDEST)/idlelib/config-main.def" ]; then \
# Under GNU make, MAKEFLAGS are sorted and normalized; the 's' for
# -s, --silent or --quiet is always the first char.
# Under BSD make, MAKEFLAGS might be " -s -v x=y".
+# Ignore macros passed by GNU make, passed after --
sharedmods: $(BUILDPYTHON) pybuilddir.txt Modules/_math.o
- @case "$$MAKEFLAGS" in \
+ @case "`echo X $$MAKEFLAGS | sed 's/^X //;s/ -- .*//'`" in \
*\ -s*|s*) quiet="-q";; \
*) quiet="";; \
esac; \
+ echo "$(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' \
+ _TCLTK_INCLUDES='$(TCLTK_INCLUDES)' _TCLTK_LIBS='$(TCLTK_LIBS)' \
+ $(PYTHON_FOR_BUILD) $(srcdir)/setup.py $$quiet build"; \
$(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' \
_TCLTK_INCLUDES='$(TCLTK_INCLUDES)' _TCLTK_LIBS='$(TCLTK_LIBS)' \
$(PYTHON_FOR_BUILD) $(srcdir)/setup.py $$quiet build
Martin Bless
Pablo Bleyer
Erik van Blokland
+Stéphane Blondon
Eric Blossom
Sergey Bobrov
Finn Bock
David Edelsohn
John Edmonds
Grant Edwards
+Zvi Effron
John Ehresman
Tal Einat
Eric Eisner
Brad Howes
Mike Hoy
Ben Hoyt
+Miro Hrončok
Chiu-Hsiang Hsu
Chih-Hao Huang
Christian Hudon
Stefan Krah
Rolf Krahl
Bob Kras
+Oleg Krasnikov
Sebastian Kreft
Holger Krekel
Michael Kremer
Antoine Pitrou
Jean-François Piéronne
Oleg Plakhotnyuk
+Marcel Plch
Remi Pointel
Jon Poler
Ariel Poliak
Aaron Watters
Henrik Weber
Leon Weber
+Steve Weber
Corran Webster
Glyn Webster
Phil Webster
Python News
+++++++++++
+What's New in Python 3.6.6 final?
+=================================
+
+*Release date: 2018-06-27*
+
+There were no new changes in version 3.6.6.
+
+
+
+What's New in Python 3.6.6 release candidate 1?
+===============================================
+
+*Release date: 2018-06-11*
+
+Core and Builtins
+-----------------
+
+- bpo-33786: Fix asynchronous generators to handle GeneratorExit in athrow()
+ correctly
+
+- bpo-30654: Fixed reset of the SIGINT handler to SIG_DFL on interpreter
+ shutdown even when there was a custom handler set previously. Patch by
+ Philipp Kerling.
+
+- bpo-33622: Fixed a leak when the garbage collector fails to add an object
+ with the ``__del__`` method or referenced by it into the
+ :data:`gc.garbage` list. :c:func:`PyGC_Collect` can now be called when an
+ exception is set and preserves it.
+
+- bpo-31849: Fix signed/unsigned comparison warning in pyhash.c.
+
+- bpo-33391: Fix a leak in set_symmetric_difference().
+
+- bpo-28055: Fix unaligned accesses in siphash24(). Patch by Rolf Eike Beer.
+
+- bpo-33231: Fix potential memory leak in ``normalizestring()``.
+
+- bpo-29922: Improved error messages in 'async with' when ``__aenter__()``
+ or ``__aexit__()`` return non-awaitable object.
+
+- bpo-33199: Fix ``ma_version_tag`` in dict implementation is uninitialized
+ when copying from key-sharing dict.
+
+- bpo-33041: Fixed jumping when the function contains an ``async for`` loop.
+
+- bpo-32282: Fix an unnecessary ifdef in the include of VersionHelpers.h in
+ socketmodule on Windows.
+
+- bpo-21983: Fix a crash in `ctypes.cast()` in case the type argument is a
+ ctypes structured data type. Patch by Eryk Sun and Oren Milman.
+
+Library
+-------
+
+- bpo-30167: Prevent site.main() exception if PYTHONSTARTUP is set. Patch by
+ Steve Weber.
+
+- bpo-33812: Datetime instance d with non-None tzinfo, but with
+ d.tzinfo.utcoffset(d) returning None is now treated as naive by the
+ astimezone() method.
+
+- bpo-30805: Avoid race condition with debug logging
+
+- bpo-33767: The concatenation (``+``) and repetition (``*``) sequence
+ operations now raise :exc:`TypeError` instead of :exc:`SystemError` when
+ performed on :class:`mmap.mmap` objects. Patch by Zackery Spytz.
+
+- bpo-32684: Fix gather to propagate cancellation of itself even with
+ return_exceptions.
+
+- bpo-33674: Fix a race condition in SSLProtocol.connection_made() of
+ asyncio.sslproto: start immediately the handshake instead of using
+ call_soon(). Previously, data_received() could be called before the
+ handshake started, causing the handshake to hang or fail.
+
+- bpo-31467: Fixed bug where calling write_eof() on a
+ _SelectorSocketTransport after it's already closed raises AttributeError.
+
+- bpo-33672: Fix Task.__repr__ crash with Cython's bogus coroutines
+
+- bpo-33469: Fix RuntimeError after closing loop that used run_in_executor
+
+- bpo-11874: Use a better regex when breaking usage into wrappable parts.
+ Avoids bogus assertion errors from custom metavar strings.
+
+- bpo-30877: Fixed a bug in the Python implementation of the JSON decoder
+ that prevented the cache of parsed strings from clearing after finishing
+ the decoding. Based on patch by c-fos.
+
+- bpo-33548: tempfile._candidate_tempdir_list should consider common TEMP
+ locations
+
+- bpo-33542: Prevent ``uuid.get_node`` from using a DUID instead of a MAC on
+ Windows. Patch by Zvi Effron
+
+- bpo-26819: Fix race condition with `ReadTransport.resume_reading` in
+ Windows proactor event loop.
+
+- bpo-28556: Minor fixes in typing module: add annotations to
+ ``NamedTuple.__new__``, pass ``*args`` and ``**kwds`` in
+ ``Generic.__new__``. Original PRs by Paulius Šarka and Chad Dombrova.
+
+- bpo-20087: Updated alias mapping with glibc 2.27 supported locales.
+
+- bpo-33422: Fix trailing quotation marks getting deleted when looking up
+ byte/string literals on pydoc. Patch by Andrés Delfino.
+
+- bpo-33197: Update error message when constructing invalid
+ inspect.Parameters. Patch by Dong-hee Na.
+
+- bpo-33383: Fixed crash in the get() method of the :mod:`dbm.ndbm` database
+ object when it is called with a single argument.
+
+- bpo-33329: Fix multiprocessing regression on newer glibcs
+
+- bpo-991266: Fix quoting of the ``Comment`` attribute of
+ :class:`http.cookies.SimpleCookie`.
+
+- bpo-33131: Upgrade bundled version of pip to 10.0.1.
+
+- bpo-33308: Fixed a crash in the :mod:`parser` module when converting an ST
+ object to a tree of tuples or lists with ``line_info=False`` and
+ ``col_info=True``.
+
+- bpo-33263: Fix FD leak in `_SelectorSocketTransport` Patch by Vlad
+ Starostin.
+
+- bpo-33256: Fix display of ``<module>`` call in the html produced by
+ ``cgitb.html()``. Patch by Stéphane Blondon.
+
+- bpo-33203: ``random.Random.choice()`` now raises ``IndexError`` for empty
+ sequences consistently even when called from subclasses without a
+ ``getrandbits()`` implementation.
+
+- bpo-33224: Update difflib.mdiff() for PEP 479. Convert an uncaught
+ StopIteration in a generator into a return-statement.
+
+- bpo-33209: End framing at the end of C implementation of
+ :func:`pickle.Pickler.dump`.
+
+- bpo-32861: The urllib.robotparser's ``__str__`` representation now
+ includes wildcard entries and the "Crawl-delay" and "Request-rate" fields.
+ Patch by Michael Lazar.
+
+- bpo-33096: Allow ttk.Treeview.insert to insert iid that has a false
+ boolean value. Note iid=0 and iid=False would be same. Patch by Garvit
+ Khatri.
+
+- bpo-33127: The ssl module now compiles with LibreSSL 2.7.1.
+
+- bpo-33021: Release the GIL during fstat() calls, avoiding hang of all
+ threads when calling mmap.mmap(), os.urandom(), and random.seed(). Patch
+ by Nir Soffer.
+
+- bpo-27683: Fix a regression in :mod:`ipaddress` where the result of
+ :meth:`hosts` is empty when the network is constructed by a tuple
+ containing an integer mask and only 1 bit left for addresses.
+
+- bpo-32844: Fix wrong redirection of a low descriptor (0 or 1) to stderr in
+ subprocess if another low descriptor is closed.
+
+- bpo-31908: Fix output of cover files for ``trace`` module command-line
+ tool. Previously emitted cover files only when ``--missing`` option was
+ used. Patch by Michael Selik.
+
+- bpo-31457: If nested log adapters are used, the inner ``process()``
+ methods are no longer omitted.
+
+- bpo-16865: Support arrays >=2GiB in :mod:`ctypes`. Patch by Segev Finer.
+
+- bpo-31238: pydoc: the stop() method of the private ServerThread class now
+ waits until DocServer.serve_until_quit() completes and then explicitly
+ sets its docserver attribute to None to break a reference cycle.
+
+Documentation
+-------------
+
+- bpo-33503: Fix broken pypi link
+
+- bpo-33421: Add missing documentation for ``typing.AsyncContextManager``.
+
+- bpo-33378: Add Korean language switcher for https://docs.python.org/3/
+
+- bpo-33276: Clarify that the ``__path__`` attribute on modules cannot be
+ just any value.
+
+- bpo-33201: Modernize documentation for writing C extension types.
+
+- bpo-33195: Deprecate ``Py_UNICODE`` usage in ``c-api/arg`` document.
+ ``Py_UNICODE`` related APIs are deprecated since Python 3.3, but it is
+ missed in the document.
+
+- bpo-33126: Document PyBuffer_ToContiguous().
+
+- bpo-27212: Modify documentation for the :func:`islice` recipe to consume
+ initial values up to the start index.
+
+- bpo-28247: Update :mod:`zipapp` documentation to describe how to make
+ standalone applications.
+
+- bpo-18802: Documentation changes for ipaddress. Patch by Jon Foster and
+ Berker Peksag.
+
+- bpo-27428: Update documentation to clarify that ``WindowsRegistryFinder``
+ implements ``MetaPathFinder``. (Patch by Himanshu Lakhara)
+
+- bpo-8243: Add a note about curses.addch and curses.addstr exception
+ behavior when writing outside a window, or pad.
+
+- bpo-31432: Clarify meaning of CERT_NONE, CERT_OPTIONAL, and CERT_REQUIRED
+ flags for ssl.SSLContext.verify_mode.
+
+Tests
+-----
+
+- bpo-33655: Ignore test_posix_fallocate failures on BSD platforms that
+ might be due to running on ZFS.
+
+- bpo-19417: Add test_bdb.py.
+
+Build
+-----
+
+- bpo-5755: Move ``-Wstrict-prototypes`` option to ``CFLAGS_NODIST`` from
+ ``OPT``. This option emitted annoying warnings when building extension
+ modules written in C++.
+
+- bpo-33614: Ensures module definition files for the stable ABI on Windows
+ are correctly regenerated.
+
+- bpo-33522: Enable CI builds on Visual Studio Team Services at
+ https://python.visualstudio.com/cpython
+
+- bpo-33012: Add ``-Wno-cast-function-type`` for gcc 8 for silencing
+ warnings about function casts like casting to PyCFunction in method
+ definition lists.
+
+- bpo-33394: Enable the verbose build for extension modules, when GNU make
+ is passed macros on the command line.
+
+Windows
+-------
+
+- bpo-33184: Update Windows installer to OpenSSL 1.0.2o.
+
+macOS
+-----
+
+- bpo-33184: Update macOS installer build to use OpenSSL 1.0.2o.
+
+IDLE
+----
+
+- bpo-33656: On Windows, add API call saying that tk scales for DPI. On
+ Windows 8.1+ or 10, with DPI compatibility properties of the Python binary
+ unchanged, and a monitor resolution greater than 96 DPI, this should make
+ text and lines sharper. It should otherwise have no effect.
+
+- bpo-33768: Clicking on a context line moves that line to the top of the
+ editor window.
+
+- bpo-33763: IDLE: Use read-only text widget for code context instead of
+ label widget.
+
+- bpo-33664: Scroll IDLE editor text by lines. Previously, the mouse wheel
+ and scrollbar slider moved text by a fixed number of pixels, resulting in
+ partial lines at the top of the editor box. The change also applies to
+ the shell and grep output windows, but not to read-only text views.
+
+- bpo-33679: Enable theme-specific color configuration for Code Context. Use
+ the Highlights tab to see the setting for built-in themes or add settings
+ to custom themes.
+
+- bpo-33642: Display up to maxlines non-blank lines for Code Context. If
+ there is no current context, show a single blank line.
+
+- bpo-33628: IDLE: Cleanup codecontext.py and its test.
+
+- bpo-33564: IDLE's code context now recognizes async as a block opener.
+
+- bpo-29706: IDLE now colors async and await as keywords in 3.6. They become
+ full keywords in 3.7.
+
+- bpo-21474: Update word/identifier definition from ascii to unicode. In
+ text and entry boxes, this affects selection by double-click, movement
+ left/right by control-left/right, and deletion left/right by
+ control-BACKSPACE/DEL.
+
+- bpo-33204: IDLE: consistently color invalid string prefixes. A 'u' string
+ prefix cannot be paired with either 'r' or 'f'. Consistently color as much
+ of the prefix, starting at the right, as is valid. Revise and extend
+ colorizer test.
+
+- bpo-32831: Add docstrings and tests for codecontext.
+
+Tools/Demos
+-----------
+
+- bpo-33189: :program:`pygettext.py` now recognizes only literal strings as
+ docstrings and translatable strings, and rejects bytes literals and
+ f-string expressions.
+
+- bpo-31920: Fixed handling directories as arguments in the ``pygettext``
+ script. Based on patch by Oleg Krasnikov.
+
+- bpo-29673: Fix pystackv and pystack gdbinit macros.
+
+- bpo-32885: Add an ``-n`` flag for ``Tools/scripts/pathfix.py`` to disable
+ automatic backup creation (files with ``~`` suffix).
+
+- bpo-31583: Fix 2to3 for using with --add-suffix option but without
+ --output-dir option for relative path to files in current directory.
+
+C API
+-----
+
+- bpo-32374: Document that m_traverse for multi-phase initialized modules
+ can be called with m_state=NULL, and add a sanity check
+
+
What's New in Python 3.6.5 final?
=================================
Chandra.
- bpo-32583: Fix possible crashing in builtin Unicode decoders caused by
- write out-of- bound errors when using customized decode error handlers.
+ write out-of-bound errors when using customized decode error handlers.
- bpo-26163: Improved frozenset() hash to create more distinct hash values
when faced with datasets containing many similar values.
``sqlite3.Cursor`` object more than once. Patch by Oren Milman.
- bpo-31672: ``idpattern`` in ``string.Template`` matched some non-ASCII
- characters. Now it uses ``-i`` regular expression local flag to avoid non-
- ASCII characters.
+ characters. Now it uses ``-i`` regular expression local flag to avoid
+ non-ASCII characters.
- bpo-31764: Prevent a crash in ``sqlite3.Cursor.close()`` in case the
``Cursor`` object is uninitialized. Patch by Oren Milman.
- bpo-31482: ``random.seed()`` now works with bytes in version=1
- bpo-31334: Fix ``poll.poll([timeout])`` in the ``select`` module for
- arbitrary negative timeouts on all OSes where it can only be a non-
- negative integer or -1. Patch by Riccardo Coccioli.
+ arbitrary negative timeouts on all OSes where it can only be a
+ non-negative integer or -1. Patch by Riccardo Coccioli.
- bpo-31310: multiprocessing's semaphore tracker should be launched again if
crashed.
documentation example code. Patch by Brad Smith.
- bpo-30085: The operator functions without double underscores are preferred
- for clarity. The one with underscores are only kept for back-
- compatibility.
+ for clarity. The ones with underscores are only kept for
+ back-compatibility.
Tests
-----
- bpo-31459: Rename IDLE's module browser from Class Browser to Module
Browser. The original module-level class and method browser became a
module browser, with the addition of module-level functions, years ago.
- Nested classes and functions were added yesterday. For back-
- compatibility, the virtual event <<open-class-browser>>, which appears on
- the Keys tab of the Settings dialog, is not changed. Patch by Cheryl
- Sabella.
+ Nested classes and functions were added yesterday. For
+ back-compatibility, the virtual event <<open-class-browser>>, which
+ appears on the Keys tab of the Settings dialog, is not changed. Patch by
+ Cheryl Sabella.
- bpo-31500: Default fonts now are scaled on HiDPI displays.
- bpo-30703: Improve signal delivery.
Avoid using Py_AddPendingCall from signal handler, to avoid calling
- signal- unsafe functions. The tests I'm adding here fail without the rest
+ signal-unsafe functions. The tests I'm adding here fail without the rest
of the patch, on Linux and OS X. This means our signal delivery logic had
defects (some signals could be lost).
- bpo-29212: Fix concurrent.futures.thread.ThreadPoolExecutor threads to
have a non repr() based thread name by default when no thread_name_prefix
- is supplied. They will now identify themselves as "ThreadPoolExecutor-
- y_n".
+ is supplied. They will now identify themselves as
+ "ThreadPoolExecutor-y_n".
- bpo-9146: Fix a segmentation fault in _hashopenssl when standard hash
functions such as md5 are not available in the linked OpenSSL library. As
contains CR or LF. Patch by Dong-hee Na.
- bpo-30595: multiprocessing.Queue.get() with a timeout now polls its reader
- in non- blocking mode if it succeeded to aquire the lock but the acquire
+ in non-blocking mode if it succeeded to acquire the lock but the acquire
took longer than the timeout.
- bpo-29403: Fix ``unittest.mock``'s autospec to not fail on method-bound
certain cases.
- bpo-30879: os.listdir() and os.scandir() now emit bytes names when called
- with bytes- like argument.
+ with bytes-like argument.
- bpo-30746: Prohibited the '=' character in environment variable names in
``os.putenv()`` and ``os.spawn*()``.
The main difference for users is that user configurable key bindings for
builtin features are now handled uniformly. Now, editing a binding in a
keyset only affects its value in the keyset. All bindings are defined
- together in the system-specific default keysets in config- extensions.def.
- All custom keysets are saved as a whole in config- extension.cfg. All
- take effect as soon as one clicks Apply or Ok.
+ together in the system-specific default keysets in config-extensions.def.
+ All custom keysets are saved as a whole in config-extension.cfg. All take
+ effect as soon as one clicks Apply or Ok.
The affected events are '<<force-open-completions>>', '<<expand-word>>',
'<<force-open-calltip>>', '<<flash-paren>>', '<<format-paragraph>>',
- '<<run- module>>', '<<check-module>>', and '<<zoom-height>>'. Any
- (global) customizations made before 3.6.3 will not affect their keyset-
- specific customization after 3.6.3. and vice versa.
+ '<<run-module>>', '<<check-module>>', and '<<zoom-height>>'. Any (global)
+ customizations made before 3.6.3 will not affect their keyset-specific
+ customization after 3.6.3. and vice versa.
Inital patch by Charles Wohlganger.
infinite loop DoS), CVE-2016-9063 (Integer overflow, re-fix),
CVE-2016-0718 (Fix regression bugs from 2.2.0's fix to CVE-2016-0718) and
CVE-2012-0876 (Counter hash flooding with SipHash). Note: the
- CVE-2016-5300 (Use os- specific entropy sources like getrandom) doesn't
+ CVE-2016-5300 (Use os-specific entropy sources like getrandom) doesn't
impact Python, since Python already gets entropy from the OS to set the
expat secret using ``XML_SetHashSalt()``.
mutated during searching, inserting or comparing. Based on patches by
Duane Griffin and Tim Mitchell.
-- bpo-25794: Fixed type.__setattr__() and type.__delattr__() for non-
- interned attribute names. Based on patch by Eryk Sun.
+- bpo-25794: Fixed type.__setattr__() and type.__delattr__() for
+ non-interned attribute names. Based on patch by Eryk Sun.
- bpo-30039: If a KeyboardInterrupt happens when the interpreter is in the
middle of resuming a chain of nested 'yield from' or 'await' calls, it's
- bpo-23890: unittest.TestCase.assertRaises() now manually breaks a
reference cycle to not keep objects alive longer than expected.
-- bpo-30149: inspect.signature() now supports callables with variable-
- argument parameters wrapped with partialmethod. Patch by Dong-hee Na.
+- bpo-30149: inspect.signature() now supports callables with
+ variable-argument parameters wrapped with partialmethod. Patch by Dong-hee
+ Na.
- bpo-30645: Fix path calculation in imp.load_package(), fixing it for cases
when a package is only shipped with bytecodes. Patch by Alexandru
- bpo-30048: Fixed ``Task.cancel()`` can be ignored when the task is running
coroutine and the coroutine returned without any more ``await``.
-- bpo-30266: contextlib.AbstractContextManager now supports anti-
- registration by setting __enter__ = None or __exit__ = None, following the
- pattern introduced in bpo-25958. Patch by Jelle Zijlstra.
+- bpo-30266: contextlib.AbstractContextManager now supports
+ anti-registration by setting __enter__ = None or __exit__ = None,
+ following the pattern introduced in bpo-25958. Patch by Jelle Zijlstra.
- bpo-30298: Weaken the condition of deprecation warnings for inline
modifiers. Now allowed several subsequential inline modifiers at the start
when Ctrl-C is received.
- bpo-28556: Various updates to typing module: add typing.NoReturn type, use
- WrapperDescriptorType, minor bug-fixes. Original PRs by Jim Fasarakis-
- Hilliard and Ivan Levkivskyi.
+ WrapperDescriptorType, minor bug-fixes. Original PRs by Jim
+ Fasarakis-Hilliard and Ivan Levkivskyi.
- bpo-30205: Fix getsockname() for unbound AF_UNIX sockets on Linux.
scrollbar; selecting an item by hitting Return. Hangs on MacOSX should no
longer happen. Patch by Louie Lu.
-- bpo-25514: Add doc subsubsection about IDLE failure to start. Popup no-
- connection message directs users to this section.
+- bpo-25514: Add doc subsubsection about IDLE failure to start. Popup
+ no-connection message directs users to this section.
- bpo-30642: Fix reference leaks in IDLE tests. Patches by Louie Lu and
Terry Jan Reedy.
-----
- bpo-29941: Add ``--with-assertions`` configure flag to explicitly enable C
- ``assert()`` checks. Defaults to off. ``--with-pydebug`` implies ``--with-
- assertions``.
+ ``assert()`` checks. Defaults to off. ``--with-pydebug`` implies
+ ``--with-assertions``.
-- bpo-28787: Fix out-of-tree builds of Python when configured with ``--with
- --dtrace``.
+- bpo-28787: Fix out-of-tree builds of Python when configured with
+ ``--with-dtrace``.
- bpo-29243: Prevent unnecessary rebuilding of Python during ``make test``,
``make install`` and some other make targets when configured with
- ``--enable- optimizations``.
+ ``--enable-optimizations``.
- bpo-23404: Don't regenerate generated files based on file modification
time anymore: the action is now explicit. Replace ``make touch`` with
when decode astral characters. Patch by Xiang Zhang.
- bpo-19398: Extra slash no longer added to sys.path components in case of
- empty compile- time PYTHONPATH components.
+ empty compile-time PYTHONPATH components.
- bpo-28665: Improve speed of the STORE_DEREF opcode by 40%.
- bpo-27358: Optimized merging var-keyword arguments and improved error
message when passing a non-mapping as a var-keyword argument.
-- bpo-28257: Improved error message when passing a non-iterable as a var-
- positional argument. Added opcode BUILD_TUPLE_UNPACK_WITH_CALL.
+- bpo-28257: Improved error message when passing a non-iterable as a
+ var-positional argument. Added opcode BUILD_TUPLE_UNPACK_WITH_CALL.
- bpo-28322: Fixed possible crashes when unpickle itertools objects from
incorrect pickle data. Based on patch by John Leitch.
middle of the regular expression.
- bpo-26885: xmlrpc now supports unmarshalling additional data types used by
- Apache XML- RPC implementation for numerics and None.
+ Apache XML-RPC implementation for numerics and None.
- bpo-28070: Fixed parsing inline verbose flag in regular expressions.
http.client.HTTPConnection requests. The
urllib.request.AbstractHTTPHandler class does not enforce a Content-Length
header any more. If a HTTP request has a file or iterable body, but no
- Content-Length header, the library now falls back to use chunked transfer-
- encoding.
+ Content-Length header, the library now falls back to use chunked
+ transfer-encoding.
- A new version of typing.py from https://github.com/python/typing: -
Collection (only for 3.6) (Issue #27598) - Add FrozenSet to __all__
- bpo-27983: Cause lack of llvm-profdata tool when using clang as required
for PGO linking to be a configure time error rather than make time when
- --with- optimizations is enabled. Also improve our ability to find the
- llvm- profdata tool on MacOS and some Linuxes.
+ ``--with-optimizations`` is enabled. Also improve our ability to find the
+ llvm-profdata tool on MacOS and some Linuxes.
- bpo-21590: Support for DTrace and SystemTap probes.
Hsuan Yen.
- bpo-27641: The configure script now inserts comments into the makefile to
- prevent the pgen and _freeze_importlib executables from being cross-
- compiled.
+ prevent the pgen and _freeze_importlib executables from being
+ cross-compiled.
- bpo-26662: Set PYTHON_FOR_GEN in configure as the Python program to be
used for file generation during the build.
- bpo-25267: The UTF-8 encoder is now up to 75 times as fast for error
handlers: ``ignore``, ``replace``, ``surrogateescape``, ``surrogatepass``.
- Patch co- written with Serhiy Storchaka.
+ Patch co-written with Serhiy Storchaka.
- bpo-25280: Import trace messages emitted in verbose (-v) mode are no
longer formatted twice.
- bpo-25003: On Solaris 11.3 or newer, os.urandom() now uses the getrandom()
function instead of the getentropy() function. The getentropy() function
is blocking to generate very good quality entropy, os.urandom() doesn't
- need such high- quality entropy.
+ need such high-quality entropy.
- bpo-9232: Modify Python's grammar to allow trailing commas in the argument
list of a function declaration. For example, "def f(\*, a = 3,): pass" is
- bpo-17214: The "urllib.request" module now percent-encodes non-ASCII bytes
found in redirect target URLs. Some servers send Location header fields
- with non- ASCII bytes, but "http.client" requires the request target to be
- ASCII- encodable, otherwise a UnicodeEncodeError is raised. Based on
- patch by Christian Heimes.
+ with non-ASCII bytes, but "http.client" requires the request target to be
+ ASCII-encodable, otherwise a UnicodeEncodeError is raised. Based on patch
+ by Christian Heimes.
- bpo-27033: The default value of the decode_data parameter for
smtpd.SMTPChannel and smtpd.SMTPServer constructors is changed to False.
Xiang Zhang.
- bpo-26804: urllib.request will prefer lower_case proxy environment
- variables over UPPER_CASE or Mixed_Case ones. Patch contributed by Hans-
- Peter Jansen.
+ variables over UPPER_CASE or Mixed_Case ones. Patch contributed by
+ Hans-Peter Jansen.
- bpo-26837: assertSequenceEqual() now correctly outputs non-stringified
differing items (like bytes in the -b mode). This affects
- bpo-25911: Restored support of bytes paths in os.walk() on Windows.
-- bpo-26045: Add UTF-8 suggestion to error message when posting a non-
- Latin-1 string with http.client.
+- bpo-26045: Add UTF-8 suggestion to error message when posting a
+ non-Latin-1 string with http.client.
- bpo-26039: Added zipfile.ZipInfo.from_file() and zipinfo.ZipInfo.is_dir().
Patch by Thomas Kluyver.
and "keywords" attributes of functools.partial have now always types tuple
and dict correspondingly.
-- bpo-26202: copy.deepcopy() now correctly copies range() objects with non-
- atomic attributes.
+- bpo-26202: copy.deepcopy() now correctly copies range() objects with
+ non-atomic attributes.
- bpo-23076: Path.glob() now raises a ValueError if it's called with an
invalid pattern. Patch by Thomas Nyberg.
- bpo-25173: Associate tkinter messageboxes with a specific widget. For Mac
OSX, make them a 'sheet'. Patch by Mark Roseman.
-- bpo-25198: Enhance the initial html viewer now used for Idle Help. *
- Properly indent fixed-pitch text (patch by Mark Roseman). * Give code
- snippet a very Sphinx- like light blueish-gray background. * Re-use
- initial width and height set by users for shell and editor. * When the
- Table of Contents (TOC) menu is used, put the section header at the top of
- the screen.
+- bpo-25198: Enhance the initial html viewer now used for Idle Help.
+ Properly indent fixed-pitch text (patch by Mark Roseman). Give code
+ snippet a very Sphinx-like light blueish-gray background. Re-use initial
+ width and height set by users for shell and editor. When the Table of
+ Contents (TOC) menu is used, put the section header at the top of the
+ screen.
- bpo-25225: Condense and rewrite Idle doc section on text colors.
via macros (in particular on Android). Patch by Chi Hsuan Yen.
- bpo-22359: Disable the rules for running _freeze_importlib and pgen when
- cross- compiling. The output of these programs is normally saved with the
+ cross-compiling. The output of these programs is normally saved with the
source code anyway, and is still regenerated when doing a native build.
Patch by Xavier de Gaye.
-----------
- bpo-26799: Fix python-gdb.py: don't get C types once when the Python code
- is loaded, but get C types on demand. The C types can change if python-
- gdb.py is loaded before the Python executable. Patch written by Thomas
- Ilsche.
+ is loaded, but get C types on demand. The C types can change if
+ python-gdb.py is loaded before the Python executable. Patch written by
+ Thomas Ilsche.
- bpo-26271: Fix the Freeze tool to properly use flags passed through
configure. Patch by Daniel Shaulov.
when decode astral characters. Patch by Xiang Zhang.
- bpo-19398: Extra slash no longer added to sys.path components in case of
- empty compile- time PYTHONPATH components.
+ empty compile-time PYTHONPATH components.
- bpo-28426: Fixed potential crash in PyUnicode_AsDecodedObject() in debug
build.
- bpo-27419: Standard __import__() no longer look up "__import__" in globals
or builtins for importing submodules or "from import". Fixed handling an
- error of non- string package name.
+ error of non-string package name.
- bpo-27083: Respect the PYTHONCASEOK environment variable under Windows.
- bpo-27983: Cause lack of llvm-profdata tool when using clang as required
for PGO linking to be a configure time error rather than make time when
- --with- optimizations is enabled. Also improve our ability to find the
- llvm- profdata tool on MacOS and some Linuxes.
+ ``--with-optimizations`` is enabled. Also improve our ability to find the
+ llvm-profdata tool on MacOS and some Linuxes.
- bpo-26307: The profile-opt build now applies PGO to the built-in modules.
Hsuan Yen.
- bpo-27641: The configure script now inserts comments into the makefile to
- prevent the pgen and _freeze_importlib executables from being cross-
- compiled.
+ prevent the pgen and _freeze_importlib executables from being
+ cross-compiled.
- bpo-26662: Set PYTHON_FOR_GEN in configure as the Python program to be
used for file generation during the build.
- bpo-17214: The "urllib.request" module now percent-encodes non-ASCII bytes
found in redirect target URLs. Some servers send Location header fields
- with non- ASCII bytes, but "http.client" requires the request target to be
- ASCII- encodable, otherwise a UnicodeEncodeError is raised. Based on
- patch by Christian Heimes.
+ with non-ASCII bytes, but "http.client" requires the request target to be
+ ASCII-encodable, otherwise a UnicodeEncodeError is raised. Based on patch
+ by Christian Heimes.
- bpo-26892: Honor debuglevel flag in urllib.request.HTTPHandler. Patch
contributed by Chi Hsuan Yen.
Xiang Zhang.
- bpo-26804: urllib.request will prefer lower_case proxy environment
- variables over UPPER_CASE or Mixed_Case ones. Patch contributed by Hans-
- Peter Jansen.
+ variables over UPPER_CASE or Mixed_Case ones. Patch contributed by
+ Hans-Peter Jansen.
- bpo-26837: assertSequenceEqual() now correctly outputs non-stringified
differing items (like bytes in the -b mode). This affects
- bpo-25911: Restored support of bytes paths in os.walk() on Windows.
-- bpo-26045: Add UTF-8 suggestion to error message when posting a non-
- Latin-1 string with http.client.
+- bpo-26045: Add UTF-8 suggestion to error message when posting a
+ non-Latin-1 string with http.client.
- bpo-12923: Reset FancyURLopener's redirect counter even if there is an
exception. Based on patches by Brian Brazil and Daniel Rocco.
and "keywords" attributes of functools.partial have now always types tuple
and dict correspondingly.
-- bpo-26202: copy.deepcopy() now correctly copies range() objects with non-
- atomic attributes.
+- bpo-26202: copy.deepcopy() now correctly copies range() objects with
+ non-atomic attributes.
- bpo-23076: Path.glob() now raises a ValueError if it's called with an
invalid pattern. Patch by Thomas Nyberg.
de Gaye.
- bpo-22359: Disable the rules for running _freeze_importlib and pgen when
- cross- compiling. The output of these programs is normally saved with the
+ cross-compiling. The output of these programs is normally saved with the
source code anyway, and is still regenerated when doing a native build.
Patch by Xavier de Gaye.
-----------
- bpo-26799: Fix python-gdb.py: don't get C types once when the Python code
- is loaded, but get C types on demand. The C types can change if python-
- gdb.py is loaded before the Python executable. Patch written by Thomas
- Ilsche.
+ is loaded, but get C types on demand. The C types can change if
+ python-gdb.py is loaded before the Python executable. Patch written by
+ Thomas Ilsche.
- bpo-26271: Fix the Freeze tool to properly use flags passed through
configure. Patch by Daniel Shaulov.
- bpo-25003: On Solaris 11.3 or newer, os.urandom() now uses the getrandom()
function instead of the getentropy() function. The getentropy() function
is blocking to generate very good quality entropy, os.urandom() doesn't
- need such high- quality entropy.
+ need such high-quality entropy.
- bpo-25182: The stdprinter (used as sys.stderr before the io module is
imported at startup) now uses the backslashreplace error handler.
- bpo-25173: Associate tkinter messageboxes with a specific widget. For Mac
OSX, make them a 'sheet'. Patch by Mark Roseman.
-- bpo-25198: Enhance the initial html viewer now used for Idle Help. *
- Properly indent fixed-pitch text (patch by Mark Roseman). * Give code
- snippet a very Sphinx- like light blueish-gray background. * Re-use
- initial width and height set by users for shell and editor. * When the
- Table of Contents (TOC) menu is used, put the section header at the top of
- the screen.
+- bpo-25198: Enhance the initial html viewer now used for Idle Help.
+ Properly indent fixed-pitch text (patch by Mark Roseman). Give code
+ snippet a very Sphinx-like light blueish-gray background. Re-use initial
+ width and height set by users for shell and editor. When the Table of
+ Contents (TOC) menu is used, put the section header at the top of the
+ screen.
- bpo-25225: Condense and rewrite Idle doc section on text colors.
- bpo-23672: Allow Idle to edit and run files with astral chars in name.
Patch by Mohd Sanad Zaki Rizvi.
-- bpo-24745: Idle editor default font. Switch from Courier to platform-
- sensitive TkFixedFont. This should not affect current customized font
- selections. If there is a problem, edit $HOME/.idlerc/config-main.cfg and
- remove 'fontxxx' entries from [Editor Window]. Patch by Mark Roseman.
+- bpo-24745: Idle editor default font. Switch from Courier to
+ platform-sensitive TkFixedFont. This should not affect current customized
+ font selections. If there is a problem, edit
+ $HOME/.idlerc/config-main.cfg and remove 'fontxxx' entries from [Editor
+ Window]. Patch by Mark Roseman.
- bpo-21192: Idle editor. When a file is run, put its name in the restart
bar. Do not print false prompts. Original patch by Adnan Umer.
-----
- bpo-24751: When running regrtest with the ``-w`` command line option, a
- test run is no longer marked as a failure if all tests succeed when re-
- run.
+ test run is no longer marked as a failure if all tests succeed when
+ re-run.
What's New in Python 3.5.0 beta 4?
inside a data segment.
- bpo-15014: SMTP.auth() and SMTP.login() now support RFC 4954's optional
- initial- response argument to the SMTP AUTH command.
+ initial-response argument to the SMTP AUTH command.
- bpo-24669: Fix inspect.getsource() for 'async def' functions. Patch by Kai
Groner.
called. Based on patch by Martin Panter.
- bpo-20387: Restore semantic round-trip correctness in tokenize/untokenize
- for tab- indented blocks.
+ for tab-indented blocks.
- bpo-24456: Fixed possible buffer over-read in adpcm2lin() and lin2adpcm()
functions of the audioop module.
Tools/Demos
-----------
-- bpo-18128: pygettext now uses standard +NNNN format in the POT-Creation-
- Date header.
+- bpo-18128: pygettext now uses standard +NNNN format in the
+ POT-Creation-Date header.
- bpo-23935: Argument Clinic's understanding of format units accepting
bytes, bytearrays, and buffers is now consistent with both the
- bpo-23681: The -b option now affects comparisons of bytes with int.
-- bpo-23632: Memoryviews now allow tuple indexing (including for multi-
- dimensional memoryviews).
+- bpo-23632: Memoryviews now allow tuple indexing (including for
+ multi-dimensional memoryviews).
- bpo-23192: Fixed generator lambdas. Patch by Bruno Cauet.
-------
- bpo-14260: The groupindex attribute of regular expression pattern object
- now is non- modifiable mapping.
+ now is non-modifiable mapping.
- bpo-23792: Ignore KeyboardInterrupt when the pydoc pager is active. This
mimics the behavior of the standard unix pagers, and prevents pipepager
- bpo-20335: bytes constructor now raises TypeError when encoding or errors
is specified with non-string argument. Based on patch by Renaud Blanch.
-- bpo-22834: If the current working directory ends up being set to a non-
- existent directory then import will no longer raise FileNotFoundError.
+- bpo-22834: If the current working directory ends up being set to a
+ non-existent directory then import will no longer raise FileNotFoundError.
- bpo-22869: Move the interpreter startup & shutdown code to a new dedicated
pylifecycle.c module
state is now always restored or swapped, not only if why is WHY_YIELD or
WHY_RETURN. Patch co-written with Antoine Pitrou.
-- bpo-14099: Restored support of writing ZIP files to tellable but non-
- seekable streams.
+- bpo-14099: Restored support of writing ZIP files to tellable but
+ non-seekable streams.
- bpo-14099: Writing to ZipFile and reading multiple ZipExtFiles is
threadsafe now.
- bpo-18216: gettext now raises an error when a .mo file has an unsupported
major version number. Patch by Aaron Hill.
-- bpo-13918: Provide a locale.delocalize() function which can remove locale-
- specific number formatting from a string representing a number, without
- then converting it to a specific type. Patch by Cédric Krier.
+- bpo-13918: Provide a locale.delocalize() function which can remove
+ locale-specific number formatting from a string representing a number,
+ without then converting it to a specific type. Patch by Cédric Krier.
- bpo-22676: Make the pickling of global objects which don't have a
__module__ attribute less slow.
without executing the test suite. The new `errors` attribute on TestLoader
exposes these non-fatal errors encountered during discovery.
-- bpo-21991: Make email.headerregistry's header 'params' attributes be read-
- only (MappingProxyType). Previously the dictionary was modifiable but a
- new one was created on each access of the attribute.
+- bpo-21991: Make email.headerregistry's header 'params' attributes be
+ read-only (MappingProxyType). Previously the dictionary was modifiable
+ but a new one was created on each access of the attribute.
- bpo-22638: SSLv3 is now disabled throughout the standard library. It can
still be enabled by instantiating a SSLContext manually.
- bpo-5411: Added support for the "xztar" format in the shutil module.
- bpo-21121: Don't force 3rd party C extensions to be built with
- -Werror=declaration- after-statement.
+ ``-Werror=declaration-after-statement``.
- bpo-21975: Fixed crash when using uninitialized sqlite3.Row (in particular
when unpickling pickled sqlite3.Row). sqlite3.Row is now initialized in
- bpo-20378: Improve repr of inspect.Signature and inspect.Parameter.
- bpo-20816: Fix inspect.getcallargs() to raise correct TypeError for
- missing keyword- only arguments. Patch by Jeremiah Lowin.
+ missing keyword-only arguments. Patch by Jeremiah Lowin.
- bpo-20817: Fix inspect.getcallargs() to fail correctly if more than 3
arguments are missing. Patch by Jeremiah Lowin.
wording by David Gutteridge)
- bpo-21117: Fix inspect.signature to better support functools.partial. Due
- to the specifics of functools.partial implementation, positional-or-
- keyword arguments passed as keyword arguments become keyword-only.
+ to the specifics of functools.partial implementation,
+ positional-or-keyword arguments passed as keyword arguments become
+ keyword-only.
- bpo-20334: inspect.Signature and inspect.Parameter are now hashable.
Thanks to Antony Lee for bug reports and suggestions.
- bpo-21284: Paragraph reformat test passes after user changes reformat
width.
-- bpo-17654: Ensure IDLE menus are customized properly on OS X for non-
- framework builds and for all variants of Tk.
+- bpo-17654: Ensure IDLE menus are customized properly on OS X for
+ non-framework builds and for all variants of Tk.
- bpo-23180: Rename IDLE "Windows" menu item to "Window". Patch by Al
Sweigart.
- bpo-22592: Drop support of the Borland C compiler to build Python. The
distutils module still supports it to build extensions.
-- bpo-22591: Drop support of MS-DOS, especially of the DJGPP compiler (MS-
- DOS port of GCC).
+- bpo-22591: Drop support of MS-DOS, especially of the DJGPP compiler
+ (MS-DOS port of GCC).
- bpo-16537: Check whether self.extensions is empty in setup.py. Patch by
Jonathan Hosmer.
- bpo-21811: Anticipated fixes to support OS X versions > 10.9.
- bpo-21166: Prevent possible segfaults and other random failures of python
- --generate- posix-vars in pybuilddir.txt build target.
+ ``--generate-posix-vars`` in pybuilddir.txt build target.
- bpo-18096: Fix library order returned by python-config.
set $_i = 0
while $_i < f->f_code->co_nlocals
if f->f_localsplus + $_i != 0
- set $_names = co->co_varnames
- set $_name = _PyUnicode_AsString(PyTuple_GetItem($_names, $_i))
+ set $_names = f->f_code->co_varnames
+ set $_name = PyUnicode_AsUTF8(PyTuple_GetItem($_names, $_i))
printf "%s:\n", $_name
pyo f->f_localsplus[$_i]
end
end
define pyframe
- set $__fn = _PyUnicode_AsString(co->co_filename)
- set $__n = _PyUnicode_AsString(co->co_name)
+ set $__fn = PyUnicode_AsUTF8(f->f_code->co_filename)
+ set $__n = PyUnicode_AsUTF8(f->f_code->co_name)
printf "%s (", $__fn
lineno
printf "): %s\n", $__n
#end
define printframe
- if $pc > PyEval_EvalFrameEx && $pc < PyEval_EvalCodeEx
+ if $pc > PyEval_EvalFrameEx && $pc < _PyEval_EvalFrameDefault
pyframe
else
frame
# print the entire Python call stack
define pystack
while $pc < Py_Main || $pc > Py_GetArgcArgv
- if $pc > PyEval_EvalFrameEx && $pc < PyEval_EvalCodeEx
+ if $pc > PyEval_EvalFrameEx && $pc < _PyEval_EvalFrameDefault
pyframe
end
up-silently 1
# print the entire Python call stack - verbose mode
define pystackv
while $pc < Py_Main || $pc > Py_GetArgcArgv
- if $pc > PyEval_EvalFrameEx && $pc < PyEval_EvalCodeEx
+ if $pc > PyEval_EvalFrameEx && $pc < _PyEval_EvalFrameDefault
pyframev
end
up-silently 1
.br
Downloads: https://www.python.org/downloads/
.br
-Module repository: https://pypi.python.org/
+Module repository: https://pypi.org/
.br
Newsgroups: comp.lang.python, comp.lang.python.announce
.SH LICENSING
}
if (res == 1) {
/* `result` is a generator */
- PyObject *ret;
- ret = task_set_error_soon(
+ o = task_set_error_soon(
task, PyExc_RuntimeError,
"yield was used instead of yield from for "
- "generator in task %R with %S", task, result);
+ "generator in task %R with %R", task, result);
Py_DECREF(result);
- return ret;
+ return o;
}
/* The `result` is none of the above */
- Py_DECREF(result);
- return task_set_error_soon(
+ o = task_set_error_soon(
task, PyExc_RuntimeError, "Task got bad yield: %R", result);
+ Py_DECREF(result);
+ return o;
self_await:
o = task_set_error_soon(
return new_deque;
}
-static void
+static int
deque_clear(dequeobject *deque)
{
block *b;
PyObject **itemptr, **limit;
if (Py_SIZE(deque) == 0)
- return;
+ return 0;
/* During the process of clearing a deque, decrefs can cause the
deque to mutate. To avoid fatal confusion, we have to make the
}
CHECK_END(leftblock->rightlink);
freeblock(leftblock);
- return;
+ return 0;
alternate_method:
while (Py_SIZE(deque)) {
assert (item != NULL);
Py_DECREF(item);
}
+ return 0;
}
static PyObject *
StgDictObject *stgdict;
StgDictObject *itemdict;
PyObject *length_attr, *type_attr;
- long length;
- int overflow;
+ Py_ssize_t length;
Py_ssize_t itemsize, itemalign;
/* create the new instance (which is a class,
Py_XDECREF(length_attr);
goto error;
}
- length = PyLong_AsLongAndOverflow(length_attr, &overflow);
- if (overflow) {
- PyErr_SetString(PyExc_OverflowError,
- "The '_length_' attribute is too large");
- Py_DECREF(length_attr);
+ length = PyLong_AsSsize_t(length_attr);
+ Py_DECREF(length_attr);
+ if (length == -1 && PyErr_Occurred()) {
+ if (PyErr_ExceptionMatches(PyExc_OverflowError)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "The '_length_' attribute is too large");
+ }
goto error;
}
- Py_DECREF(length_attr);
type_attr = PyObject_GetAttrString((PyObject *)result, "_type_");
if (!type_attr) {
if (PyCFuncPtrTypeObject_Check(arg))
return 1;
dict = PyType_stgdict(arg);
- if (dict) {
+ if (dict != NULL && dict->proto != NULL) {
if (PyUnicode_Check(dict->proto)
&& (strchr("sPzUZXO", PyUnicode_AsUTF8(dict->proto)[0]))) {
/* simple pointer types, c_void_p, c_wchar_p, BSTR, ... */
return NULL;
if (!HASTZINFO(self) || self->tzinfo == Py_None) {
+ naive:
self_tzinfo = local_timezone_from_local(self);
if (self_tzinfo == NULL)
return NULL;
Py_DECREF(self_tzinfo);
if (offset == NULL)
return NULL;
+ else if(offset == Py_None) {
+ Py_DECREF(offset);
+ goto naive;
+ }
+ else if (!PyDelta_Check(offset)) {
+ Py_DECREF(offset);
+ PyErr_Format(PyExc_TypeError, "utcoffset() returned %.200s,"
+ " expected timedelta or None", Py_TYPE(offset)->tp_name);
+ return NULL;
+ }
/* result = self - offset */
result = (PyDateTime_DateTime *)add_datetime_timedelta(self,
(PyDateTime_Delta *)offset, -1);
_dbm.dbm.get
key: str(accept={str, robuffer}, zeroes=True)
- default: object(c_default="NULL") = b''
+ default: object = None
/
Return the value for key if present, otherwise default.
static PyObject *
_dbm_dbm_get_impl(dbmobject *self, const char *key,
Py_ssize_clean_t key_length, PyObject *default_value)
-/*[clinic end generated code: output=b44f95eba8203d93 input=a3a279957f85eb6d]*/
+/*[clinic end generated code: output=b44f95eba8203d93 input=b788eba0ffad2e91]*/
/*[clinic end generated code: output=4f5c0e523eaf1251 input=9402c0af8582dc69]*/
{
datum dbm_key, val;
}
if (save(self, obj, 0) < 0 ||
- _Pickler_Write(self, &stop_op, 1) < 0)
+ _Pickler_Write(self, &stop_op, 1) < 0 ||
+ _Pickler_CommitFrame(self) < 0)
return -1;
-
+ self->framing = 0;
return 0;
}
either 0, 1 or 2, it is possible that it is overwritten (#12607). */
if (c2pwrite == 0)
POSIX_CALL(c2pwrite = dup(c2pwrite));
- if (errwrite == 0 || errwrite == 1)
+ while (errwrite == 0 || errwrite == 1)
POSIX_CALL(errwrite = dup(errwrite));
/* Dup fds for child.
if (self->check_same_thread) {
if (PyThread_get_thread_ident() != self->thread_ident) {
PyErr_Format(pysqlite_ProgrammingError,
- "SQLite objects created in a thread can only be used in that same thread."
- "The object was created in thread id %ld and this is thread id %ld",
+ "SQLite objects created in a thread can only be used in that same thread. "
+ "The object was created in thread id %ld and this is thread id %ld.",
self->thread_ident, PyThread_get_thread_ident());
return 0;
}
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER)
# define OPENSSL_VERSION_1_1 1
+# define PY_OPENSSL_1_1_API 1
+#endif
+
+/* LibreSSL 2.7.0 provides necessary OpenSSL 1.1.0 APIs */
+#if defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER >= 0x2070000fL
+# define PY_OPENSSL_1_1_API 1
#endif
/* Openssl comes with TLSv1.1 and TLSv1.2 between 1.0.0h and 1.0.1
#define INVALID_SOCKET (-1)
#endif
-#ifdef OPENSSL_VERSION_1_1
-/* OpenSSL 1.1.0+ */
-#ifndef OPENSSL_NO_SSL2
-#define OPENSSL_NO_SSL2
-#endif
-#else /* OpenSSL < 1.1.0 */
-#if defined(WITH_THREAD)
+/* OpenSSL 1.0.2 and LibreSSL needs extra code for locking */
+#if !defined(OPENSSL_VERSION_1_1) && defined(WITH_THREAD)
#define HAVE_OPENSSL_CRYPTO_LOCK
#endif
+#if defined(OPENSSL_VERSION_1_1) && !defined(OPENSSL_NO_SSL2)
+#define OPENSSL_NO_SSL2
+#endif
+
+#ifndef PY_OPENSSL_1_1_API
+/* OpenSSL 1.1 API shims for OpenSSL < 1.1.0 and LibreSSL < 2.7.0 */
+
#define TLS_method SSLv23_method
#define TLS_client_method SSLv23_client_method
#define TLS_server_method SSLv23_server_method
return s->tlsext_tick_lifetime_hint;
}
-#endif /* OpenSSL < 1.1.0 or LibreSSL */
+#endif /* OpenSSL < 1.1.0 or LibreSSL < 2.7.0 */
enum py_ssl_error {
PyObject *x_attr; /* Attributes dictionary */
} ExampleObject;
+typedef struct {
+ PyObject *integer;
+} testmultiphase_state;
+
/* Example methods */
static int
return 0;
}
-static int
+static void
Example_finalize(ExampleObject *self)
{
Py_CLEAR(self->x_attr);
- return 0;
}
static PyObject *
}
/* Helper for module definitions; there'll be a lot of them */
-#define TEST_MODULE_DEF(name, slots, methods) { \
+
+#define TEST_MODULE_DEF_EX(name, slots, methods, statesize, traversefunc) { \
PyModuleDef_HEAD_INIT, /* m_base */ \
name, /* m_name */ \
PyDoc_STR("Test module " name), /* m_doc */ \
- 0, /* m_size */ \
+ statesize, /* m_size */ \
methods, /* m_methods */ \
slots, /* m_slots */ \
- NULL, /* m_traverse */ \
+ traversefunc, /* m_traverse */ \
NULL, /* m_clear */ \
NULL, /* m_free */ \
}
+#define TEST_MODULE_DEF(name, slots, methods) TEST_MODULE_DEF_EX(name, slots, methods, 0, NULL)
+
PyModuleDef_Slot main_slots[] = {
{Py_mod_exec, execfunc},
{0, NULL},
return PyModuleDef_Init(&def_exec_unreported_exception);
}
+static int
+bad_traverse(PyObject *self, visitproc visit, void *arg) {
+ testmultiphase_state *m_state;
+
+ m_state = PyModule_GetState(self);
+ Py_VISIT(m_state->integer);
+ return 0;
+}
+
+static int
+execfunc_with_bad_traverse(PyObject *mod) {
+ testmultiphase_state *m_state;
+
+ m_state = PyModule_GetState(mod);
+ if (m_state == NULL) {
+ return -1;
+ }
+
+ m_state->integer = PyLong_FromLong(0x7fffffff);
+ Py_INCREF(m_state->integer);
+
+ return 0;
+}
+
+static PyModuleDef_Slot slots_with_bad_traverse[] = {
+ {Py_mod_exec, execfunc_with_bad_traverse},
+ {0, NULL}
+};
+
+static PyModuleDef def_with_bad_traverse = TEST_MODULE_DEF_EX(
+ "_testmultiphase_with_bad_traverse", slots_with_bad_traverse, NULL,
+ sizeof(testmultiphase_state), bad_traverse);
+
+PyMODINIT_FUNC
+PyInit__testmultiphase_with_bad_traverse(PyObject *spec) {
+ return PyModuleDef_Init(&def_with_bad_traverse);
+}
+
/*** Helper for imp test ***/
static PyModuleDef imp_dummy_def = TEST_MODULE_DEF("imp_dummy", main_slots, testexport_methods);
{
return PyModuleDef_Init(&imp_dummy_def);
}
+
static int
fbound(double val, double minval, double maxval)
{
- if (val > maxval)
+ if (val > maxval) {
val = maxval;
- else if (val < minval + 1)
+ }
+ else if (val < minval + 1.0) {
val = minval;
+ }
+
+ /* Round towards minus infinity (-inf) */
+ val = floor(val);
+
+ /* Cast double to integer: round towards zero */
return (int)val;
}
for (i = 0; i < fragment->len; i += width) {
double val = GETRAWSAMPLE(width, fragment->buf, i);
- val *= factor;
- val = floor(fbound(val, minval, maxval));
- SETRAWSAMPLE(width, ncp, i, (int)val);
+ int ival = fbound(val * factor, minval, maxval);
+ SETRAWSAMPLE(width, ncp, i, ival);
}
return rv;
}
for (i = 0; i < len; i += width*2) {
double val1 = GETRAWSAMPLE(width, cp, i);
double val2 = GETRAWSAMPLE(width, cp, i + width);
- double val = val1*lfactor + val2*rfactor;
- val = floor(fbound(val, minval, maxval));
- SETRAWSAMPLE(width, ncp, i/2, val);
+ double val = val1 * lfactor + val2 * rfactor;
+ int ival = fbound(val, minval, maxval);
+ SETRAWSAMPLE(width, ncp, i/2, ival);
}
return rv;
}
for (i = 0; i < fragment->len; i += width) {
double val = GETRAWSAMPLE(width, fragment->buf, i);
- int val1 = (int)floor(fbound(val*lfactor, minval, maxval));
- int val2 = (int)floor(fbound(val*rfactor, minval, maxval));
+ int val1 = fbound(val * lfactor, minval, maxval);
+ int val2 = fbound(val * rfactor, minval, maxval);
SETRAWSAMPLE(width, ncp, i*2, val1);
SETRAWSAMPLE(width, ncp, i*2 + width, val2);
}
else {
double fval = (double)val1 + (double)val2;
/* truncate in case of overflow */
- newval = (int)floor(fbound(fval, minval, maxval));
+ newval = fbound(fval, minval, maxval);
}
SETRAWSAMPLE(width, ncp, i, newval);
}
PyDoc_STRVAR(_dbm_dbm_get__doc__,
-"get($self, key, default=b\'\', /)\n"
+"get($self, key, default=None, /)\n"
"--\n"
"\n"
"Return the value for key if present, otherwise default.");
PyObject *return_value = NULL;
const char *key;
Py_ssize_clean_t key_length;
- PyObject *default_value = NULL;
+ PyObject *default_value = Py_None;
if (!PyArg_ParseTuple(args, "s#|O:get",
&key, &key_length, &default_value)) {
exit:
return return_value;
}
-/*[clinic end generated code: output=001fabffcecb99f1 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=919cc4337be4a5d3 input=a9049054013a1b77]*/
* garbage list (a Python list), else only the objects in finalizers with
* __del__ methods are appended to garbage. All objects in finalizers are
* merged into the old list regardless.
- * Returns 0 if all OK, <0 on error (out of memory to grow the garbage list).
- * The finalizers list is made empty on a successful return.
*/
-static int
+static void
handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
{
PyGC_Head *gc = finalizers->gc.gc_next;
if ((debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
if (PyList_Append(garbage, op) < 0)
- return -1;
+ break;
}
}
gc_list_merge(finalizers, old);
- return 0;
}
/* Run first-time finalizers (if any) on all the objects in collectable.
* reachable list of garbage. The programmer has to deal with
* this if they insist on creating this type of structure.
*/
- (void)handle_legacy_finalizers(&finalizers, old);
+ handle_legacy_finalizers(&finalizers, old);
/* Clear free list only during the collection of the highest
* generation */
PyObject *r, *cb = PyList_GET_ITEM(callbacks, i);
Py_INCREF(cb); /* make sure cb doesn't go away */
r = PyObject_CallFunction(cb, "sO", phase, info);
- Py_XDECREF(r);
- if (r == NULL)
+ if (r == NULL) {
PyErr_WriteUnraisable(cb);
+ }
+ else {
+ Py_DECREF(r);
+ }
Py_DECREF(cb);
}
Py_XDECREF(info);
if (collecting)
n = 0; /* already collecting, don't do anything */
else {
+ PyObject *exc, *value, *tb;
collecting = 1;
+ PyErr_Fetch(&exc, &value, &tb);
n = collect_with_callback(NUM_GENERATIONS - 1);
+ PyErr_Restore(exc, value, tb);
collecting = 0;
}
{
const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
PyGC_Head *g = AS_GC(op);
+ assert(!IS_TRACKED(op));
if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
return (PyVarObject *)PyErr_NoMemory();
g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
}
}
-static PyObject *
-mmap_concat(mmap_object *self, PyObject *bb)
-{
- CHECK_VALID(NULL);
- PyErr_SetString(PyExc_SystemError,
- "mmaps don't support concatenation");
- return NULL;
-}
-
-static PyObject *
-mmap_repeat(mmap_object *self, Py_ssize_t n)
-{
- CHECK_VALID(NULL);
- PyErr_SetString(PyExc_SystemError,
- "mmaps don't support repeat operation");
- return NULL;
-}
-
static int
mmap_ass_item(mmap_object *self, Py_ssize_t i, PyObject *v)
{
static PySequenceMethods mmap_as_sequence = {
(lenfunc)mmap_length, /*sq_length*/
- (binaryfunc)mmap_concat, /*sq_concat*/
- (ssizeargfunc)mmap_repeat, /*sq_repeat*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
(ssizeargfunc)mmap_item, /*sq_item*/
0, /*sq_slice*/
(ssizeobjargproc)mmap_ass_item, /*sq_ass_item*/
new_mmap_object(PyTypeObject *type, PyObject *args, PyObject *kwdict)
{
struct _Py_stat_struct status;
+ int fstat_result = -1;
mmap_object *m_obj;
Py_ssize_t map_size;
off_t offset = 0;
return NULL;
if (map_size < 0) {
PyErr_SetString(PyExc_OverflowError,
- "memory mapped length must be postiive");
+ "memory mapped length must be positive");
return NULL;
}
if (offset < 0) {
if (fd != -1)
(void)fcntl(fd, F_FULLFSYNC);
#endif
- if (fd != -1 && _Py_fstat_noraise(fd, &status) == 0
- && S_ISREG(status.st_mode)) {
+
+ if (fd != -1) {
+ Py_BEGIN_ALLOW_THREADS
+ fstat_result = _Py_fstat_noraise(fd, &status);
+ Py_END_ALLOW_THREADS
+ }
+
+ if (fd != -1 && fstat_result == 0 && S_ISREG(status.st_mode)) {
if (map_size == 0) {
if (status.st_size == 0) {
PyErr_SetString(PyExc_ValueError,
if (map_size < 0) {
PyErr_SetString(PyExc_OverflowError,
- "memory mapped length must be postiive");
+ "memory mapped length must be positive");
return NULL;
}
if (offset < 0) {
goto error;
(void) addelem(result, 1, w);
- if (lineno == 1) {
+ if (lineno) {
w = PyLong_FromLong(n->n_lineno);
if (w == NULL)
goto error;
(void) addelem(result, 2, w);
}
- if (col_offset == 1) {
+ if (col_offset) {
w = PyLong_FromLong(n->n_col_offset);
if (w == NULL)
goto error;
- (void) addelem(result, 3, w);
+ (void) addelem(result, 2 + lineno, w);
}
}
else {
static PyObject *IgnoreHandler;
static PyObject *IntHandler;
-/* On Solaris 8, gcc will produce a warning that the function
- declaration is not a prototype. This is caused by the definition of
- SIG_DFL as (void (*)())0; the correct declaration would have been
- (void (*)(int))0. */
-
-static PyOS_sighandler_t old_siginthandler = SIG_DFL;
-
#ifdef MS_WINDOWS
static HANDLE sigint_event = NULL;
#endif
int result = -1;
PyObject *iterator, *item;
long signum;
- int err;
sigemptyset(mask);
Py_DECREF(item);
if (signum == -1 && PyErr_Occurred())
goto error;
- if (0 < signum && signum < NSIG)
- err = sigaddset(mask, (int)signum);
- else
- err = 1;
- if (err) {
+ if (0 < signum && signum < NSIG) {
+ /* bpo-33329: ignore sigaddset() return value as it can fail
+ * for some reserved signals, but we want the `range(1, NSIG)`
+ * idiom to allow selecting all valid signals.
+ */
+ (void) sigaddset(mask, (int)signum);
+ }
+ else {
PyErr_Format(PyExc_ValueError,
"signal number %ld out of range", signum);
goto error;
/* Install default int handler */
Py_INCREF(IntHandler);
Py_SETREF(Handlers[SIGINT].func, IntHandler);
- old_siginthandler = PyOS_setsig(SIGINT, signal_handler);
+ PyOS_setsig(SIGINT, signal_handler);
}
#ifdef SIGHUP
int i;
PyObject *func;
- PyOS_setsig(SIGINT, old_siginthandler);
- old_siginthandler = SIG_DFL;
-
for (i = 1; i < NSIG; i++) {
func = Handlers[i].func;
_Py_atomic_store_relaxed(&Handlers[i].tripped, 0);
Handlers[i].func = NULL;
- if (i != SIGINT && func != NULL && func != Py_None &&
+ if (func != NULL && func != Py_None &&
func != DefaultHandler && func != IgnoreHandler)
PyOS_setsig(i, SIG_DFL);
Py_XDECREF(func);
# include <fcntl.h>
# endif
-#if defined(_MSC_VER) && _MSC_VER >= 1800
/* Provides the IsWindows7SP1OrGreater() function */
#include <VersionHelpers.h>
-#endif
/* remove some flags on older version Windows during run-time.
https://msdn.microsoft.com/en-us/library/windows/desktop/ms738596.aspx */
#ifdef MS_WINDOWS
if (support_wsa_no_inherit == -1) {
-#if defined(_MSC_VER) && _MSC_VER >= 1800
support_wsa_no_inherit = IsWindows7SP1OrGreater();
-#else
- DWORD version = GetVersion();
- DWORD major = (DWORD)LOBYTE(LOWORD(version));
- DWORD minor = (DWORD)HIBYTE(LOWORD(version));
- /* need Windows 7 SP1, 2008 R2 SP1 or later */
- support_wsa_no_inherit = major > 6 || (major == 6 && minor >= 1);
-#endif
}
#endif
return 0;
}
-static int
+static void
Xxo_finalize(XxoObject *self)
{
Py_CLEAR(self->x_attr);
- return 0;
}
static PyObject *
int n = 1000;
time_t t0, t1;
- if (!PyArg_ParseTuple(args, "OS|i", &obj, &name, &n))
+ if (!PyArg_ParseTuple(args, "OU|i", &obj, &name, &n))
return NULL;
t0 = clock();
while (--n >= 0) {
split_copy->ma_values = newvalues;
split_copy->ma_keys = mp->ma_keys;
split_copy->ma_used = mp->ma_used;
+ split_copy->ma_version_tag = DICT_NEXT_VERSION();
DK_INCREF(mp->ma_keys);
for (i = 0, n = size; i < n; i++) {
PyObject *value = mp->ma_values[i];
-
/* Float object implementation */
/* XXX There should be overflow checks here, but it's hard to check
"is representable"},
{"max_10_exp", "DBL_MAX_10_EXP -- maximum int e such that 10**e "
"is representable"},
- {"min", "DBL_MIN -- Minimum positive normalizer float"},
+ {"min", "DBL_MIN -- Minimum positive normalized float"},
{"min_exp", "DBL_MIN_EXP -- minimum int e such that radix**(e-1) "
"is a normalized float"},
{"min_10_exp", "DBL_MIN_10_EXP -- minimum int e such that 10**e is "
{"epsilon", "DBL_EPSILON -- Difference between 1 and the next "
"representable float"},
{"radix", "FLT_RADIX -- radix of exponent"},
- {"rounds", "FLT_ROUNDS -- addition rounds"},
+ {"rounds", "FLT_ROUNDS -- rounding mode"},
{0}
};
return NULL;
check_error:
- if (PyErr_ExceptionMatches(PyExc_StopAsyncIteration)) {
+ if (PyErr_ExceptionMatches(PyExc_StopAsyncIteration) ||
+ PyErr_ExceptionMatches(PyExc_GeneratorExit))
+ {
o->agt_state = AWAITABLE_STATE_CLOSED;
if (o->agt_args == NULL) {
/* when aclose() is called we don't want to propagate
- StopAsyncIteration; just raise StopIteration, signalling
- that 'aclose()' is done. */
+ StopAsyncIteration or GeneratorExit; just raise
+ StopIteration, signalling that this 'aclose()' await
+ is done.
+ */
PyErr_Clear();
PyErr_SetNone(PyExc_StopIteration);
}
}
- else if (PyErr_ExceptionMatches(PyExc_GeneratorExit)) {
- o->agt_state = AWAITABLE_STATE_CLOSED;
- PyErr_Clear(); /* ignore these errors */
- PyErr_SetNone(PyExc_StopIteration);
- }
return NULL;
}
{0}
};
+
+/* Helper for sanity check for traverse not handling m_state == NULL
+ * Issue #32374 */
+#ifdef Py_DEBUG
+static int
+bad_traverse_test(PyObject *self, void *arg) {
+ assert(self != NULL);
+ return 0;
+}
+#endif
+
PyTypeObject PyModuleDef_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
"moduledef", /* tp_name */
}
}
+ /* Sanity check for traverse not handling m_state == NULL
+ * This doesn't catch all possible cases, but in many cases it should
+ * make many cases of invalid code crash or raise Valgrind issues
+ * sooner than they would otherwise.
+ * Issue #32374 */
+#ifdef Py_DEBUG
+ if (def->m_traverse != NULL) {
+ def->m_traverse(m, bad_traverse_test, NULL);
+ }
+#endif
Py_DECREF(nameobj);
return m;
if (otherset == NULL)
return NULL;
rv = set_symmetric_difference_update(otherset, (PyObject *)so);
- if (rv == NULL)
+ if (rv == NULL) {
+ Py_DECREF(otherset);
return NULL;
+ }
Py_DECREF(rv);
return (PyObject *)otherset;
}
SQSLOT("__add__", sq_concat, NULL, wrap_binaryfunc,
"__add__($self, value, /)\n--\n\nReturn self+value."),
SQSLOT("__mul__", sq_repeat, NULL, wrap_indexargfunc,
- "__mul__($self, value, /)\n--\n\nReturn self*value.n"),
+ "__mul__($self, value, /)\n--\n\nReturn self*value."),
SQSLOT("__rmul__", sq_repeat, NULL, wrap_indexargfunc,
- "__rmul__($self, value, /)\n--\n\nReturn self*value."),
+ "__rmul__($self, value, /)\n--\n\nReturn value*self."),
SQSLOT("__getitem__", sq_item, slot_sq_item, wrap_sq_item,
"__getitem__($self, key, /)\n--\n\nReturn self[key]."),
SQSLOT("__setitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_setitem,
@exit /b 1\r
\r
:found\r
-@echo Using %MSBUILD% (found in the %_Py_MSBuild_Source%)\r
+@pushd %MSBUILD% >nul 2>nul\r
+@if not ERRORLEVEL 1 @(\r
+ @if exist msbuild.exe @(set MSBUILD="%CD%\msbuild.exe") else @(set MSBUILD=)\r
+ @popd\r
+)\r
+\r
+@if defined MSBUILD @echo Using %MSBUILD% (found in the %_Py_MSBuild_Source%)\r
+@if not defined MSBUILD @echo Failed to find MSBuild\r
@set _Py_MSBuild_Source=\r
+@if not defined MSBUILD @exit /b 1\r
+@exit /b 0\r
\r
set libraries=\r
set libraries=%libraries% bzip2-1.0.6\r
-if NOT "%IncludeSSL%"=="false" set libraries=%libraries% openssl-1.0.2k\r
+if NOT "%IncludeSSL%"=="false" set libraries=%libraries% openssl-1.0.2o\r
set libraries=%libraries% sqlite-3.21.0.0\r
if NOT "%IncludeTkinter%"=="false" set libraries=%libraries% tcl-core-8.6.6.0\r
if NOT "%IncludeTkinter%"=="false" set libraries=%libraries% tk-8.6.6.0\r
<sqlite3Dir>$(ExternalsDir)sqlite-3.21.0.0\</sqlite3Dir>\r
<bz2Dir>$(ExternalsDir)bzip2-1.0.6\</bz2Dir>\r
<lzmaDir>$(ExternalsDir)xz-5.2.2\</lzmaDir>\r
- <opensslDir>$(ExternalsDir)openssl-1.0.2k\</opensslDir>\r
+ <opensslDir>$(ExternalsDir)openssl-1.0.2o\</opensslDir>\r
<opensslIncludeDir>$(opensslDir)include32</opensslIncludeDir>\r
<opensslIncludeDir Condition="'$(ArchName)' == 'amd64'">$(opensslDir)include64</opensslIncludeDir>\r
<nasmDir>$(ExternalsDir)\nasm-2.11.06\</nasmDir>\r
<ImportGroup Label="ExtensionTargets">\r
</ImportGroup>\r
\r
- <Target Name="BuildPython3_dDef" BeforeTargets="BuildStubDef" Inputs="..\PC\python3.def" Outputs="$(IntDir)python3_d.def" Condition="$(Configuration) == 'Debug'">\r
+ <Target Name="BuildPython3_dDef" BeforeTargets="BuildStubDef" Condition="$(Configuration) == 'Debug'">\r
<ItemGroup>\r
<_DefLines Remove="@(_DefLines)" />\r
<_Lines Remove="@(_Lines)" />\r
+ <_OriginalLines Remove="@(_OriginalLines)" />\r
</ItemGroup>\r
<ReadLinesFromFile File="..\PC\python3.def">\r
<Output TaskParameter="Lines" ItemName="_DefLines" />\r
</ReadLinesFromFile>\r
+ <ReadLinesFromFile File="$(IntDir)python3_d.def" Condition="Exists('$(IntDir)python3_d.def')">\r
+ <Output TaskParameter="Lines" ItemName="_OriginalLines" />\r
+ </ReadLinesFromFile>\r
<PropertyGroup>\r
<_Pattern1>(=python$(MajorVersionNumber)$(MinorVersionNumber))\.</_Pattern1>\r
<_Sub1>$1_d.</_Sub1>\r
</_Lines>\r
</ItemGroup>\r
<MakeDir Directories="$(IntDir)" />\r
- <WriteLinesToFile File="$(IntDir)python3_d.def" Lines="@(_Lines->'%(New)')" Overwrite="true" />\r
+ <Message Text="Updating python3_d.def" Condition="@(_Lines->'%(New)') != @(_OriginalLines)" Importance="high" />\r
+ <WriteLinesToFile File="$(IntDir)python3_d.def" Lines="@(_Lines->'%(New)')" Overwrite="true"\r
+ Condition="@(_Lines->'%(New)') != @(_OriginalLines)" />\r
</Target>\r
\r
- <Target Name="BuildStubDef" BeforeTargets="PreLinkEvent" Inputs="..\PC\python3.def" Outputs="$(IntDir)python3stub.def">\r
+ <Target Name="BuildStubDef" BeforeTargets="PreLinkEvent">\r
<ItemGroup>\r
<_DefLines Remove="@(_DefLines)" />\r
<_Lines Remove="@(_Lines)" />\r
+ <_OriginalLines Remove="@(_OriginalLines)" />\r
</ItemGroup>\r
<ReadLinesFromFile File="..\PC\python3.def">\r
<Output TaskParameter="Lines" ItemName="_DefLines" />\r
</ReadLinesFromFile>\r
+ <ReadLinesFromFile File="$(IntDir)python3stub.def" Condition="Exists('$(IntDir)python3stub.def')">\r
+ <Output TaskParameter="Lines" ItemName="_OriginalLines" />\r
+ </ReadLinesFromFile>\r
<PropertyGroup>\r
<_Pattern>^[\w.]+=.+?\.([^ ]+).*$</_Pattern>\r
<_Sub>$1</_Sub>\r
<_Lines Include="@(_Symbols->'%(Symbol)')" />\r
</ItemGroup>\r
<MakeDir Directories="$(IntDir)" />\r
- <WriteLinesToFile File="$(IntDir)python3stub.def" Lines="@(_Lines)" Overwrite="true" />\r
+ <Message Text="Updating python3stub.def" Condition="@(_Lines) != @(_OriginalLines)" Importance="high" />\r
+ <WriteLinesToFile File="$(IntDir)python3stub.def" Lines="@(_Lines)" Overwrite="true"\r
+ Condition="@(_Lines) != @(_OriginalLines)" />\r
</Target>\r
</Project>
\ No newline at end of file
Homepage:\r
http://tukaani.org/xz/\r
_ssl\r
- Python wrapper for version 1.0.2k of the OpenSSL secure sockets\r
+ Python wrapper for version 1.0.2o of the OpenSSL secure sockets\r
library, which is built by ssl.vcxproj\r
Homepage:\r
http://www.openssl.org/\r
rem after deleting all the .pyc files reachable from Lib/.\r
rem -q runs the tests just once, and without deleting .pyc files.\r
rem -x64 Run the 64-bit build of python (or python_d if -d was specified)\r
-rem from the 'amd64' dir instead of the 32-bit build in this dir.\r
+rem When omitted, uses %PREFIX% if set or the 32-bit build\r
rem All leading instances of these switches are shifted off, and\r
rem whatever remains (up to 9 arguments) is passed to regrtest.py.\r
rem For example,\r
setlocal\r
\r
set pcbuild=%~dp0\r
-set prefix=%pcbuild%win32\\r
set suffix=\r
set qmode=\r
set dashO=\r
set regrtestargs=\r
+set exe=\r
\r
:CheckOpts\r
if "%1"=="-O" (set dashO=-O) & shift & goto CheckOpts\r
if "%1"=="-q" (set qmode=yes) & shift & goto CheckOpts\r
if "%1"=="-d" (set suffix=_d) & shift & goto CheckOpts\r
-if "%1"=="-x64" (set prefix=%pcbuild%amd64\) & shift & goto CheckOpts\r
+if "%1"=="-x64" (set prefix=%pcbuild%amd64) & shift & goto CheckOpts\r
if NOT "%1"=="" (set regrtestargs=%regrtestargs% %1) & shift & goto CheckOpts\r
\r
-set exe=%prefix%python%suffix%.exe\r
-set cmd="%exe%" %dashO% -Wd -E -bb -m test %regrtestargs%\r
+if not defined prefix set prefix=%pcbuild%win32\r
+set exe=%prefix%\python%suffix%.exe\r
+set cmd="%exe%" %dashO% -u -Wd -E -bb -m test %regrtestargs%\r
if defined qmode goto Qmode\r
\r
echo Deleting .pyc files ...\r
"%exe%" "%pcbuild%rmpyc.py"\r
\r
echo Cleaning _pth files ...\r
-if exist %prefix%*._pth del %prefix%*._pth \r
+if exist %prefix%\*._pth del %prefix%\*._pth\r
\r
echo on\r
%cmd%\r
return 0;
}
-static void
+static int
ast_clear(AST_object *self)
{
Py_CLEAR(self->dict);
+ return 0;
}
static int
return 0;
}
-static void
+static int
ast_clear(AST_object *self)
{
Py_CLEAR(self->dict);
+ return 0;
}
static int
}
}
else {
+ /* if we're so late into Python finalization that the module dict is
+ gone, then we can't even use PyImport_GetModule without triggering
+ an interpreter abort.
+ */
+ if (!PyThreadState_GET()->interp->modules) {
+ return NULL;
+ }
all_modules = PyImport_GetModuleDict();
warnings_module = PyDict_GetItem(all_modules, warnings_str);
static PyObject * special_lookup(PyObject *, _Py_Identifier *);
static int check_args_iterable(PyObject *func, PyObject *vararg);
static void format_kwargs_mapping_error(PyObject *func, PyObject *kwargs);
+static void format_awaitable_error(PyTypeObject *, int);
#define NAME_ERROR_MSG \
"name '%.200s' is not defined"
PyObject *iterable = TOP();
PyObject *iter = _PyCoro_GetAwaitableIter(iterable);
+ if (iter == NULL) {
+ format_awaitable_error(Py_TYPE(iterable),
+ _Py_OPCODE(next_instr[-2]));
+ }
+
Py_DECREF(iterable);
if (iter != NULL && PyCoro_CheckExact(iter)) {
}
/* This is gonna seem *real weird*, but if you put some other code between
- PyEval_EvalFrame() and PyEval_EvalCodeEx() you will need to adjust
+ PyEval_EvalFrame() and _PyEval_EvalFrameDefault() you will need to adjust
the test in the if statements in Misc/gdbinit (pystack and pystackv). */
static PyObject *
}
}
+static void
+format_awaitable_error(PyTypeObject *type, int prevopcode)
+{
+ if (type->tp_as_async == NULL || type->tp_as_async->am_await == NULL) {
+ if (prevopcode == BEFORE_ASYNC_WITH) {
+ PyErr_Format(PyExc_TypeError,
+ "'async with' received an object from __aenter__ "
+ "that does not implement __await__: %.100s",
+ type->tp_name);
+ }
+ else if (prevopcode == WITH_CLEANUP_START) {
+ PyErr_Format(PyExc_TypeError,
+ "'async with' received an object from __aexit__ "
+ "that does not implement __await__: %.100s",
+ type->tp_name);
+ }
+ }
+}
+
static PyObject *
unicode_concatenate(PyObject *v, PyObject *w,
PyFrameObject *f, const _Py_CODEUNIT *next_instr)
}
p[i] = '\0';
v = PyUnicode_FromString(p);
- if (v == NULL)
- return NULL;
PyMem_Free(p);
return v;
}
ADDOP(c, DUP_TOP);
ADDOP_O(c, LOAD_GLOBAL, stop_aiter_error, names);
ADDOP_I(c, COMPARE_OP, PyCmp_EXC_MATCH);
- ADDOP_JABS(c, POP_JUMP_IF_FALSE, try_cleanup);
-
- ADDOP(c, POP_TOP);
- ADDOP(c, POP_TOP);
- ADDOP(c, POP_TOP);
- ADDOP(c, POP_EXCEPT); /* for SETUP_EXCEPT */
- ADDOP(c, POP_BLOCK); /* for SETUP_LOOP */
- ADDOP_JABS(c, JUMP_ABSOLUTE, after_loop_else);
-
-
- compiler_use_next_block(c, try_cleanup);
+ ADDOP_JABS(c, POP_JUMP_IF_TRUE, try_cleanup);
ADDOP(c, END_FINALLY);
compiler_use_next_block(c, after_try);
VISIT_SEQ(c, stmt, s->v.AsyncFor.body);
ADDOP_JABS(c, JUMP_ABSOLUTE, try);
+ compiler_use_next_block(c, try_cleanup);
+ ADDOP(c, POP_TOP);
+ ADDOP(c, POP_TOP);
+ ADDOP(c, POP_TOP);
+ ADDOP(c, POP_EXCEPT); /* for SETUP_EXCEPT */
+ ADDOP(c, POP_TOP); /* for correct calculation of stack effect */
ADDOP(c, POP_BLOCK); /* for SETUP_LOOP */
compiler_pop_fblock(c, LOOP, try);
PyUnicode_GET_LENGTH(name));
if (!attr)
return 0;
- ADDOP_O(c, LOAD_ATTR, attr, names);
- Py_DECREF(attr);
+ ADDOP_N(c, LOAD_ATTR, attr, names);
pos = dot + 1;
}
}
if (level == NULL)
return 0;
- ADDOP_O(c, LOAD_CONST, level, consts);
- Py_DECREF(level);
+ ADDOP_N(c, LOAD_CONST, level, consts);
ADDOP_O(c, LOAD_CONST, Py_None, consts);
ADDOP_NAME(c, IMPORT_NAME, alias->name, names);
compiler_from_import(struct compiler *c, stmt_ty s)
{
Py_ssize_t i, n = asdl_seq_LEN(s->v.ImportFrom.names);
-
- PyObject *names = PyTuple_New(n);
- PyObject *level;
+ PyObject *level, *names;
static PyObject *empty_string;
if (!empty_string) {
return 0;
}
- if (!names)
- return 0;
-
level = PyLong_FromLong(s->v.ImportFrom.level);
if (!level) {
- Py_DECREF(names);
return 0;
}
+ ADDOP_N(c, LOAD_CONST, level, consts);
+
+ names = PyTuple_New(n);
+ if (!names)
+ return 0;
/* build up the names */
for (i = 0; i < n; i++) {
if (s->lineno > c->c_future->ff_lineno && s->v.ImportFrom.module &&
_PyUnicode_EqualToASCIIString(s->v.ImportFrom.module, "__future__")) {
- Py_DECREF(level);
Py_DECREF(names);
return compiler_error(c, "from __future__ imports must occur "
"at the beginning of the file");
}
+ ADDOP_N(c, LOAD_CONST, names, consts);
- ADDOP_O(c, LOAD_CONST, level, consts);
- Py_DECREF(level);
- ADDOP_O(c, LOAD_CONST, names, consts);
- Py_DECREF(names);
if (s->v.ImportFrom.module) {
ADDOP_NAME(c, IMPORT_NAME, s->v.ImportFrom.module, names);
}
store_name = alias->asname;
if (!compiler_nameop(c, store_name, Store)) {
- Py_DECREF(names);
return 0;
}
}
"param invalid for local variable");
return 0;
}
- ADDOP_O(c, op, mangled, varnames);
- Py_DECREF(mangled);
+ ADDOP_N(c, op, mangled, varnames);
return 1;
case OP_GLOBAL:
switch (ctx) {
_Py_IDENTIFIER(StopAsyncIteration);
comprehension_ty gen;
- basicblock *anchor, *if_cleanup, *try,
+ basicblock *if_cleanup, *try,
*after_try, *except, *try_cleanup;
Py_ssize_t i, n;
try = compiler_new_block(c);
after_try = compiler_new_block(c);
- try_cleanup = compiler_new_block(c);
except = compiler_new_block(c);
if_cleanup = compiler_new_block(c);
- anchor = compiler_new_block(c);
+ try_cleanup = compiler_new_block(c);
- if (if_cleanup == NULL || anchor == NULL ||
+ if (if_cleanup == NULL ||
try == NULL || after_try == NULL ||
except == NULL || try_cleanup == NULL) {
return 0;
ADDOP(c, DUP_TOP);
ADDOP_O(c, LOAD_GLOBAL, stop_aiter_error, names);
ADDOP_I(c, COMPARE_OP, PyCmp_EXC_MATCH);
- ADDOP_JABS(c, POP_JUMP_IF_FALSE, try_cleanup);
-
- ADDOP(c, POP_TOP);
- ADDOP(c, POP_TOP);
- ADDOP(c, POP_TOP);
- ADDOP(c, POP_EXCEPT); /* for SETUP_EXCEPT */
- ADDOP_JABS(c, JUMP_ABSOLUTE, anchor);
-
-
- compiler_use_next_block(c, try_cleanup);
+ ADDOP_JABS(c, POP_JUMP_IF_TRUE, try_cleanup);
ADDOP(c, END_FINALLY);
compiler_use_next_block(c, after_try);
}
compiler_use_next_block(c, if_cleanup);
ADDOP_JABS(c, JUMP_ABSOLUTE, try);
- compiler_use_next_block(c, anchor);
+
+ compiler_use_next_block(c, try_cleanup);
+ ADDOP(c, POP_TOP);
+ ADDOP(c, POP_TOP);
+ ADDOP(c, POP_TOP);
+ ADDOP(c, POP_EXCEPT); /* for SETUP_EXCEPT */
ADDOP(c, POP_TOP);
return 1;
if (s->v.AnnAssign.simple &&
(c->u->u_scope_type == COMPILER_SCOPE_MODULE ||
c->u->u_scope_type == COMPILER_SCOPE_CLASS)) {
+ VISIT(c, expr, s->v.AnnAssign.annotation);
mangled = _Py_Mangle(c->u->u_private, targ->v.Name.id);
if (!mangled) {
return 0;
}
- VISIT(c, expr, s->v.AnnAssign.annotation);
/* ADDOP_N decrefs its argument */
ADDOP_N(c, STORE_ANNOTATION, mangled, names);
}
if (Py_VerboseFlag)
PySys_WriteStderr("# clear builtins._\n");
- PyDict_SetItemString(interp->builtins, "_", Py_None);
+ if (PyDict_SetItemString(interp->builtins, "_", Py_None) < 0) {
+ PyErr_Clear();
+ }
for (p = sys_deletes; *p != NULL; p++) {
if (Py_VerboseFlag)
PySys_WriteStderr("# clear sys.%s\n", *p);
- PyDict_SetItemString(interp->sysdict, *p, Py_None);
+ if (PyDict_SetItemString(interp->sysdict, *p, Py_None) < 0) {
+ PyErr_Clear();
+ }
}
for (p = sys_files; *p != NULL; p+=2) {
if (Py_VerboseFlag)
value = PyDict_GetItemString(interp->sysdict, *(p+1));
if (value == NULL)
value = Py_None;
- PyDict_SetItemString(interp->sysdict, *p, value);
+ if (PyDict_SetItemString(interp->sysdict, *p, value) < 0) {
+ PyErr_Clear();
+ }
}
/* We prepare a list which will receive (name, weakref) tuples of
#define STORE_MODULE_WEAKREF(name, mod) \
if (weaklist != NULL) { \
PyObject *wr = PyWeakref_NewRef(mod, NULL); \
- if (name && wr) { \
+ if (wr) { \
PyObject *tup = PyTuple_Pack(2, name, wr); \
- PyList_Append(weaklist, tup); \
+ if (!tup || PyList_Append(weaklist, tup) < 0) { \
+ PyErr_Clear(); \
+ } \
Py_XDECREF(tup); \
+ Py_DECREF(wr); \
} \
- Py_XDECREF(wr); \
- if (PyErr_Occurred()) \
+ else { \
PyErr_Clear(); \
+ } \
}
/* Remove all modules from sys.modules, hoping that garbage collection
if (Py_VerboseFlag && PyUnicode_Check(key))
PySys_FormatStderr("# cleanup[2] removing %U\n", key);
STORE_MODULE_WEAKREF(key, value);
- PyDict_SetItem(modules, key, Py_None);
+ if (PyDict_SetItem(modules, key, Py_None) < 0) {
+ PyErr_Clear();
+ }
}
}
/* Once more */
_PyGC_CollectNoFail();
+#undef CLEAR_MODULE
#undef STORE_MODULE_WEAKREF
}
x = (_PyHASH_MULTIPLIER * x) ^ (Py_uhash_t) *p++;
x ^= (Py_uhash_t) len;
x ^= (Py_uhash_t) _Py_HashSecret.fnv.suffix;
- if (x == -1) {
- x = -2;
+ if (x == (Py_uhash_t) -1) {
+ x = (Py_uhash_t) -2;
}
return x;
}
uint64_t k0 = _le64toh(_Py_HashSecret.siphash.k0);
uint64_t k1 = _le64toh(_Py_HashSecret.siphash.k1);
uint64_t b = (uint64_t)src_sz << 56;
- const uint64_t *in = (uint64_t*)src;
+ const uint8_t *in = (uint8_t*)src;
uint64_t v0 = k0 ^ 0x736f6d6570736575ULL;
uint64_t v1 = k1 ^ 0x646f72616e646f6dULL;
uint64_t t;
uint8_t *pt;
- uint8_t *m;
while (src_sz >= 8) {
- uint64_t mi = _le64toh(*in);
- in += 1;
- src_sz -= 8;
+ uint64_t mi;
+ memcpy(&mi, in, sizeof(mi));
+ mi = _le64toh(mi);
+ in += sizeof(mi);
+ src_sz -= sizeof(mi);
v3 ^= mi;
DOUBLE_ROUND(v0,v1,v2,v3);
v0 ^= mi;
t = 0;
pt = (uint8_t *)&t;
- m = (uint8_t *)in;
switch (src_sz) {
- case 7: pt[6] = m[6]; /* fall through */
- case 6: pt[5] = m[5]; /* fall through */
- case 5: pt[4] = m[4]; /* fall through */
- case 4: memcpy(pt, m, sizeof(uint32_t)); break;
- case 3: pt[2] = m[2]; /* fall through */
- case 2: pt[1] = m[1]; /* fall through */
- case 1: pt[0] = m[0]; /* fall through */
+ case 7: pt[6] = in[6]; /* fall through */
+ case 6: pt[5] = in[5]; /* fall through */
+ case 5: pt[4] = in[4]; /* fall through */
+ case 4: memcpy(pt, in, sizeof(uint32_t)); break;
+ case 3: pt[2] = in[2]; /* fall through */
+ case 2: pt[1] = in[1]; /* fall through */
+ case 1: pt[0] = in[0]; /* fall through */
}
b |= _le64toh(t);
if (raise) {
struct _Py_stat_struct st;
+ int fstat_result;
if (urandom_cache.fd >= 0) {
+ Py_BEGIN_ALLOW_THREADS
+ fstat_result = _Py_fstat_noraise(urandom_cache.fd, &st);
+ Py_END_ALLOW_THREADS
+
/* Does the fd point to the same thing as before? (issue #21207) */
- if (_Py_fstat_noraise(urandom_cache.fd, &st)
+ if (fstat_result
|| st.st_dev != urandom_cache.st_dev
|| st.st_ino != urandom_cache.st_ino) {
/* Something changed: forget the cached fd (but don't close it,
-This is Python version 3.6.5
+This is Python version 3.6.6
============================
.. image:: https://travis-ci.org/python/cpython.svg?branch=3.6
to find out more. On macOS and Cygwin, the executable is called ``python.exe``;
elsewhere it's just ``python``.
+If you are running on macOS with the latest updates installed, make sure to install
+OpenSSL or some other SSL software using Homebrew or another package manager.
+If issues persist, see https://devguide.python.org/setup/#macos-and-os-x for more
+information.
+
On macOS, if you have configured Python with ``--enable-framework``, you
should use ``make frameworkinstall`` to do the installation. Note that this
installs the Python executable in a place that is not normally on your PATH,
Profile Guided Optimization
---------------------------
-PGO takes advantage of recent versions of the GCC or Clang compilers. If ran,
-``make profile-opt`` will do several steps.
+PGO takes advantage of recent versions of the GCC or Clang compilers. If used,
+either via ``configure --enable-optimizations`` above or by manually running
+``make profile-opt``, it will perform several steps regardless of configure flags.
First, the entire Python directory is cleaned of temporary files that may have
resulted in a previous compilation.
Then, an instrumented version of the interpreter is built, using suitable
-compiler flags for each flavour. Note that this is just an intermediary step
-and the binary resulted after this step is not good for real life workloads, as
+compiler flags for each flavour. Note that this is just an intermediary step.
+The binary resulting from this step is not good for real life workloads as
it has profiling instructions embedded inside.
After this instrumented version of the interpreter is built, the Makefile will
By default, tests are prevented from overusing resources like disk space and
memory. To enable these tests, run ``make testall``.
-If any tests fail, you can re-run the failing test(s) in verbose mode::
+If any tests fail, you can re-run the failing test(s) in verbose mode. For
+example, if ``test_os`` and ``test_gdb`` failed, you can run::
- make test TESTOPTS="-v test_that_failed"
+ make test TESTOPTS="-v test_os test_gdb"
If the failure persists and appears to be a problem with Python rather than
your environment, you can `file a bug report <https://bugs.python.org>`_ and
return ''.join(escapes[b] for b in s.encode(encoding))
+def is_literal_string(s):
+ return s[0] in '\'"' or (s[0] in 'rRuU' and s[1] in '\'"')
+
+
def safe_eval(s):
# unwrap quotes, safely
return eval(s, {'__builtins__':{}}, {})
return 1 in [c in str for c in set]
-def _visit_pyfiles(list, dirname, names):
- """Helper for getFilesForName()."""
- # get extension for python source files
- if '_py_ext' not in globals():
- global _py_ext
- _py_ext = importlib.machinery.SOURCE_SUFFIXES[0]
-
- # don't recurse into CVS directories
- if 'CVS' in names:
- names.remove('CVS')
-
- # add all *.py files to list
- list.extend(
- [os.path.join(dirname, file) for file in names
- if os.path.splitext(file)[1] == _py_ext]
- )
-
-
def getFilesForName(name):
"""Get a list of module files for a filename, a module or package name,
or a directory.
if os.path.isdir(name):
# find all python files in directory
list = []
- os.walk(name, _visit_pyfiles, list)
+ # get extension for python source files
+ _py_ext = importlib.machinery.SOURCE_SUFFIXES[0]
+ for root, dirs, files in os.walk(name):
+ # don't recurse into CVS directories
+ if 'CVS' in dirs:
+ dirs.remove('CVS')
+ # add all *.py files to list
+ list.extend(
+ [os.path.join(root, file) for file in files
+ if os.path.splitext(file)[1] == _py_ext]
+ )
return list
elif os.path.exists(name):
# a single file
def __call__(self, ttype, tstring, stup, etup, line):
# dispatch
## import token
-## print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
-## 'tstring:', tstring
+## print('ttype:', token.tok_name[ttype], 'tstring:', tstring,
+## file=sys.stderr)
self.__state(ttype, tstring, stup[0])
def __waiting(self, ttype, tstring, lineno):
if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
# module docstring?
if self.__freshmodule:
- if ttype == tokenize.STRING:
+ if ttype == tokenize.STRING and is_literal_string(tstring):
self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
self.__freshmodule = 0
elif ttype not in (tokenize.COMMENT, tokenize.NL):
def __suitedocstring(self, ttype, tstring, lineno):
# ignore any intervening noise
- if ttype == tokenize.STRING:
+ if ttype == tokenize.STRING and is_literal_string(tstring):
self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
self.__state = self.__waiting
elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
if self.__data:
self.__addentry(EMPTYSTRING.join(self.__data))
self.__state = self.__waiting
- elif ttype == tokenize.STRING:
+ elif ttype == tokenize.STRING and is_literal_string(tstring):
self.__data.append(safe_eval(tstring))
elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
token.NEWLINE, tokenize.NL]:
for project, version in ensurepip._PROJECTS:
data = json.loads(urllib.request.urlopen(
- "https://pypi.python.org/pypi/{}/json".format(project),
+ "https://pypi.org/pypi/{}/json".format(project),
cadefault=True,
).read().decode("utf8"))
upstream_version = data["info"]["version"]
return decorated_fxn
-def mq_patches_applied():
- """Check if there are any applied MQ patches."""
- cmd = 'hg qapplied'
- with subprocess.Popen(cmd.split(),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE) as st:
- bstdout, _ = st.communicate()
- return st.returncode == 0 and bstdout
-
-
def get_git_branch():
"""Get the symbolic name for the current git branch"""
cmd = "git rev-parse --abbrev-ref HEAD".split()
@status("Getting the list of files that have been added/changed",
info=lambda x: n_files_str(len(x)))
def changed_files(base_branch=None):
- """Get the list of changed or added files from Mercurial or git."""
- if os.path.isdir(os.path.join(SRCDIR, '.hg')):
- if base_branch is not None:
- sys.exit('need a git checkout to check PR status')
- cmd = 'hg status --added --modified --no-status'
- if mq_patches_applied():
- cmd += ' --rev qparent'
- with subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) as st:
- filenames = [x.decode().rstrip() for x in st.stdout]
- elif os.path.exists(os.path.join(SRCDIR, '.git')):
+ """Get the list of changed or added files from git."""
+ if os.path.exists(os.path.join(SRCDIR, '.git')):
# We just use an existence check here as:
# directory = normal git checkout/clone
# file = git worktree directory
filename = filename.split(' -> ', 2)[1].strip()
filenames.append(filename)
else:
- sys.exit('need a Mercurial or git checkout to get modified files')
+ sys.exit('need a git checkout to get modified files')
filenames2 = []
for filename in filenames:
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
-# arguments). Of course, the original file is kept as a back-up
-# (with a "~" attached to its name).
+# arguments).
+# The original file is kept as a back-up (with a "~" attached to its name);
+# the -n flag can be used to disable this.
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
new_interpreter = None
preserve_timestamps = False
+create_backup = True
+
def main():
global new_interpreter
global preserve_timestamps
- usage = ('usage: %s -i /interpreter -p file-or-directory ...\n' %
+ global create_backup
+ usage = ('usage: %s -i /interpreter -p -n file-or-directory ...\n' %
sys.argv[0])
try:
- opts, args = getopt.getopt(sys.argv[1:], 'i:p')
+ opts, args = getopt.getopt(sys.argv[1:], 'i:pn')
except getopt.error as msg:
err(str(msg) + '\n')
err(usage)
new_interpreter = a.encode()
if o == '-p':
preserve_timestamps = True
+ if o == '-n':
+ create_backup = False
if not new_interpreter or not new_interpreter.startswith(b'/') or \
not args:
err('-i option or file-or-directory missing\n')
except OSError as msg:
err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
# Then make a backup of the original file as filename~
- try:
- os.rename(filename, filename + '~')
- except OSError as msg:
- err('%s: warning: backup failed (%r)\n' % (filename, msg))
+ if create_backup:
+ try:
+ os.rename(filename, filename + '~')
+ except OSError as msg:
+ err('%s: warning: backup failed (%r)\n' % (filename, msg))
+ else:
+ try:
+ os.remove(filename)
+ except OSError as msg:
+ err('%s: warning: removing failed (%r)\n' % (filename, msg))
# Now move the temp file to the original file
try:
os.rename(tempname, filename)
]
LIBRESSL_RECENT_VERSIONS = [
- "2.5.3",
"2.5.5",
+ "2.6.4",
+ "2.7.1",
]
# store files in ../multissl
action='store_true',
help="Don't run tests, only compile _ssl.c and _hashopenssl.c."
)
+parser.add_argument(
+ '--system',
+ default='',
+ help="Override the automatic system type detection."
+)
class AbstractBuilder(object):
# build directory (removed after install)
self.build_dir = os.path.join(
self.src_dir, self.build_template.format(version))
+ self.system = args.system
def __str__(self):
return "<{0.__class__.__name__} for {0.version}>".format(self)
cwd = self.build_dir
cmd = ["./config", "shared", "--prefix={}".format(self.install_dir)]
cmd.extend(self.compile_args)
- self._subprocess_call(cmd, cwd=cwd)
+ env = None
+ if self.system:
+ env = os.environ.copy()
+ env['SYSTEM'] = self.system
+ self._subprocess_call(cmd, cwd=cwd, env=env)
# Old OpenSSL versions do not support parallel builds.
- self._subprocess_call(["make", "-j1"], cwd=cwd)
+ self._subprocess_call(["make", "-j1"], cwd=cwd, env=env)
def _make_install(self, remove=True):
self._subprocess_call(["make", "-j1", "install"], cwd=self.build_dir)
enableval=$enable_universalsdk;
case $enableval in
yes)
- # Locate the best usable SDK, see Mac/README.txt for more
+ # Locate the best usable SDK, see Mac/README for more
# information
enableval="`/usr/bin/xcodebuild -version -sdk macosx Path 2>/dev/null`"
if ! ( echo $enableval | grep -E '\.sdk' 1>/dev/null )
if test "$ac_sys_system" = "Darwin"
then
# Compiler selection on MacOSX is more complicated than
- # AC_PROG_CC can handle, see Mac/README.txt for more
+ # AC_PROG_CC can handle, see Mac/README for more
# information
if test -z "${CC}"
then
then
case $GCC in
yes)
- if test "$CC" != 'g++' ; then
- STRICT_PROTO="-Wstrict-prototypes"
- fi
# For gcc 4.x we need to use -fwrapv so lets check if its supported
if "$CC" -v --help 2>/dev/null |grep -- -fwrapv > /dev/null; then
WRAP="-fwrapv"
;;
esac
- OPT="$OPT $STRICT_PROTO"
-
case $ac_sys_system in
SCO_SV*) OPT="$OPT -m486 -DSCO5"
;;
CFLAGS_NODIST="$CFLAGS_NODIST -Wno-missing-field-initializers"
fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can turn off $CC invalid function cast warning" >&5
+$as_echo_n "checking if we can turn off $CC invalid function cast warning... " >&6; }
+ ac_save_cc="$CC"
+ CC="$CC -Wcast-function-type -Werror"
+ if ${ac_cv_disable_cast_function_type+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+ ac_cv_disable_cast_function_type=yes
+
+else
+
+ ac_cv_disable_cast_function_type=no
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+ CC="$ac_save_cc"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_disable_cast_function_type" >&5
+$as_echo "$ac_cv_disable_cast_function_type" >&6; }
+
+ if test $ac_cv_disable_cast_function_type = yes
+ then
+ CFLAGS_NODIST="$CFLAGS_NODIST -Wno-cast-function-type"
+ fi
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can turn on $CC mixed sign comparison warning" >&5
$as_echo_n "checking if we can turn on $CC mixed sign comparison warning... " >&6; }
ac_save_cc="$CC"
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_enable_unreachable_code_warning" >&5
$as_echo "$ac_cv_enable_unreachable_code_warning" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can turn on $CC strict-prototypes warning" >&5
+$as_echo_n "checking if we can turn on $CC strict-prototypes warning... " >&6; }
+ ac_save_cc="$CC"
+ CC="$CC -Werror -Wstrict-prototypes"
+ if ${ac_cv_enable_strict_prototypes_warning+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+ ac_cv_enable_strict_prototypes_warning=yes
+
+else
+
+ ac_cv_enable_strict_prototypes_warning=no
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+ CC="$ac_save_cc"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_enable_strict_prototypes_warning" >&5
+$as_echo "$ac_cv_enable_strict_prototypes_warning" >&6; }
+
+ if test $ac_cv_enable_strict_prototypes_warning = yes
+ then
+ CFLAGS_NODIST="$CFLAGS_NODIST -Wstrict-prototypes"
+ fi
+
# if using gcc on alpha, use -mieee to get (near) full IEEE 754
# support. Without this, treatment of subnormals doesn't follow
# the standard.
[
case $enableval in
yes)
- # Locate the best usable SDK, see Mac/README.txt for more
+ # Locate the best usable SDK, see Mac/README for more
# information
enableval="`/usr/bin/xcodebuild -version -sdk macosx Path 2>/dev/null`"
if ! ( echo $enableval | grep -E '\.sdk' 1>/dev/null )
if test "$ac_sys_system" = "Darwin"
then
# Compiler selection on MacOSX is more complicated than
- # AC_PROG_CC can handle, see Mac/README.txt for more
+ # AC_PROG_CC can handle, see Mac/README for more
# information
if test -z "${CC}"
then
then
case $GCC in
yes)
- if test "$CC" != 'g++' ; then
- STRICT_PROTO="-Wstrict-prototypes"
- fi
# For gcc 4.x we need to use -fwrapv so lets check if its supported
if "$CC" -v --help 2>/dev/null |grep -- -fwrapv > /dev/null; then
WRAP="-fwrapv"
;;
esac
- OPT="$OPT $STRICT_PROTO"
-
case $ac_sys_system in
SCO_SV*) OPT="$OPT -m486 -DSCO5"
;;
CFLAGS_NODIST="$CFLAGS_NODIST -Wno-missing-field-initializers"
fi
+ AC_MSG_CHECKING(if we can turn off $CC invalid function cast warning)
+ ac_save_cc="$CC"
+ CC="$CC -Wcast-function-type -Werror"
+ AC_CACHE_VAL(ac_cv_disable_cast_function_type,
+ AC_COMPILE_IFELSE(
+ [
+ AC_LANG_PROGRAM([[]], [[]])
+ ],[
+ ac_cv_disable_cast_function_type=yes
+ ],[
+ ac_cv_disable_cast_function_type=no
+ ]))
+ CC="$ac_save_cc"
+ AC_MSG_RESULT($ac_cv_disable_cast_function_type)
+
+ if test $ac_cv_disable_cast_function_type = yes
+ then
+ CFLAGS_NODIST="$CFLAGS_NODIST -Wno-cast-function-type"
+ fi
+
AC_MSG_CHECKING(if we can turn on $CC mixed sign comparison warning)
ac_save_cc="$CC"
CC="$CC -Wsign-compare"
fi
AC_MSG_RESULT($ac_cv_enable_unreachable_code_warning)
+ AC_MSG_CHECKING(if we can turn on $CC strict-prototypes warning)
+ ac_save_cc="$CC"
+ CC="$CC -Werror -Wstrict-prototypes"
+ AC_CACHE_VAL(ac_cv_enable_strict_prototypes_warning,
+ AC_COMPILE_IFELSE(
+ [
+ AC_LANG_PROGRAM([[]], [[]])
+ ],[
+ ac_cv_enable_strict_prototypes_warning=yes
+ ],[
+ ac_cv_enable_strict_prototypes_warning=no
+ ]))
+ CC="$ac_save_cc"
+ AC_MSG_RESULT($ac_cv_enable_strict_prototypes_warning)
+
+ if test $ac_cv_enable_strict_prototypes_warning = yes
+ then
+ CFLAGS_NODIST="$CFLAGS_NODIST -Wstrict-prototypes"
+ fi
+
# if using gcc on alpha, use -mieee to get (near) full IEEE 754
# support. Without this, treatment of subnormals doesn't follow
# the standard.